Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--   drivers/net/ethernet/marvell/mv643xx_eth.c |  5
-rw-r--r--   drivers/net/ethernet/marvell/pxa168_eth.c  | 19
-rw-r--r--   drivers/net/ethernet/marvell/skge.c        | 75
-rw-r--r--   drivers/net/ethernet/marvell/sky2.c        | 33
4 files changed, 51 insertions, 81 deletions
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 9edecfa1f0f..5e1ca0f0509 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -57,7 +57,6 @@
 #include <linux/types.h>
 #include <linux/inet_lro.h>
 #include <linux/slab.h>
-#include <asm/system.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
 static char mv643xx_eth_driver_version[] = "1.4";
@@ -667,7 +666,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 
 		skb = __skb_dequeue(&mp->rx_recycle);
 		if (skb == NULL)
-			skb = dev_alloc_skb(mp->skb_size);
+			skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 
 		if (skb == NULL) {
 			mp->oom = 1;
@@ -1832,7 +1831,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
 	struct sockaddr *sa = addr;
 
 	if (!is_valid_ether_addr(sa->sa_data))
-		return -EINVAL;
+		return -EADDRNOTAVAIL;
 
 	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
 
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 953ba5851f7..efec6b60b32 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -43,7 +43,6 @@
 #include <linux/interrupt.h>
 #include <linux/types.h>
 #include <asm/pgtable.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <linux/pxa168_eth.h>
 
@@ -220,7 +219,6 @@ struct pxa168_eth_private {
 	u8 work_todo;
 	int skb_size;
 
-	struct net_device_stats stats;
 	/* Size of Tx Ring per queue */
 	int tx_ring_size;
 	/* Number of tx descriptors in use */
@@ -350,7 +348,7 @@ static void rxq_refill(struct net_device *dev)
 	while (pep->rx_desc_count < pep->rx_ring_size) {
 		int size;
 
-		skb = dev_alloc_skb(pep->skb_size);
+		skb = netdev_alloc_skb(dev, pep->skb_size);
 		if (!skb)
 			break;
 		if (SKB_DMA_REALIGN)
@@ -627,8 +625,9 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
 	unsigned char oldMac[ETH_ALEN];
 
 	if (!is_valid_ether_addr(sa->sa_data))
-		return -EINVAL;
+		return -EADDRNOTAVAIL;
 	memcpy(oldMac, dev->dev_addr, ETH_ALEN);
+	dev->addr_assign_type &= ~NET_ADDR_RANDOM;
 	memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
 	netif_addr_lock_bh(dev);
 	update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
@@ -1017,10 +1016,9 @@ static int rxq_init(struct net_device *dev)
 	/* Allocate RX skb rings */
 	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
			      GFP_KERNEL);
-	if (!pep->rx_skb) {
-		printk(KERN_ERR "%s: Cannot alloc RX skb ring\n", dev->name);
+	if (!pep->rx_skb)
 		return -ENOMEM;
-	}
+
 	/* Allocate RX ring */
 	pep->rx_desc_count = 0;
 	size = pep->rx_ring_size * sizeof(struct rx_desc);
@@ -1081,10 +1079,9 @@ static int txq_init(struct net_device *dev)
 
 	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
			      GFP_KERNEL);
-	if (!pep->tx_skb) {
-		printk(KERN_ERR "%s: Cannot alloc TX skb ring\n", dev->name);
+	if (!pep->tx_skb)
 		return -ENOMEM;
-	}
+
 	/* Allocate TX ring */
 	pep->tx_desc_count = 0;
 	size = pep->tx_ring_size * sizeof(struct tx_desc);
@@ -1522,7 +1519,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
 	INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
 
 	printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
-	random_ether_addr(dev->dev_addr);
+	eth_hw_addr_random(dev);
 
 	pep->pd = pdev->dev.platform_data;
 	pep->rx_ring_size = NUM_RX_DESCS;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index edb9bda55d5..5a30bf82309 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,20 +931,17 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static int skge_rx_setup(struct pci_dev *pdev,
-			 struct skge_element *e,
-			 struct sk_buff *skb, unsigned int bufsize)
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			  struct sk_buff *skb, unsigned int bufsize)
 {
 	struct skge_rx_desc *rd = e->desc;
-	dma_addr_t map;
+	u64 map;
 
-	map = pci_map_single(pdev, skb->data, bufsize,
+	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
			     PCI_DMA_FROMDEVICE);
 
-	if (pci_dma_mapping_error(pdev, map))
-		goto mapping_error;
-	rd->dma_lo = lower_32_bits(map);
-	rd->dma_hi = upper_32_bits(map);
+	rd->dma_lo = map;
+	rd->dma_hi = map >> 32;
 	e->skb = skb;
 	rd->csum1_start = ETH_HLEN;
 	rd->csum2_start = ETH_HLEN;
@@ -956,13 +953,6 @@ static int skge_rx_setup(struct pci_dev *pdev,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, bufsize);
-	return 0;
-
-mapping_error:
-	if (net_ratelimit())
-		dev_warn(&pdev->dev, "%s: rx mapping error\n",
-			 skb->dev->name);
-	return -EIO;
 }
 
 /* Resume receiving using existing skb,
@@ -1024,11 +1014,7 @@ static int skge_rx_fill(struct net_device *dev)
 			return -ENOMEM;
 
 		skb_reserve(skb, NET_IP_ALIGN);
-		if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
-			kfree_skb(skb);
-			return -ENOMEM;
-		}
-
+		skge_rx_setup(skge, e, skb, skge->rx_buf_size);
 	} while ((e = e->next) != ring->start);
 
 	ring->to_clean = ring->start;
@@ -2743,7 +2729,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
-	dma_addr_t map;
+	u64 map;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
@@ -2757,14 +2743,11 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(hw->pdev, map))
-		goto mapping_error;
-
 	dma_unmap_addr_set(e, mapaddr, map);
 	dma_unmap_len_set(e, maplen, len);
 
-	td->dma_lo = lower_32_bits(map);
-	td->dma_hi = upper_32_bits(map);
+	td->dma_lo = map;
+	td->dma_hi = map >> 32;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const int offset = skb_checksum_start_offset(skb);
@@ -2795,16 +2778,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 			map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
					       skb_frag_size(frag), DMA_TO_DEVICE);
-			if (dma_mapping_error(&hw->pdev->dev, map))
-				goto mapping_unwind;
 
 			e = e->next;
 			e->skb = skb;
 			tf = e->desc;
 			BUG_ON(tf->control & BMU_OWN);
 
-			tf->dma_lo = lower_32_bits(map);
-			tf->dma_hi = upper_32_bits(map);
+			tf->dma_lo = map;
+			tf->dma_hi = (u64) map >> 32;
 			dma_unmap_addr_set(e, mapaddr, map);
 			dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2834,28 +2815,6 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 	}
 
 	return NETDEV_TX_OK;
-
-mapping_unwind:
-	/* unroll any pages that were already mapped. */
-	if (e != skge->tx_ring.to_use) {
-		struct skge_element *u;
-
-		for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
-			pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
-				       dma_unmap_len(u, maplen),
-				       PCI_DMA_TODEVICE);
-		e = skge->tx_ring.to_use;
-	}
-	/* undo the mapping for the skb header */
-	pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
-			 dma_unmap_len(e, maplen),
-			 PCI_DMA_TODEVICE);
-mapping_error:
-	/* mapping error causes error message and packet to be discarded. */
-	if (net_ratelimit())
-		dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
-	dev_kfree_skb(skb);
-	return NETDEV_TX_OK;
 }
 
@@ -3099,17 +3058,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
 		if (!nskb)
 			goto resubmit;
 
-		if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
-			dev_kfree_skb(nskb);
-			goto resubmit;
-		}
-
 		pci_unmap_single(skge->hw->pdev,
				 dma_unmap_addr(e, mapaddr),
				 dma_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
 		skb = e->skb;
 		prefetch(skb->data);
+		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
 	}
 
 	skb_put(skb, len);
@@ -3852,10 +3807,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	struct skge_port *skge;
 	struct net_device *dev = alloc_etherdev(sizeof(*skge));
 
-	if (!dev) {
-		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
+	if (!dev)
 		return NULL;
-	}
 
 	SET_NETDEV_DEV(dev, &hw->pdev->dev);
 	dev->netdev_ops = &skge_netdev_ops;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 760c2b17dfd..c9b504e2dfc 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -95,6 +95,10 @@ static int disable_msi = 0;
 module_param(disable_msi, int, 0);
 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 
+static int legacy_pme = 0;
+module_param(legacy_pme, int, 0);
+MODULE_PARM_DESC(legacy_pme, "Legacy power management");
+
 static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
@@ -867,6 +871,13 @@ static void sky2_wol_init(struct sky2_port *sky2)
 	/* Disable PiG firmware */
 	sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
 
+	/* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */
+	if (legacy_pme) {
+		u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+		reg1 |= PCI_Y2_PME_LEGACY;
+		sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	}
+
 	/* block receiver */
 	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 	sky2_read32(hw, B0_CTST);
@@ -1756,13 +1767,14 @@ static int sky2_open(struct net_device *dev)
 
 	sky2_hw_up(sky2);
 
+	/* Enable interrupts from phy/mac for port */
+	imask = sky2_read32(hw, B0_IMSK);
+
 	if (hw->chip_id == CHIP_ID_YUKON_OPT ||
	    hw->chip_id == CHIP_ID_YUKON_PRM ||
	    hw->chip_id == CHIP_ID_YUKON_OP_2)
 		imask |= Y2_IS_PHY_QLNK;	/* enable PHY Quick Link */
 
-	/* Enable interrupts from phy/mac for port */
-	imask = sky2_read32(hw, B0_IMSK);
 	imask |= portirq_msk[port];
 	sky2_write32(hw, B0_IMSK, imask);
 	sky2_read32(hw, B0_IMSK);
@@ -2457,6 +2469,17 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 	return err;
 }
 
+static inline bool needs_copy(const struct rx_ring_info *re,
+			      unsigned length)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	/* Some architectures need the IP header to be aligned */
+	if (!IS_ALIGNED(re->data_addr + ETH_HLEN, sizeof(u32)))
+		return true;
+#endif
+	return length < copybreak;
+}
+
 /* For small just reuse existing skb for next receive */
 static struct sk_buff *receive_copy(struct sky2_port *sky2,
				    const struct rx_ring_info *re,
@@ -2587,7 +2610,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
 		goto error;
 
 okay:
-	if (length < copybreak)
+	if (needs_copy(re, length))
 		skb = receive_copy(sky2, re, length);
 	else
 		skb = receive_new(sky2, re, length);
@@ -4700,10 +4723,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	struct sky2_port *sky2;
 	struct net_device *dev = alloc_etherdev(sizeof(*sky2));
 
-	if (!dev) {
-		dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
+	if (!dev)
 		return NULL;
-	}
 
 	SET_NETDEV_DEV(dev, &hw->pdev->dev);
 	dev->irq = hw->pdev->irq;