Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c  |    5
-rw-r--r--  drivers/net/bonding/bonding.h    |    1
-rw-r--r--  drivers/net/cpmac.c              |  145
-rw-r--r--  drivers/net/e1000e/e1000.h       |    5
-rw-r--r--  drivers/net/e1000e/netdev.c      |  425
-rw-r--r--  drivers/net/ehea/ehea.h          |    2
-rw-r--r--  drivers/net/ehea/ehea_main.c     |   28
-rw-r--r--  drivers/net/forcedeth.c          |   16
-rw-r--r--  drivers/net/ipg.c                |   22
-rw-r--r--  drivers/net/ipg.h                |   20
-rw-r--r--  drivers/net/irda/au1k_ir.c       |   11
-rw-r--r--  drivers/net/loopback.c           |    2
-rw-r--r--  drivers/net/mlx4/icm.c           |    4
-rw-r--r--  drivers/net/natsemi.c            |    1
-rw-r--r--  drivers/net/ppp_mppe.c           |   10
-rw-r--r--  drivers/net/rrunner.c            |    8
-rw-r--r--  drivers/net/usb/rndis_host.c     |   18
17 files changed, 258 insertions, 465 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6909becb10f..6937ef0e727 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -188,6 +188,7 @@ struct bond_parm_tbl arp_validate_tbl[] = {
/*-------------------------- Forward declarations ---------------------------*/
static void bond_send_gratuitous_arp(struct bonding *bond);
+static void bond_deinit(struct net_device *bond_dev);
/*---------------------------- General routines -----------------------------*/
@@ -3681,7 +3682,7 @@ static int bond_open(struct net_device *bond_dev)
}
if (bond->params.mode == BOND_MODE_8023AD) {
- INIT_DELAYED_WORK(&bond->ad_work, bond_alb_monitor);
+ INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
queue_delayed_work(bond->wq, &bond->ad_work, 0);
/* register to receive LACPDUs */
bond_register_lacpdu(bond);
@@ -4449,7 +4450,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
/* De-initialize device specific data.
* Caller must hold rtnl_lock.
*/
-void bond_deinit(struct net_device *bond_dev)
+static void bond_deinit(struct net_device *bond_dev)
{
struct bonding *bond = bond_dev->priv;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index d1ed14bf1cc..61c1b4536d3 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -302,7 +302,6 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_de
int bond_create(char *name, struct bond_params *params, struct bonding **newbond);
void bond_destroy(struct bonding *bond);
int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
-void bond_deinit(struct net_device *bond_dev);
int bond_create_sysfs(void);
void bond_destroy_sysfs(void);
void bond_destroy_sysfs_entry(struct bonding *bond);
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 57541d2d9e1..6fd95a2c8ce 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -34,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/gpio.h>
@@ -53,12 +54,6 @@ MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
#define CPMAC_VERSION "0.5.0"
-/* stolen from net/ieee80211.h */
-#ifndef MAC_FMT
-#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
-#define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \
- ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5]
-#endif
/* frame size + 802.1q tag */
#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
#define CPMAC_QUEUES 8
@@ -211,6 +206,7 @@ struct cpmac_priv {
struct net_device *dev;
struct work_struct reset_work;
struct platform_device *pdev;
+ struct napi_struct napi;
};
static irqreturn_t cpmac_irq(int, void *);
@@ -362,47 +358,48 @@ static void cpmac_set_multicast_list(struct net_device *dev)
}
}
-static struct sk_buff *cpmac_rx_one(struct net_device *dev,
- struct cpmac_priv *priv,
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
struct cpmac_desc *desc)
{
struct sk_buff *skb, *result = NULL;
if (unlikely(netif_msg_hw(priv)))
- cpmac_dump_desc(dev, desc);
+ cpmac_dump_desc(priv->dev, desc);
cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
if (unlikely(!desc->datalen)) {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: rx: spurious interrupt\n",
- dev->name);
+ priv->dev->name);
return NULL;
}
- skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE);
+ skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE);
if (likely(skb)) {
skb_reserve(skb, 2);
skb_put(desc->skb, desc->datalen);
- desc->skb->protocol = eth_type_trans(desc->skb, dev);
+ desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
desc->skb->ip_summed = CHECKSUM_NONE;
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += desc->datalen;
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += desc->datalen;
result = desc->skb;
- dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+ CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
desc->skb = skb;
- desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+ desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
CPMAC_SKB_SIZE,
DMA_FROM_DEVICE);
desc->hw_data = (u32)desc->data_mapping;
if (unlikely(netif_msg_pktdata(priv))) {
- printk(KERN_DEBUG "%s: received packet:\n", dev->name);
- cpmac_dump_skb(dev, result);
+ printk(KERN_DEBUG "%s: received packet:\n",
+ priv->dev->name);
+ cpmac_dump_skb(priv->dev, result);
}
} else {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING
- "%s: low on skbs, dropping packet\n", dev->name);
- dev->stats.rx_dropped++;
+ "%s: low on skbs, dropping packet\n",
+ priv->dev->name);
+ priv->dev->stats.rx_dropped++;
}
desc->buflen = CPMAC_SKB_SIZE;
@@ -411,25 +408,25 @@ static struct sk_buff *cpmac_rx_one(struct net_device *dev,
return result;
}
-static int cpmac_poll(struct net_device *dev, int *budget)
+static int cpmac_poll(struct napi_struct *napi, int budget)
{
struct sk_buff *skb;
struct cpmac_desc *desc;
- int received = 0, quota = min(dev->quota, *budget);
- struct cpmac_priv *priv = netdev_priv(dev);
+ int received = 0;
+ struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
spin_lock(&priv->rx_lock);
if (unlikely(!priv->rx_head)) {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: rx: polling, but no queue\n",
- dev->name);
- netif_rx_complete(dev);
+ priv->dev->name);
+ netif_rx_complete(priv->dev, napi);
return 0;
}
desc = priv->rx_head;
- while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) {
- skb = cpmac_rx_one(dev, priv, desc);
+ while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+ skb = cpmac_rx_one(priv, desc);
if (likely(skb)) {
netif_receive_skb(skb);
received++;
@@ -439,13 +436,11 @@ static int cpmac_poll(struct net_device *dev, int *budget)
priv->rx_head = desc;
spin_unlock(&priv->rx_lock);
- *budget -= received;
- dev->quota -= received;
if (unlikely(netif_msg_rx_status(priv)))
- printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name,
- received);
+ printk(KERN_DEBUG "%s: poll processed %d packets\n",
+ priv->dev->name, received);
if (desc->dataflags & CPMAC_OWN) {
- netif_rx_complete(dev);
+ netif_rx_complete(priv->dev, napi);
cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
return 0;
@@ -655,6 +650,7 @@ static void cpmac_hw_error(struct work_struct *work)
spin_unlock(&priv->rx_lock);
cpmac_clear_tx(priv->dev);
cpmac_hw_start(priv->dev);
+ napi_enable(&priv->napi);
netif_start_queue(priv->dev);
}
@@ -681,8 +677,10 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
if (status & MAC_INT_RX) {
queue = (status >> 8) & 7;
- netif_rx_schedule(dev);
- cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+ if (netif_rx_schedule_prep(dev, &priv->napi)) {
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+ __netif_rx_schedule(dev, &priv->napi);
+ }
}
cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
@@ -692,6 +690,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
printk(KERN_ERR "%s: hw error, resetting...\n",
dev->name);
netif_stop_queue(dev);
+ napi_disable(&priv->napi);
cpmac_hw_stop(dev);
schedule_work(&priv->reset_work);
if (unlikely(netif_msg_hw(priv)))
@@ -849,6 +848,15 @@ static void cpmac_adjust_link(struct net_device *dev)
spin_unlock(&priv->lock);
}
+static int cpmac_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
+{
+ status->link = 1;
+ status->speed = 100;
+ status->duplex = 1;
+ return 0;
+}
+
static int cpmac_open(struct net_device *dev)
{
int i, size, res;
@@ -857,15 +865,6 @@ static int cpmac_open(struct net_device *dev)
struct cpmac_desc *desc;
struct sk_buff *skb;
- priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
- if (IS_ERR(priv->phy)) {
- if (netif_msg_drv(priv))
- printk(KERN_ERR "%s: Could not attach to PHY\n",
- dev->name);
- return PTR_ERR(priv->phy);
- }
-
mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
if (netif_msg_drv(priv))
@@ -927,6 +926,7 @@ static int cpmac_open(struct net_device *dev)
INIT_WORK(&priv->reset_work, cpmac_hw_error);
cpmac_hw_start(dev);
+ napi_enable(&priv->napi);
priv->phy->state = PHY_CHANGELINK;
phy_start(priv->phy);
@@ -951,8 +951,6 @@ fail_remap:
release_mem_region(mem->start, mem->end - mem->start);
fail_reserve:
- phy_disconnect(priv->phy);
-
return res;
}
@@ -965,9 +963,8 @@ static int cpmac_stop(struct net_device *dev)
netif_stop_queue(dev);
cancel_work_sync(&priv->reset_work);
+ napi_disable(&priv->napi);
phy_stop(priv->phy);
- phy_disconnect(priv->phy);
- priv->phy = NULL;
cpmac_hw_stop(dev);
@@ -1001,11 +998,13 @@ static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
- int rc, phy_id;
+ int rc, phy_id, i;
struct resource *mem;
struct cpmac_priv *priv;
struct net_device *dev;
struct plat_cpmac_data *pdata;
+ struct fixed_info *fixed_phy;
+ DECLARE_MAC_BUF(mac);
pdata = pdev->dev.platform_data;
@@ -1053,21 +1052,51 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
dev->set_multicast_list = cpmac_set_multicast_list;
dev->tx_timeout = cpmac_tx_timeout;
dev->ethtool_ops = &cpmac_ethtool_ops;
- dev->poll = cpmac_poll;
- dev->weight = 64;
dev->features |= NETIF_F_MULTI_QUEUE;
+ netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
+
spin_lock_init(&priv->lock);
spin_lock_init(&priv->rx_lock);
priv->dev = dev;
priv->ring_size = 64;
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
+
if (phy_id == 31) {
- snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT,
- cpmac_mii.id, phy_id);
- } else
- snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1);
+ snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, cpmac_mii.id,
+ phy_id);
+ } else {
+ /* Let's try to get a free fixed phy... */
+ for (i = 0; i < MAX_PHY_AMNT; i++) {
+ fixed_phy = fixed_mdio_get_phydev(i);
+ if (!fixed_phy)
+ continue;
+ if (!fixed_phy->phydev->attached_dev) {
+ strncpy(priv->phy_name,
+ fixed_phy->phydev->dev.bus_id,
+ BUS_ID_SIZE);
+ fixed_mdio_set_link_update(fixed_phy->phydev,
+ &cpmac_link_update);
+ goto phy_found;
+ }
+ }
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: Could not find fixed PHY\n",
+ dev->name);
+ rc = -ENODEV;
+ goto fail;
+ }
+
+phy_found:
+ priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(priv->phy)) {
+ if (netif_msg_drv(priv))
+ printk(KERN_ERR "%s: Could not attach to PHY\n",
+ dev->name);
+ return PTR_ERR(priv->phy);
+ }
if ((rc = register_netdev(dev))) {
printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
@@ -1077,9 +1106,9 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
if (netif_msg_probe(priv)) {
printk(KERN_INFO
- "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: "
- MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq,
- priv->phy_name, MAC_ARG(dev->dev_addr));
+ "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
+ "mac: %s)\n", dev->name, (void *)mem->start, dev->irq,
+ priv->phy_name, print_mac(mac, dev->dev_addr));
}
return 0;
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d2499bb07c1..473f78de4be 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -122,7 +122,8 @@ struct e1000_buffer {
u16 next_to_watch;
};
/* RX */
- struct page *page;
+ /* arrays of page information for packet split */
+ struct e1000_ps_page *ps_pages;
};
};
@@ -142,8 +143,6 @@ struct e1000_ring {
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
- /* arrays of page information for packet split */
- struct e1000_ps_page *ps_pages;
struct sk_buff *rx_skb_top;
struct e1000_queue_stats stats;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 033e124d1c1..4fd2e23720b 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -245,37 +245,36 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
- + j];
- if (j < adapter->rx_ps_pages) {
+ ps_page = &buffer_info->ps_pages[j];
+ if (j >= adapter->rx_ps_pages) {
+ /* all unused desc entries get hw null ptr */
+ rx_desc->read.buffer_addr[j+1] = ~0;
+ continue;
+ }
+ if (!ps_page->page) {
+ ps_page->page = alloc_page(GFP_ATOMIC);
if (!ps_page->page) {
- ps_page->page = alloc_page(GFP_ATOMIC);
- if (!ps_page->page) {
- adapter->alloc_rx_buff_failed++;
- goto no_buffers;
- }
- ps_page->dma = pci_map_page(pdev,
- ps_page->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(
- ps_page->dma)) {
- dev_err(&adapter->pdev->dev,
- "RX DMA page map failed\n");
- adapter->rx_dma_failed++;
- goto no_buffers;
- }
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ ps_page->dma = pci_map_page(pdev,
+ ps_page->page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(ps_page->dma)) {
+ dev_err(&adapter->pdev->dev,
+ "RX DMA page map failed\n");
+ adapter->rx_dma_failed++;
+ goto no_buffers;
}
- /*
- * Refresh the desc even if buffer_addrs
- * didn't change because each write-back
- * erases this info.
- */
- rx_desc->read.buffer_addr[j+1] =
- cpu_to_le64(ps_page->dma);
- } else {
- rx_desc->read.buffer_addr[j+1] = ~0;
}
+ /*
+ * Refresh the desc even if buffer_addrs
+ * didn't change because each write-back
+ * erases this info.
+ */
+ rx_desc->read.buffer_addr[j+1] =
+ cpu_to_le64(ps_page->dma);
}
skb = netdev_alloc_skb(netdev,
@@ -334,94 +333,6 @@ no_buffers:
}
/**
- * e1000_alloc_rx_buffers_jumbo - Replace used jumbo receive buffers
- *
- * @adapter: address of board private structure
- * @cleaned_count: number of buffers to allocate this pass
- **/
-static void e1000_alloc_rx_buffers_jumbo(struct e1000_adapter *adapter,
- int cleaned_count)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc;
- struct e1000_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = 256 -
- 16 /*for skb_reserve */ -
- NET_IP_ALIGN;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
-
- while (cleaned_count--) {
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto check_page;
- }
-
- skb = netdev_alloc_skb(netdev, bufsz);
- if (!skb) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- /* Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
- buffer_info->skb = skb;
-check_page:
- /* allocate a new page if necessary */
- if (!buffer_info->page) {
- buffer_info->page = alloc_page(GFP_ATOMIC);
- if (!buffer_info->page) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
- }
-
- if (!buffer_info->dma)
- buffer_info->dma = pci_map_page(pdev,
- buffer_info->page, 0,
- PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(buffer_info->dma)) {
- dev_err(&adapter->pdev->dev, "RX DMA page map failed\n");
- adapter->rx_dma_failed++;
- break;
- }
-
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
- if (rx_ring->next_to_use != i) {
- rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
- writel(i, adapter->hw.hw_addr + rx_ring->tail);
- }
-}
-
-/**
* e1000_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
*
@@ -495,10 +406,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
goto next_desc;
}
- /* adjust length to remove Ethernet CRC */
- length -= 4;
-
- /* probably a little skewed due to removing CRC */
total_rx_bytes += length;
total_rx_packets++;
@@ -554,15 +461,6 @@ next_desc:
return cleaned;
}
-static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
-{
- bi->page = NULL;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += length;
-}
-
static void e1000_put_txbuf(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info)
{
@@ -699,174 +597,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
}
/**
- * e1000_clean_rx_irq_jumbo - Send received data up the network stack; legacy
- * @adapter: board private structure
- *
- * the return value indicates whether actual cleaning was done, there
- * is no guarantee that everything was cleaned
- **/
-static bool e1000_clean_rx_irq_jumbo(struct e1000_adapter *adapter,
- int *work_done, int work_to_do)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct e1000_ring *rx_ring = adapter->rx_ring;
- struct e1000_rx_desc *rx_desc, *next_rxd;
- struct e1000_buffer *buffer_info, *next_buffer;
- u32 length;
- unsigned int i;
- int cleaned_count = 0;
- bool cleaned = 0;
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
-
- i = rx_ring->next_to_clean;
- rx_desc = E1000_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (rx_desc->status & E1000_RXD_STAT_DD) {
- struct sk_buff *skb;
- u8 status;
-
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
-
- status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
-
- i++;
- if (i == rx_ring->count)
- i = 0;
- next_rxd = E1000_RX_DESC(*rx_ring, i);
- prefetch(next_rxd);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = 1;
- cleaned_count++;
- pci_unmap_page(pdev,
- buffer_info->dma,
- PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
- buffer_info->dma = 0;
-
- length = le16_to_cpu(rx_desc->length);
-
- /* errors is only valid for DD + EOP descriptors */
- if ((status & E1000_RXD_STAT_EOP) &&
- (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
- /* recycle both page and skb */
- buffer_info->skb = skb;
- /* an error means any chain goes out the window too */
- if (rx_ring->rx_skb_top)
- dev_kfree_skb(rx_ring->rx_skb_top);
- rx_ring->rx_skb_top = NULL;
- goto next_desc;
- }
-
-#define rxtop rx_ring->rx_skb_top
- if (!(status & E1000_RXD_STAT_EOP)) {
- /* this descriptor is only the beginning (or middle) */
- if (!rxtop) {
- /* this is the beginning of a chain */
- rxtop = skb;
- skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
- } else {
- /* this is the middle of a chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0,
- length);
- /* re-use the skb, only consumed the page */
- buffer_info->skb = skb;
- }
- e1000_consume_page(buffer_info, rxtop, length);
- goto next_desc;
- } else {
- if (rxtop) {
- /* end of the chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
- /* re-use the current skb, we only consumed the
- * page */
- buffer_info->skb = skb;
- skb = rxtop;
- rxtop = NULL;
- e1000_consume_page(buffer_info, skb, length);
- } else {
- /* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
- if (length <= copybreak &&
- skb_tailroom(skb) >= length) {
- u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page,
- KM_SKB_DATA_SOFTIRQ);
- memcpy(skb_tail_pointer(skb),
- vaddr, length);
- kunmap_atomic(vaddr,
- KM_SKB_DATA_SOFTIRQ);
- /* re-use the page, so don't erase
- * buffer_info->page */
- skb_put(skb, length);
- } else {
- skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
- e1000_consume_page(buffer_info, skb,
- length);
- }
- }
- }
-
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
-
- pskb_trim(skb, skb->len - 4);
-
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- /* eth type trans needs skb->data to point to something */
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- ndev_err(netdev, "__pskb_pull_tail failed.\n");
- dev_kfree_skb(skb);
- goto next_desc;
- }
-
- e1000_receive_skb(adapter, netdev, skb,status,rx_desc->special);
-
-next_desc:
- rx_desc->status = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
- adapter->alloc_rx_buf(adapter, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
- }
- rx_ring->next_to_clean = i;
-
- cleaned_count = e1000_desc_unused(rx_ring);
- if (cleaned_count)
- adapter->alloc_rx_buf(adapter, cleaned_count);
-
- adapter->total_rx_packets += total_rx_packets;
- adapter->total_rx_bytes += total_rx_bytes;
- return cleaned;
-}
-
-/**
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
* @adapter: board private structure
*
@@ -953,7 +683,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
((length + l1) <= adapter->rx_ps_bsize0)) {
u8 *vaddr;
- ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
+ ps_page = &buffer_info->ps_pages[0];
/* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
@@ -965,8 +695,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
pci_dma_sync_single_for_device(pdev, ps_page->dma,
PAGE_SIZE, PCI_DMA_FROMDEVICE);
- /* remove the CRC */
- l1 -= 4;
+
skb_put(skb, l1);
goto copydone;
} /* if */
@@ -977,7 +706,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
if (!length)
break;
- ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
+ ps_page = &buffer_info->ps_pages[j];
pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
ps_page->dma = 0;
@@ -988,10 +717,6 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
skb->truesize += length;
}
- /* strip the ethernet crc, problem is we're using pages now so
- * this whole operation can get a little cpu intensive */
- pskb_trim(skb, skb->len - 4);
-
copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -1043,7 +768,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
struct e1000_buffer *buffer_info;
struct e1000_ps_page *ps_page;
struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
unsigned int i, j;
/* Free all the Rx ring sk_buffs */
@@ -1054,9 +778,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
- else if (adapter->clean_rx == e1000_clean_rx_irq_jumbo)
- pci_unmap_page(pdev, buffer_info->dma,
- PAGE_SIZE, PCI_DMA_FROMDEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
pci_unmap_single(pdev, buffer_info->dma,
adapter->rx_ps_bsize0,
@@ -1064,19 +785,13 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
buffer_info->dma = 0;
}
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- }
-
if (buffer_info->skb) {
dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL;
}
for (j = 0; j < PS_PAGE_BUFFERS; j++) {
- ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
- + j];
+ ps_page = &buffer_info->ps_pages[j];
if (!ps_page->page)
break;
pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
@@ -1093,12 +808,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
rx_ring->rx_skb_top = NULL;
}
- size = sizeof(struct e1000_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
- size = sizeof(struct e1000_ps_page)
- * (rx_ring->count * PS_PAGE_BUFFERS);
- memset(rx_ring->ps_pages, 0, size);
-
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
@@ -1421,7 +1130,8 @@ err:
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
struct e1000_ring *rx_ring = adapter->rx_ring;
- int size, desc_len, err = -ENOMEM;
+ struct e1000_buffer *buffer_info;
+ int i, size, desc_len, err = -ENOMEM;
size = sizeof(struct e1000_buffer) * rx_ring->count;
rx_ring->buffer_info = vmalloc(size);
@@ -1429,11 +1139,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
goto err;
memset(rx_ring->buffer_info, 0, size);
- rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
- sizeof(struct e1000_ps_page),
- GFP_KERNEL);
- if (!rx_ring->ps_pages)
- goto err;
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+ sizeof(struct e1000_ps_page),
+ GFP_KERNEL);
+ if (!buffer_info->ps_pages)
+ goto err_pages;
+ }
desc_len = sizeof(union e1000_rx_desc_packet_split);
@@ -1443,16 +1156,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
err = e1000_alloc_ring_dma(adapter, rx_ring);
if (err)
- goto err;
+ goto err_pages;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->rx_skb_top = NULL;
return 0;
+
+err_pages:
+ for (i = 0; i < rx_ring->count; i++) {
+ buffer_info = &rx_ring->buffer_info[i];
+ kfree(buffer_info->ps_pages);
+ }
err:
vfree(rx_ring->buffer_info);
- kfree(rx_ring->ps_pages);
ndev_err(adapter->netdev,
"Unable to allocate memory for the transmit descriptor ring\n");
return err;
@@ -1518,15 +1236,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct e1000_ring *rx_ring = adapter->rx_ring;
+ int i;
e1000_clean_rx_ring(adapter);
+ for (i = 0; i < rx_ring->count; i++) {
+ kfree(rx_ring->buffer_info[i].ps_pages);
+ }
+
vfree(rx_ring->buffer_info);
rx_ring->buffer_info = NULL;
- kfree(rx_ring->ps_pages);
- rx_ring->ps_pages = NULL;
-
dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
rx_ring->dma);
rx_ring->desc = NULL;
@@ -2032,9 +1752,11 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ew32(RFCTL, rfctl);
- /* disable the stripping of CRC because it breaks
- * BMC firmware connected over SMBUS */
- rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
+ /* Enable Packet split descriptors */
+ rctl |= E1000_RCTL_DTYP_PS;
+
+ /* Enable hardware CRC frame stripping */
+ rctl |= E1000_RCTL_SECRC;
psrctl |= adapter->rx_ps_bsize0 >>
E1000_PSRCTL_BSIZE0_SHIFT;
@@ -2077,11 +1799,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
sizeof(union e1000_rx_desc_packet_split);
adapter->clean_rx = e1000_clean_rx_irq_ps;
adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
- } else if (adapter->netdev->mtu > ETH_FRAME_LEN + VLAN_HLEN + 4) {
- rdlen = rx_ring->count *
- sizeof(struct e1000_rx_desc);
- adapter->clean_rx = e1000_clean_rx_irq_jumbo;
- adapter->alloc_rx_buf = e1000_alloc_rx_buffers_jumbo;
} else {
rdlen = rx_ring->count *
sizeof(struct e1000_rx_desc);
@@ -2326,8 +2043,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_hw *hw = &adapter->hw;
u32 tx_space, min_tx_space, min_rx_space;
+ u32 pba;
u16 hwm;
+ ew32(PBA, adapter->pba);
+
if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) {
/* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
@@ -2335,11 +2055,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
* expressed in KB. */
- adapter->pba = er32(PBA);
+ pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
- tx_space = adapter->pba >> 16;
+ tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
- adapter->pba &= 0xffff;
+ pba &= 0xffff;
/* the tx fifo also stores 16 bytes of information about the tx
* but don't include ethernet FCS because hardware appends it */
min_tx_space = (mac->max_frame_size +
@@ -2355,20 +2075,21 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
* allocation, take space away from current Rx allocation */
- if (tx_space < min_tx_space &&
- ((min_tx_space - tx_space) < adapter->pba)) {
- adapter->pba -= - (min_tx_space - tx_space);
+ if ((tx_space < min_tx_space) &&
+ ((min_tx_space - tx_space) < pba)) {
+ pba -= min_tx_space - tx_space;
/* if short on rx space, rx wins and must trump tx
* adjustment or use Early Receive if available */
- if ((adapter->pba < min_rx_space) &&
+ if ((pba < min_rx_space) &&
(!(adapter->flags & FLAG_HAS_ERT)))
/* ERT enabled in e1000_configure_rx */
- adapter->pba = min_rx_space;
+ pba = min_rx_space;
}
+
+ ew32(PBA, pba);
}
- ew32(PBA, adapter->pba);
/* flow control settings */
/* The high water mark must be low enough to fit one full frame
@@ -3624,9 +3345,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
- * i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo* routines, jumbo receives will use
- * fragmented skbs */
+ * i.e. RXBUFFER_2048 --> size-4096 slab */
if (max_frame <= 256)
adapter->rx_buffer_len = 256;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index b557bb44a36..f78e5bf7cb3 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0078"
+#define DRV_VERSION "EHEA_0080"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 2809c99906e..f0319f1e8e0 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -33,6 +33,9 @@
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+
#include <net/ip.h>
#include "ehea.h"
@@ -2329,7 +2332,7 @@ static void port_napi_disable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
napi_disable(&port->port_res[i].napi);
}
@@ -2337,7 +2340,7 @@ static void port_napi_enable(struct ehea_port *port)
{
int i;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
napi_enable(&port->port_res[i].napi);
}
@@ -2373,8 +2376,6 @@ static int ehea_down(struct net_device *dev)
ehea_drop_multicast_list(dev);
ehea_free_interrupts(dev);
- port_napi_disable(port);
-
port->state = EHEA_PORT_DOWN;
ret = ehea_clean_all_portres(port);
@@ -2396,6 +2397,7 @@ static int ehea_stop(struct net_device *dev)
flush_scheduled_work();
down(&port->port_lock);
netif_stop_queue(dev);
+ port_napi_disable(port);
ret = ehea_down(dev);
up(&port->port_lock);
return ret;
@@ -3296,6 +3298,20 @@ static int __devexit ehea_remove(struct of_device *dev)
return 0;
}
+static int ehea_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ if (action == SYS_RESTART) {
+ ehea_info("Reboot: freeing all eHEA resources");
+ ibmebus_unregister_driver(&ehea_driver);
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+ .notifier_call = ehea_reboot_notifier,
+};
+
static int check_module_parm(void)
{
int ret = 0;
@@ -3352,6 +3368,8 @@ int __init ehea_module_init(void)
if (ret)
goto out;
+ register_reboot_notifier(&ehea_reboot_nb);
+
ret = ibmebus_register_driver(&ehea_driver);
if (ret) {
ehea_error("failed registering eHEA device driver on ebus");
@@ -3363,6 +3381,7 @@ int __init ehea_module_init(void)
if (ret) {
ehea_error("failed to register capabilities attribute, ret=%d",
ret);
+ unregister_reboot_notifier(&ehea_reboot_nb);
ibmebus_unregister_driver(&ehea_driver);
goto out;
}
@@ -3376,6 +3395,7 @@ static void __exit ehea_module_exit(void)
flush_scheduled_work();
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
+ unregister_reboot_notifier(&ehea_reboot_nb);
ehea_destroy_busmap();
}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 70ddf1acfd8..92ce2e38f0d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5597,6 +5597,22 @@ static struct pci_device_id pci_tbl[] = {
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
+ { /* MCP77 Ethernet Controller */
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ },
{0,},
};
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 68887235d7e..dbd23bb65d1 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -55,6 +55,26 @@ MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver "
DrvVer);
MODULE_LICENSE("GPL");
+//variable record -- index by leading revision/length
+//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
+static unsigned short DefaultPhyParam[] = {
+ // 11/12/03 IP1000A v1-3 rev=0x40
+ /*--------------------------------------------------------------------------
+ (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
+ 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
+ 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
+ --------------------------------------------------------------------------*/
+ // 12/17/03 IP1000A v1-4 rev=0x40
+ (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
+ 0x0000,
+ 30, 0x005e, 9, 0x0700,
+ // 01/09/04 IP1000A v1-5 rev=0x41
+ (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
+ 0x0000,
+ 30, 0x005e, 9, 0x0700,
+ 0x0000
+};
+
static const char *ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
@@ -990,7 +1010,7 @@ static void ipg_nic_txcleanup(struct net_device *dev)
}
/* Provides statistical information about the IPG NIC. */
-struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
+static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
struct ipg_nic_private *sp = netdev_priv(dev);
void __iomem *ioaddr = sp->ioaddr;
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index e418b9035ca..d5d092c9d0a 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -833,24 +833,4 @@ struct ipg_nic_private {
struct delayed_work task;
};
-//variable record -- index by leading revision/length
-//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
-unsigned short DefaultPhyParam[] = {
- // 11/12/03 IP1000A v1-3 rev=0x40
- /*--------------------------------------------------------------------------
- (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
- 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
- 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
- --------------------------------------------------------------------------*/
- // 12/17/03 IP1000A v1-4 rev=0x40
- (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
- 0x0000,
- 30, 0x005e, 9, 0x0700,
- // 01/09/04 IP1000A v1-5 rev=0x41
- (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
- 0x0000,
- 30, 0x005e, 9, 0x0700,
- 0x0000
-};
-
#endif /* __LINUX_IPG_H */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 4dbdfaaf37b..a1e4508717c 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -627,19 +627,16 @@ static int au1k_irda_rx(struct net_device *dev)
}
-void au1k_irda_interrupt(int irq, void *dev_id)
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
-
- if (dev == NULL) {
- printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
- return;
- }
+ struct net_device *dev = dev_id;
writel(0, IR_INT_CLEAR); /* ack irda interrupts */
au1k_irda_rx(dev);
au1k_tx_ack(dev);
+
+ return IRQ_HANDLED;
}
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 662b8d16803..45f30a2974b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -284,7 +284,7 @@ static __net_exit void loopback_net_exit(struct net *net)
unregister_netdev(dev);
}
-static struct pernet_operations __net_initdata loopback_net_ops = {
+static struct pernet_operations loopback_net_ops = {
.init = loopback_net_init,
.exit = loopback_net_exit,
};
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 887633b207d..2a5bef6388f 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -101,9 +101,7 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_ma
if (!page)
return -ENOMEM;
- sg_set_page(mem, page);
- mem->length = PAGE_SIZE << order;
- mem->offset = 0;
+ sg_set_page(mem, page, PAGE_SIZE << order, 0);
return 0;
}
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 953117152bb..87cde062fd6 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -864,6 +864,7 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
np = netdev_priv(dev);
netif_napi_add(dev, &np->napi, natsemi_poll, 64);
+ np->dev = dev;
np->pci_dev = pdev;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index bcb0885011c..b35d7944950 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -68,7 +68,7 @@ MODULE_VERSION("1.0.2");
static unsigned int
setup_sg(struct scatterlist *sg, const void *address, unsigned int length)
{
- sg_init_one(sg, address, length);
+ sg_set_buf(sg, address, length);
return length;
}
@@ -140,6 +140,8 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
struct scatterlist sg[4];
unsigned int nbytes;
+ sg_init_table(sg, 4);
+
nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
sizeof(sha_pad->sha_pad1));
@@ -166,6 +168,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
if (!initial_key) {
crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
state->keylen);
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
setup_sg(sg_out, state->session_key, state->keylen);
if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
@@ -421,6 +425,8 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
isize -= 2;
/* Encrypt packet */
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 1);
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
@@ -608,6 +614,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
* Decrypt the first byte in order to check if it is
* a compressed or uncompressed protocol field.
*/
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 1);
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 19152f54ef2..b822859c8de 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -79,12 +79,10 @@ static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen
*/
/*
- * These are checked at init time to see if they are at least 256KB
- * and increased to 256KB if they are not. This is done to avoid ending
- * up with socket buffers smaller than the MTU size,
+ * sysctl_[wr]mem_max are checked at init time to see if they are at
+ * least 256KB and increased to 256KB if they are not. This is done to
+ * avoid ending up with socket buffers smaller than the MTU size,
*/
-extern __u32 sysctl_wmem_max;
-extern __u32 sysctl_rmem_max;
static int __devinit rr_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index cd991a0f75b..1ebe3259be0 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -512,11 +512,19 @@ static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
}
tmp = le32_to_cpu(u.init_c->max_transfer_size);
if (tmp < dev->hard_mtu) {
- dev_err(&intf->dev,
- "dev can't take %u byte packets (max %u)\n",
- dev->hard_mtu, tmp);
- retval = -EINVAL;
- goto fail_and_release;
+ if (tmp <= net->hard_header_len) {
+ dev_err(&intf->dev,
+ "dev can't take %u byte packets (max %u)\n",
+ dev->hard_mtu, tmp);
+ retval = -EINVAL;
+ goto fail_and_release;
+ }
+ dev->hard_mtu = tmp;
+ net->mtu = dev->hard_mtu - net->hard_header_len;
+ dev_warn(&intf->dev,
+ "dev can't take %u byte packets (max %u), "
+ "adjusting MTU to %u\n",
+ dev->hard_mtu, tmp, net->mtu);
}
/* REVISIT: peripheral "alignment" request is ignored ... */