Diffstat (limited to 'drivers/net'): 72 files changed, 771 insertions, 752 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 6deb20fc7a0..a8c0f436cdd 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -1555,6 +1555,7 @@ vortex_up(struct net_device *dev) mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); + vp->mii.full_duplex = vp->full_duplex; vortex_check_media(dev, 1); } @@ -2886,7 +2887,6 @@ static const struct ethtool_ops vortex_ethtool_ops = { .set_settings = vortex_set_settings, .get_link = ethtool_op_get_link, .nway_reset = vortex_nway_reset, - .get_perm_addr = ethtool_op_get_perm_addr, }; #ifdef CONFIG_PCI diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index e970e64bf96..a79f28c7a10 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -1578,7 +1578,6 @@ static const struct ethtool_ops cp_ethtool_ops = { .set_wol = cp_set_wol, .get_strings = cp_get_strings, .get_ethtool_stats = cp_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, .get_eeprom_len = cp_get_eeprom_len, .get_eeprom = cp_get_eeprom, .set_eeprom = cp_set_eeprom, diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index 327eaa7b499..f4e4298d24b 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -2452,7 +2452,6 @@ static const struct ethtool_ops rtl8139_ethtool_ops = { .get_strings = rtl8139_get_strings, .get_stats_count = rtl8139_get_stats_count, .get_ethtool_stats = rtl8139_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f8a602caabc..81ef81c9a58 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2851,7 +2851,7 @@ config PPPOATM config PPPOL2TP tristate "PPP over L2TP (EXPERIMENTAL)" - depends on EXPERIMENTAL && PPP + depends on EXPERIMENTAL && PPP && INET help Support for PPP-over-L2TP socket family. 
L2TP is a protocol used by ISPs and enterprises to tunnel PPP traffic over UDP diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 94b78cc5fe8..e684212fd8e 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -177,7 +177,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o -obj-$(CONFIG_LGUEST_GUEST) += lguest_net.o +obj-$(CONFIG_LGUEST_NET) += lguest_net.o obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c index 56f6389a300..3c1984ecf36 100644 --- a/drivers/net/atl1/atl1_main.c +++ b/drivers/net/atl1/atl1_main.c @@ -1704,10 +1704,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) } } - local_irq_save(flags); - if (!spin_trylock(&adapter->lock)) { + if (!spin_trylock_irqsave(&adapter->lock, flags)) { /* Can't get lock - tell upper layer to requeue */ - local_irq_restore(flags); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); return NETDEV_TX_LOCKED; } diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c index 1d882360b34..90e0734e603 100644 --- a/drivers/net/ax88796.c +++ b/drivers/net/ax88796.c @@ -580,7 +580,6 @@ static const struct ethtool_ops ax_ethtool_ops = { .set_settings = ax_set_settings, .nway_reset = ax_nway_reset, .get_link = ax_get_link, - .get_perm_addr = ethtool_op_get_perm_addr, }; /* setup code */ @@ -819,11 +818,12 @@ static int ax_probe(struct platform_device *pdev) } ei_status.mem = ioremap(res->start, size); - dev->base_addr = (long)ei_status.mem; + dev->base_addr = (unsigned long)ei_status.mem; if (ei_status.mem == NULL) { - dev_err(&pdev->dev, "Cannot ioremap area (%08zx,%08zx)\n", - res->start, res->end); + dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n", + (unsigned long long)res->start, + (unsigned long long)res->end); ret = -ENXIO; goto exit_req; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 37f1b6ff5c1..0795df23549 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -2033,7 +2033,6 @@ static const struct ethtool_ops b44_ethtool_ops = { .get_strings = b44_get_strings, .get_stats_count = b44_get_stats_count, .get_ethtool_stats = b44_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 9a08d656f1c..2bb97d46468 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -798,6 +798,7 @@ static void bf537mac_shutdown(struct net_device *dev) */ static int bf537mac_open(struct net_device *dev) { + int retval; pr_debug("%s: %s\n", dev->name, __FUNCTION__); /* @@ -811,7 +812,10 @@ static int bf537mac_open(struct net_device *dev) } /* initial rx and tx list */ - desc_list_init(); + retval = desc_list_init(); + + if (retval) + return retval; bf537mac_setphy(dev); setup_system_regs(dev); diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index a729da061bb..24e7f9ab3f5 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -54,8 +54,8 @@ #define DRV_MODULE_NAME "bnx2" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "1.6.3" -#define DRV_MODULE_RELDATE "July 16, 2007" +#define DRV_MODULE_VERSION "1.6.4" +#define DRV_MODULE_RELDATE "August 3, 2007" #define RUN_AT(x) (jiffies + (x)) @@ -6269,7 +6269,6 @@ static const struct ethtool_ops 
bnx2_ethtool_ops = { .phys_id = bnx2_phys_id, .get_stats_count = bnx2_get_stats_count, .get_ethtool_stats = bnx2_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; /* Called with rtnl_lock */ @@ -6938,6 +6937,11 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) struct bnx2 *bp = netdev_priv(dev); u32 reset_code; + /* PCI register 4 needs to be saved whether netif_running() or not. + * MSI address and data need to be saved if using MSI and + * netif_running(). + */ + pci_save_state(pdev); if (!netif_running(dev)) return 0; @@ -6953,7 +6957,6 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; bnx2_reset_chip(bp, reset_code); bnx2_free_skbs(bp); - pci_save_state(pdev); bnx2_set_power_state(bp, pci_choose_state(pdev, state)); return 0; } @@ -6964,10 +6967,10 @@ bnx2_resume(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnx2 *bp = netdev_priv(dev); + pci_restore_state(pdev); if (!netif_running(dev)) return 0; - pci_restore_state(pdev); bnx2_set_power_state(bp, PCI_D0); netif_device_attach(dev); bnx2_init_nic(bp); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index cb9cb3013f4..1afda3230de 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -613,38 +613,20 @@ down: static int bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; - static int (* ioctl)(struct net_device *, struct ifreq *, int); - struct ifreq ifr; struct ethtool_cmd etool; + int res; /* Fake speed and duplex */ slave->speed = SPEED_100; slave->duplex = DUPLEX_FULL; - if (slave_dev->ethtool_ops) { - int res; - - if (!slave_dev->ethtool_ops->get_settings) { - return -1; - } - - res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); - if (res < 0) { - return -1; - } - - goto verify; - } + if (!slave_dev->ethtool_ops || !slave_dev->ethtool_ops->get_settings) + return -1; - ioctl = slave_dev->do_ioctl; - strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ); - etool.cmd = ETHTOOL_GSET; - ifr.ifr_data = (char*)&etool; - if (!ioctl || (IOCTL(slave_dev, &ifr, SIOCETHTOOL) < 0)) { + res = slave_dev->ethtool_ops->get_settings(slave_dev, &etool); + if (res < 0) return -1; - } -verify: switch (etool.speed) { case SPEED_10: case SPEED_100: @@ -690,7 +672,6 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de static int (* ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; - struct ethtool_value etool; if (bond->params.use_carrier) { return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0; @@ -721,9 +702,10 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de } } - /* try SIOCETHTOOL ioctl, some drivers cache ETHTOOL_GLINK */ - /* for a period of time so we attempt to get link status */ - /* from it last if the above MII ioctls fail... */ + /* + * Some drivers cache ETHTOOL_GLINK for a period of time so we only + * attempt to get link status from it if the above MII ioctls fail. 
+ */ if (slave_dev->ethtool_ops) { if (slave_dev->ethtool_ops->get_link) { u32 link; @@ -734,23 +716,9 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_de } } - if (ioctl) { - strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ); - etool.cmd = ETHTOOL_GLINK; - ifr.ifr_data = (char*)&etool; - if (IOCTL(slave_dev, &ifr, SIOCETHTOOL) == 0) { - if (etool.data == 1) { - return BMSR_LSTATUS; - } else { - dprintk("SIOCETHTOOL shows link down\n"); - return 0; - } - } - } - /* * If reporting, report that either there's no dev->do_ioctl, - * or both SIOCGMIIREG and SIOCETHTOOL failed (meaning that we + * or both SIOCGMIIREG and get_link failed (meaning that we * cannot report link status). If not reporting, pretend * we're ok. */ @@ -1234,43 +1202,35 @@ static int bond_sethwaddr(struct net_device *bond_dev, return 0; } -#define BOND_INTERSECT_FEATURES \ - (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_TSO | NETIF_F_UFO) +#define BOND_VLAN_FEATURES \ + (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \ + NETIF_F_HW_VLAN_FILTER) /* * Compute the common dev->feature set available to all slaves. Some - * feature bits are managed elsewhere, so preserve feature bits set on - * master device that are not part of the examined set. + * feature bits are managed elsewhere, so preserve those feature bits + * on the master device. */ static int bond_compute_features(struct bonding *bond) { - unsigned long features = BOND_INTERSECT_FEATURES; struct slave *slave; struct net_device *bond_dev = bond->dev; + unsigned long features = bond_dev->features; unsigned short max_hard_header_len = ETH_HLEN; int i; + features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); + features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | + NETIF_F_GSO_MASK | NETIF_F_NO_CSUM; + bond_for_each_slave(bond, slave, i) { - features &= (slave->dev->features & BOND_INTERSECT_FEATURES); + features = netdev_compute_features(features, + slave->dev->features); if (slave->dev->hard_header_len > max_hard_header_len) max_hard_header_len = slave->dev->hard_header_len; } - if ((features & NETIF_F_SG) && - !(features & NETIF_F_ALL_CSUM)) - features &= ~NETIF_F_SG; - - /* - * features will include NETIF_F_TSO (NETIF_F_UFO) iff all - * slave devices support NETIF_F_TSO (NETIF_F_UFO), which - * implies that all slaves also support scatter-gather - * (NETIF_F_SG), which implies that features also includes - * NETIF_F_SG. So no need to check whether we have an - * illegal combination of NETIF_F_{TSO,UFO} and - * !NETIF_F_SG - */ - - features |= (bond_dev->features & ~BOND_INTERSECT_FEATURES); + features |= (bond_dev->features & BOND_VLAN_FEATURES); bond_dev->features = features; bond_dev->hard_header_len = max_hard_header_len; diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 6fd1e524183..dc5d26988bb 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -1583,7 +1583,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_wol = get_wol, .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, - .get_perm_addr = ethtool_op_get_perm_addr }; static int in_range(int val, int lo, int hi) diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index ebcf35e4cf5..e620ed4c3ff 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c @@ -699,7 +699,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb) * the buffer. 
*/ static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len, - int gfp) + gfp_t gfp) { if (likely(!skb_cloned(skb))) { BUG_ON(skb->len < len); diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 6b6401e9304..280313b9b06 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -2506,7 +2506,6 @@ static const struct ethtool_ops e100_ethtool_ops = { .phys_id = e100_phys_id, .get_stats_count = e100_get_stats_count, .get_ethtool_stats = e100_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c index bb08375b5f1..4c3785c9d4b 100644 --- a/drivers/net/e1000/e1000_ethtool.c +++ b/drivers/net/e1000/e1000_ethtool.c @@ -1706,6 +1706,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol case E1000_DEV_ID_82545EM_COPPER: case E1000_DEV_ID_82546GB_QUAD_COPPER: case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82571EB_SERDES_QUAD: /* these don't support WoL at all */ wol->supported = 0; break; @@ -1723,6 +1724,7 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wol retval = 0; break; case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: /* quad port adapters only support WoL on port A */ @@ -1973,7 +1975,6 @@ static const struct ethtool_ops e1000_ethtool_ops = { .phys_id = e1000_phys_id, .get_stats_count = e1000_get_stats_count, .get_ethtool_stats = e1000_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; void e1000_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c index 9be44699300..ba120f7fb0b 100644 --- a/drivers/net/e1000/e1000_hw.c +++ b/drivers/net/e1000/e1000_hw.c @@ -384,7 +384,10 @@ e1000_set_mac_type(struct e1000_hw *hw) case E1000_DEV_ID_82571EB_COPPER: case E1000_DEV_ID_82571EB_FIBER: case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: hw->mac_type = e1000_82571; break; @@ -485,6 +488,8 @@ e1000_set_media_type(struct e1000_hw *hw) case E1000_DEV_ID_82545GM_SERDES: case E1000_DEV_ID_82546GB_SERDES: case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: case E1000_DEV_ID_82572EI_SERDES: case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: hw->media_type = e1000_media_type_internal_serdes; diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h index bd000b802ee..fe8714655c9 100644 --- a/drivers/net/e1000/e1000_hw.h +++ b/drivers/net/e1000/e1000_hw.h @@ -475,7 +475,10 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); #define E1000_DEV_ID_82571EB_FIBER 0x105F #define E1000_DEV_ID_82571EB_SERDES 0x1060 #define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 #define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA #define E1000_DEV_ID_82572EI_COPPER 0x107D #define E1000_DEV_ID_82572EI_FIBER 0x107E #define E1000_DEV_ID_82572EI_SERDES 0x107F diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 
f48b659e0c2..4a225950fb4 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -100,6 +100,7 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x1099), INTEL_E1000_ETHERNET_DEVICE(0x109A), INTEL_E1000_ETHERNET_DEVICE(0x10A4), + INTEL_E1000_ETHERNET_DEVICE(0x10A5), INTEL_E1000_ETHERNET_DEVICE(0x10B5), INTEL_E1000_ETHERNET_DEVICE(0x10B9), INTEL_E1000_ETHERNET_DEVICE(0x10BA), @@ -107,6 +108,8 @@ static struct pci_device_id e1000_pci_tbl[] = { INTEL_E1000_ETHERNET_DEVICE(0x10BC), INTEL_E1000_ETHERNET_DEVICE(0x10C4), INTEL_E1000_ETHERNET_DEVICE(0x10C5), + INTEL_E1000_ETHERNET_DEVICE(0x10D9), + INTEL_E1000_ETHERNET_DEVICE(0x10DA), /* required last entry */ {0,} }; @@ -1096,6 +1099,7 @@ e1000_probe(struct pci_dev *pdev, break; case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index 8ee2c2c86b4..d67f97bfa3a 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -39,7 +39,7 @@ #include <asm/io.h> #define DRV_NAME "ehea" -#define DRV_VERSION "EHEA_0072" +#define DRV_VERSION "EHEA_0073" /* eHEA capability flags */ #define DLPAR_PORT_ADD_REM 1 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 58702f54c3f..9756211e83c 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1326,7 +1326,6 @@ static void write_swqe2_TSO(struct sk_buff *skb, u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; int skb_data_size = skb->len - skb->data_len; int headersize; - u64 tmp_addr; /* Packet is TCP with TSO enabled */ swqe->tx_control |= EHEA_SWQE_TSO; @@ -1347,9 +1346,8 @@ static void write_swqe2_TSO(struct sk_buff *skb, /* set sg1entry data */ sg1entry->l_key = lkey; sg1entry->len = skb_data_size - headersize; - - tmp_addr = (u64)(skb->data + headersize); - sg1entry->vaddr = ehea_map_vaddr(tmp_addr); + sg1entry->vaddr = + ehea_map_vaddr(skb->data + headersize); swqe->descriptors++; } } else @@ -1362,7 +1360,6 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, int skb_data_size = skb->len - skb->data_len; u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; - u64 tmp_addr; /* Packet is any nonTSO type * @@ -1379,8 +1376,8 @@ static void write_swqe2_nonTSO(struct sk_buff *skb, /* copy sg1entry data */ sg1entry->l_key = lkey; sg1entry->len = skb_data_size - SWQE2_MAX_IMM; - tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM); - sg1entry->vaddr = ehea_map_vaddr(tmp_addr); + sg1entry->vaddr = + ehea_map_vaddr(skb->data + SWQE2_MAX_IMM); swqe->descriptors++; } } else { @@ -1395,7 +1392,6 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, struct ehea_vsgentry *sg_list, *sg1entry, *sgentry; skb_frag_t *frag; int nfrags, sg1entry_contains_frag_data, i; - u64 tmp_addr; nfrags = skb_shinfo(skb)->nr_frags; sg1entry = &swqe->u.immdata_desc.sg_entry; @@ -1417,9 +1413,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, /* copy sg1entry data */ sg1entry->l_key = lkey; sg1entry->len = frag->size; - tmp_addr = (u64)(page_address(frag->page) - + frag->page_offset); - sg1entry->vaddr = ehea_map_vaddr(tmp_addr); + sg1entry->vaddr = + ehea_map_vaddr(page_address(frag->page) + + frag->page_offset); 
swqe->descriptors++; sg1entry_contains_frag_data = 1; } @@ -1431,10 +1427,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev, sgentry->l_key = lkey; sgentry->len = frag->size; - - tmp_addr = (u64)(page_address(frag->page) - + frag->page_offset); - sgentry->vaddr = ehea_map_vaddr(tmp_addr); + sgentry->vaddr = + ehea_map_vaddr(page_address(frag->page) + + frag->page_offset); swqe->descriptors++; } } @@ -2165,24 +2160,18 @@ static int ehea_clean_all_portres(struct ehea_port *port) return ret; } -static void ehea_remove_adapter_mr (struct ehea_adapter *adapter) +static void ehea_remove_adapter_mr(struct ehea_adapter *adapter) { - int i; - - for (i=0; i < EHEA_MAX_PORTS; i++) - if (adapter->port[i]) - return; + if (adapter->active_ports) + return; ehea_rem_mr(&adapter->mr); } -static int ehea_add_adapter_mr (struct ehea_adapter *adapter) +static int ehea_add_adapter_mr(struct ehea_adapter *adapter) { - int i; - - for (i=0; i < EHEA_MAX_PORTS; i++) - if (adapter->port[i]) - return 0; + if (adapter->active_ports) + return 0; return ehea_reg_kernel_mr(adapter, &adapter->mr); } @@ -3099,6 +3088,7 @@ out: static void __exit ehea_module_exit(void) { + destroy_workqueue(ehea_driver_wq); driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); ehea_destroy_busmap(); diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 03023dd1782..4e8df910c00 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -47,6 +47,7 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/pgtable.h> +#include <asm/cacheflush.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \ defined(CONFIG_M5272) || defined(CONFIG_M528x) || \ @@ -98,8 +99,6 @@ static unsigned char fec_mac_default[] = { #define FEC_FLASHMAC 0xf0006006 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) #define FEC_FLASHMAC 0xf0006000 -#elif defined (CONFIG_MTD_KeyTechnology) -#define FEC_FLASHMAC 0xffe04000 #elif defined(CONFIG_CANCam) #define FEC_FLASHMAC 0xf0020000 #elif defined (CONFIG_M5272C3) @@ -191,6 +190,8 @@ struct fec_enet_private { /* Hardware registers of the FEC device */ volatile fec_t *hwp; + struct net_device *netdev; + /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; @@ -1269,7 +1270,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev) icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR3); *icrp = 0x00000ddd; icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); - *icrp = (*icrp & 0x70777777) | 0x0d000000; + *icrp = 0x0d000000; } static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) @@ -1331,7 +1332,7 @@ static void __inline__ fec_disable_phy_intr(void) { volatile unsigned long *icrp; icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); - *icrp = (*icrp & 0x70777777) | 0x08000000; + *icrp = 0x08000000; } static void __inline__ fec_phy_ack_intr(void) @@ -1339,7 +1340,7 @@ static void __inline__ fec_phy_ack_intr(void) volatile unsigned long *icrp; /* Acknowledge the interrupt */ icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); - *icrp = (*icrp & 0x77777777) | 0x08000000; + *icrp = 0x0d000000; } static void __inline__ fec_localhw_setup(void) @@ -1426,6 +1427,29 @@ static void __inline__ fec_request_intrs(struct net_device *dev) *gpio_pehlpar = 0xc0; } #endif + +#if defined(CONFIG_M527x) + /* Set up gpio outputs for MII lines */ + { + volatile u8 *gpio_par_fec; + volatile u16 *gpio_par_feci2c; + + gpio_par_feci2c = (volatile u16 *)(MCF_IPSBAR + 0x100082); + /* Set up gpio outputs for FEC0 MII lines */ + gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100078); + + *gpio_par_feci2c |= 0x0f00; + *gpio_par_fec |= 0xc0; + +#if defined(CONFIG_FEC2) + /* Set up gpio outputs for FEC1 MII lines */ + gpio_par_fec = (volatile u8 *)(MCF_IPSBAR + 0x100079); + + *gpio_par_feci2c |= 0x00a0; + *gpio_par_fec |= 0xc0; +#endif /* CONFIG_FEC2 */ + } +#endif /* CONFIG_M527x */ } static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep) @@ -1940,9 +1964,10 @@ static void mii_display_status(struct net_device *dev) printk(".\n"); } -static void mii_display_config(struct net_device *dev) +static void mii_display_config(struct work_struct *work) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); + struct net_device *dev = fep->netdev; uint status = fep->phy_status; /* @@ -1976,9 +2001,10 @@ static void mii_display_config(struct net_device *dev) fep->sequence_done = 1; } -static void mii_relink(struct net_device *dev) +static void mii_relink(struct work_struct *work) { - struct fec_enet_private *fep = netdev_priv(dev); + struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); + struct net_device *dev = fep->netdev; int duplex; /* @@ -2022,7 +2048,7 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev) return; fep->mii_phy_task_queued = 1; - INIT_WORK(&fep->phy_task, (void*)mii_relink, dev); + INIT_WORK(&fep->phy_task, mii_relink); schedule_work(&fep->phy_task); } @@ -2035,7 +2061,7 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev) return; fep->mii_phy_task_queued = 1; - INIT_WORK(&fep->phy_task, (void*)mii_display_config, dev); + INIT_WORK(&fep->phy_task, mii_display_config); schedule_work(&fep->phy_task); } @@ -2330,6 +2356,7 @@ int __init fec_enet_init(struct net_device *dev) fep->index = index; fep->hwp = fecp; + fep->netdev = dev; /* Whack a reset. We should wait for this. 
*/ diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 661c747389e..69f5f365239 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -178,6 +178,7 @@ #define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ #define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ #define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ +#define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */ enum { NvRegIrqStatus = 0x000, @@ -4706,7 +4707,6 @@ static const struct ethtool_ops ops = { .get_regs_len = nv_get_regs_len, .get_regs = nv_get_regs, .nway_reset = nv_nway_reset, - .get_perm_addr = ethtool_op_get_perm_addr, .get_tso = ethtool_op_get_tso, .set_tso = nv_set_tso, .get_ringparam = nv_get_ringparam, @@ -5172,7 +5172,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i /* check the workaround bit for correct mac address order */ txreg = readl(base + NvRegTransmitPoll); - if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) { + if ((txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) || + (id->driver_data & DEV_HAS_CORRECT_MACADDR)) { /* mac address is already in correct order */ dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff; dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff; @@ -5500,67 +5501,67 @@ static struct pci_device_id pci_tbl[] = { }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP61 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP65 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* 
MCP67 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, { /* MCP73 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, }, {0,}, }; diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index ac3596f45dd..100bf410bf5 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c @@ -245,7 +245,7 @@ int __init gfar_mdio_init(void) return driver_register(&gianfar_mdio_driver); } -void __exit gfar_mdio_exit(void) +void gfar_mdio_exit(void) { driver_unregister(&gianfar_mdio_driver); } diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h index 5d340046951..b373091c703 100644 --- a/drivers/net/gianfar_mii.h +++ b/drivers/net/gianfar_mii.h @@ -42,5 +42,5 @@ struct gfar_mii { int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); int __init gfar_mdio_init(void); -void __exit gfar_mdio_exit(void); +void gfar_mdio_exit(void); #endif /* GIANFAR_PHY_H */ diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 656f2789c9b..cc0ee93669e 100644 --- a/drivers/net/hamradio/bpqether.c +++ 
b/drivers/net/hamradio/bpqether.c @@ -413,12 +413,12 @@ static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; if (v == SEQ_START_TOKEN) - p = bpq_devices.next; + p = rcu_dereference(bpq_devices.next); else - p = ((struct bpqdev *)v)->bpq_list.next; + p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next); return (p == &bpq_devices) ? NULL - : rcu_dereference(list_entry(p, struct bpqdev, bpq_list)); + : list_entry(p, struct bpqdev, bpq_list); } static void bpq_seq_stop(struct seq_file *seq, void *v) diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index d96eb722954..acba90f1638 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -963,7 +963,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ { int rc, i; struct net_device *netdev; - struct ibmveth_adapter *adapter = NULL; + struct ibmveth_adapter *adapter; unsigned char *mac_addr_p; unsigned int *mcastFilterSize_p; @@ -997,7 +997,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ SET_MODULE_OWNER(netdev); adapter = netdev->priv; - memset(adapter, 0, sizeof(adapter)); dev->dev.driver_data = netdev; adapter->vdev = dev; @@ -1280,24 +1279,28 @@ const char * buf, size_t count) int i; /* Make sure there is a buffer pool with buffers that can hold a packet of the size of the MTU */ - for(i = 0; i<IbmVethNumBufferPools; i++) { + for (i = 0; i < IbmVethNumBufferPools; i++) { if (pool == &adapter->rx_buff_pool[i]) continue; if (!adapter->rx_buff_pool[i].active) continue; - if (mtu < adapter->rx_buff_pool[i].buff_size) { - pool->active = 0; - h_free_logical_lan_buffer(adapter-> - vdev-> - unit_address, - pool-> - buff_size); - } + if (mtu <= adapter->rx_buff_pool[i].buff_size) + break; } - if (pool->active) { + + if (i == IbmVethNumBufferPools) { ibmveth_error_printk("no active pool >= MTU\n"); return -EPERM; } + + pool->active = 0; + if (netif_running(netdev)) { + adapter->pool_config = 1; + ibmveth_close(netdev); + adapter->pool_config = 0; + if ((rc = ibmveth_open(netdev))) + return rc; + } } } else if (attr == &veth_num_attr) { if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h index bb69ccae8ac..72cc15a6cab 100644 --- a/drivers/net/ibmveth.h +++ b/drivers/net/ibmveth.h @@ -73,9 +73,6 @@ static inline long h_send_logical_lan(unsigned long unit_address, #define h_change_logical_lan_mac(ua, mac) \ plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) -#define h_free_logical_lan_buffer(ua, bufsize) \ - plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize) - #define IbmVethNumBufferPools 5 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */ #define IBMVETH_MAX_MTU 68 diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 0ac240ca905..3b0fd83fa26 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -1561,10 +1561,9 @@ static inline struct irda_class_desc *irda_usb_find_class_desc(struct usb_interf struct irda_class_desc *desc; int ret; - desc = kmalloc(sizeof (*desc), GFP_KERNEL); - if (desc == NULL) + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) return NULL; - memset(desc, 0, sizeof(*desc)); /* USB-IrDA class spec 1.0: * 6.1.3: Standard "Get Descriptor" Device Request is not @@ -1617,7 +1616,7 @@ static int irda_usb_probe(struct usb_interface *intf, { struct net_device *net; struct usb_device *dev = interface_to_usbdev(intf); - struct irda_usb_cb *self = NULL; + struct irda_usb_cb *self; 
struct usb_host_interface *interface; struct irda_class_desc *irda_desc; int ret = -ENOMEM; @@ -1655,7 +1654,7 @@ static int irda_usb_probe(struct usb_interface *intf, self->header_length = USB_IRDA_HEADER; } - self->rx_urb = kzalloc(self->max_rx_urb * sizeof(struct urb *), + self->rx_urb = kcalloc(self->max_rx_urb, sizeof(struct urb *), GFP_KERNEL); for (i = 0; i < self->max_rx_urb; i++) { @@ -1715,7 +1714,7 @@ static int irda_usb_probe(struct usb_interface *intf, /* Find IrDA class descriptor */ irda_desc = irda_usb_find_class_desc(intf); ret = -ENODEV; - if (irda_desc == NULL) + if (!irda_desc) goto err_out_3; if (self->needspatch) { @@ -1738,15 +1737,13 @@ static int irda_usb_probe(struct usb_interface *intf, /* Don't change this buffer size and allocation without doing * some heavy and complete testing. Don't ask why :-( * Jean II */ - self->speed_buff = kmalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL); - if (self->speed_buff == NULL) + self->speed_buff = kzalloc(IRDA_USB_SPEED_MTU, GFP_KERNEL); + if (!self->speed_buff) goto err_out_3; - memset(self->speed_buff, 0, IRDA_USB_SPEED_MTU); - self->tx_buff = kzalloc(IRDA_SKB_MAX_MTU + self->header_length, GFP_KERNEL); - if (self->tx_buff == NULL) + if (!self->tx_buff) goto err_out_4; ret = irda_usb_open(self); @@ -1767,12 +1764,11 @@ static int irda_usb_probe(struct usb_interface *intf, /* replace IrDA class descriptor with what patched device is now reporting */ irda_desc = irda_usb_find_class_desc (self->usbintf); - if (irda_desc == NULL) { + if (!irda_desc) { ret = -ENODEV; goto err_out_6; } - if (self->irda_desc) - kfree (self->irda_desc); + kfree(self->irda_desc); self->irda_desc = irda_desc; irda_usb_init_qos(self); } diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c index afde84868be..0413cd95eda 100644 --- a/drivers/net/ixgb/ixgb_ethtool.c +++ b/drivers/net/ixgb/ixgb_ethtool.c @@ -724,7 +724,6 @@ static const struct ethtool_ops ixgb_ethtool_ops = { .phys_id = ixgb_phys_id, .get_stats_count = ixgb_get_stats_count, .get_ethtool_stats = ixgb_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; void ixgb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/lguest_net.c b/drivers/net/lguest_net.c index 112778652f7..cab57911a80 100644 --- a/drivers/net/lguest_net.c +++ b/drivers/net/lguest_net.c @@ -1,6 +1,13 @@ -/* A simple network driver for lguest. +/*D:500 + * The Guest network driver. * - * Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation + * This is very simple a virtual network driver, and our last Guest driver. + * The only trick is that it can talk directly to multiple other recipients + * (ie. other Guests on the same network). It can also be used with only the + * Host on the network. + :*/ + +/* Copyright 2006 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -28,23 +35,47 @@ #define MAX_LANS 4 #define NUM_SKBS 8 +/*M:011 Network code master Jeff Garzik points out numerous shortcomings in + * this driver if it aspires to greatness. + * + * Firstly, it doesn't use "NAPI": the networking's New API, and is poorer for + * it. As he says "NAPI means system-wide load leveling, across multiple + * network interfaces. Lack of NAPI can mean competition at higher loads." + * + * He also points out that we don't implement set_mac_address, so users cannot + * change the devices hardware address. 
When I asked why one would want to: + * "Bonding, and situations where you /do/ want the MAC address to "leak" out + * of the host onto the wider net." + * + * Finally, he would like module unloading: "It is not unrealistic to think of + * [un|re|]loading the net support module in an lguest guest. And, adding + * module support makes the programmer more responsible, because they now have + * to learn to clean up after themselves. Any driver that cannot clean up + * after itself is an incomplete driver in my book." + :*/ + +/*D:530 The "struct lguestnet_info" contains all the information we need to + * know about the network device. */ struct lguestnet_info { - /* The shared page(s). */ + /* The mapped device page(s) (an array of "struct lguest_net"). */ struct lguest_net *peer; + /* The physical address of the device page(s) */ unsigned long peer_phys; + /* The size of the device page(s). */ unsigned long mapsize; /* The lguest_device I come from */ struct lguest_device *lgdev; - /* My peerid. */ + /* My peerid (ie. my slot in the array). */ unsigned int me; - /* Receive queue. */ + /* Receive queue: the network packets waiting to be filled. */ struct sk_buff *skb[NUM_SKBS]; struct lguest_dma dma[NUM_SKBS]; }; +/*:*/ /* How many bytes left in this page. */ static unsigned int rest_of_page(void *data) @@ -52,39 +83,82 @@ static unsigned int rest_of_page(void *data) return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); } -/* Simple convention: offset 4 * peernum. */ +/*D:570 Each peer (ie. Guest or Host) on the network binds their receive + * buffers to a different key: we simply use the physical address of the + * device's memory page plus the peer number. The Host insists that all keys + * be a multiple of 4, so we multiply the peer number by 4. */ static unsigned long peer_key(struct lguestnet_info *info, unsigned peernum) { return info->peer_phys + 4 * peernum; } +/* This is the routine which sets up a "struct lguest_dma" to point to a + * network packet, similar to req_to_dma() in lguest_blk.c. The structure of a + * "struct sk_buff" has grown complex over the years: it consists of a "head" + * linear section pointed to by "skb->data", and possibly an array of + * "fragments" in the case of a non-linear packet. + * + * Our receive buffers don't use fragments at all but outgoing skbs might, so + * we handle it. */ static void skb_to_dma(const struct sk_buff *skb, unsigned int headlen, struct lguest_dma *dma) { unsigned int i, seg; + /* First, we put the linear region into the "struct lguest_dma". Each + * entry can't go over a page boundary, so even though all our packets + * are 1514 bytes or less, we might need to use two entries here: */ for (i = seg = 0; i < headlen; seg++, i += rest_of_page(skb->data+i)) { dma->addr[seg] = virt_to_phys(skb->data + i); dma->len[seg] = min((unsigned)(headlen - i), rest_of_page(skb->data + i)); } + + /* Now we handle the fragments: at least they're guaranteed not to go + * over a page. skb_shinfo(skb) returns a pointer to the structure + * which tells us about the number of fragments and the fragment + * array. */ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, seg++) { const skb_frag_t *f = &skb_shinfo(skb)->frags[i]; /* Should not happen with MTU less than 64k - 2 * PAGE_SIZE. */ if (seg == LGUEST_MAX_DMA_SECTIONS) { + /* We will end up sending a truncated packet should + * this ever happen. Plus, a cool log message! */ printk("Woah dude! 
Megapacket!\n"); break; } dma->addr[seg] = page_to_phys(f->page) + f->page_offset; dma->len[seg] = f->size; } + + /* If after all that we didn't use the entire "struct lguest_dma" + * array, we terminate it with a 0 length. */ if (seg < LGUEST_MAX_DMA_SECTIONS) dma->len[seg] = 0; } -/* We overload multicast bit to show promiscuous mode. */ +/* + * Packet transmission. + * + * Our packet transmission is a little unusual. A real network card would just + * send out the packet and leave the receivers to decide if they're interested. + * Instead, we look through the network device memory page and see if any of + * the ethernet addresses match the packet destination, and if so we send it to + * that Guest. + * + * This is made a little more complicated in two cases. The first case is + * broadcast packets: for that we send the packet to all Guests on the network, + * one at a time. The second case is "promiscuous" mode, where a Guest wants + * to see all the packets on the network. We need a way for the Guest to tell + * us it wants to see all packets, so it sets the "multicast" bit on its + * published MAC address, which is never valid in a real ethernet address. + */ #define PROMISC_BIT 0x01 +/* This is the callback which is summoned whenever the network device's + * multicast or promiscuous state changes. If the card is in promiscuous mode, + * we advertise that in our ethernet address in the device's memory. We do the + * same if Linux wants any or all multicast traffic. */ static void lguestnet_set_multicast(struct net_device *dev) { struct lguestnet_info *info = netdev_priv(dev); @@ -95,11 +169,14 @@ static void lguestnet_set_multicast(struct net_device *dev) info->peer[info->me].mac[0] &= ~PROMISC_BIT; } +/* A simple test function to see if a peer wants to see all packets.*/ static int promisc(struct lguestnet_info *info, unsigned int peer) { return info->peer[peer].mac[0] & PROMISC_BIT; } +/* Another simple function to see if a peer's advertised ethernet address + * matches a packet's destination ethernet address. */ static int mac_eq(const unsigned char mac[ETH_ALEN], struct lguestnet_info *info, unsigned int peer) { @@ -109,6 +186,8 @@ static int mac_eq(const unsigned char mac[ETH_ALEN], return memcmp(mac+1, info->peer[peer].mac+1, ETH_ALEN-1) == 0; } +/* This is the function which actually sends a packet once we've decided a + * peer wants it: */ static void transfer_packet(struct net_device *dev, struct sk_buff *skb, unsigned int peernum) @@ -116,76 +195,134 @@ static void transfer_packet(struct net_device *dev, struct lguestnet_info *info = netdev_priv(dev); struct lguest_dma dma; + /* We use our handy "struct lguest_dma" packing function to prepare + * the skb for sending. */ skb_to_dma(skb, skb_headlen(skb), &dma); pr_debug("xfer length %04x (%u)\n", htons(skb->len), skb->len); + /* This is the actual send call which copies the packet. */ lguest_send_dma(peer_key(info, peernum), &dma); + + /* Check that the entire packet was transmitted. If not, it could mean + * that the other Guest registered a short receive buffer, but this + * driver should never do that. More likely, the peer is dead. */ if (dma.used_len != skb->len) { dev->stats.tx_carrier_errors++; pr_debug("Bad xfer to peer %i: %i of %i (dma %p/%i)\n", peernum, dma.used_len, skb->len, (void *)dma.addr[0], dma.len[0]); } else { + /* On success we update the stats. */ dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; } } +/* Another helper function to tell is if a slot in the device memory is unused. 
+ * Since we always set the Local Assignment bit in the ethernet address, the + * first byte can never be 0. */ static int unused_peer(const struct lguest_net peer[], unsigned int num) { return peer[num].mac[0] == 0; } +/* Finally, here is the routine which handles an outgoing packet. It's called + * "start_xmit" for traditional reasons. */ static int lguestnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int i; int broadcast; struct lguestnet_info *info = netdev_priv(dev); + /* Extract the destination ethernet address from the packet. */ const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; pr_debug("%s: xmit %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, dest[0],dest[1],dest[2],dest[3],dest[4],dest[5]); + /* If it's a multicast packet, we broadcast to everyone. That's not + * very efficient, but there are very few applications which actually + * use multicast, which is a shame really. + * + * As etherdevice.h points out: "By definition the broadcast address is + * also a multicast address." So we don't have to test for broadcast + * packets separately. */ broadcast = is_multicast_ether_addr(dest); + + /* Look through all the published ethernet addresses to see if we + * should send this packet. */ for (i = 0; i < info->mapsize/sizeof(struct lguest_net); i++) { + /* We don't send to ourselves (we actually can't SEND_DMA to + * ourselves anyway), and don't send to unused slots.*/ if (i == info->me || unused_peer(info->peer, i)) continue; + /* If it's broadcast we send it. If they want every packet we + * send it. If the destination matches their address we send + * it. Otherwise we go to the next peer. */ if (!broadcast && !promisc(info, i) && !mac_eq(dest, info, i)) continue; pr_debug("lguestnet %s: sending from %i to %i\n", dev->name, info->me, i); + /* Our routine which actually does the transfer. */ transfer_packet(dev, skb, i); } + + /* An xmit routine is expected to dispose of the packet, so we do. */ dev_kfree_skb(skb); + + /* As per kernel convention, 0 means success. This is why I love + * networking: even if we never sent to anyone, that's still + * success! */ return 0; } -/* Find a new skb to put in this slot in shared mem. */ +/*D:560 + * Packet receiving. + * + * First, here's a helper routine which fills one of our array of receive + * buffers: */ static int fill_slot(struct net_device *dev, unsigned int slot) { struct lguestnet_info *info = netdev_priv(dev); - /* Try to create and register a new one. */ + + /* We can receive ETH_DATA_LEN (1500) byte packets, plus a standard + * ethernet header of ETH_HLEN (14) bytes. */ info->skb[slot] = netdev_alloc_skb(dev, ETH_HLEN + ETH_DATA_LEN); if (!info->skb[slot]) { printk("%s: could not fill slot %i\n", dev->name, slot); return -ENOMEM; } + /* skb_to_dma() is a helper which sets up the "struct lguest_dma" to + * point to the data in the skb: we also use it for sending out a + * packet. */ skb_to_dma(info->skb[slot], ETH_HLEN + ETH_DATA_LEN, &info->dma[slot]); + + /* This is a Write Memory Barrier: it ensures that the entry in the + * receive buffer array is written *before* we set the "used_len" entry + * to 0. If the Host were looking at the receive buffer array from a + * different CPU, it could potentially see "used_len = 0" and not see + * the updated receive buffer information. This would be a horribly + * nasty bug, so make sure the compiler and CPU know this has to happen + * first. */ wmb(); - /* Now we tell hypervisor it can use the slot. 
*/ + /* Writing 0 to "used_len" tells the Host it can use this receive + * buffer now. */ info->dma[slot].used_len = 0; return 0; } +/* This is the actual receive routine. When we receive an interrupt from the + * Host to tell us a packet has been delivered, we arrive here: */ static irqreturn_t lguestnet_rcv(int irq, void *dev_id) { struct net_device *dev = dev_id; struct lguestnet_info *info = netdev_priv(dev); unsigned int i, done = 0; + /* Look through our entire receive array for an entry which has data + * in it. */ for (i = 0; i < ARRAY_SIZE(info->dma); i++) { unsigned int length; struct sk_buff *skb; @@ -194,10 +331,16 @@ static irqreturn_t lguestnet_rcv(int irq, void *dev_id) if (length == 0) continue; + /* We've found one! Remember the skb (we grabbed the length + * above), and immediately refill the slot we've taken it + * from. */ done++; skb = info->skb[i]; fill_slot(dev, i); + /* This shouldn't happen: micropackets could be sent by a + * badly-behaved Guest on the network, but the Host will never + * stuff more data in the buffer than the buffer length. */ if (length < ETH_HLEN || length > ETH_HLEN + ETH_DATA_LEN) { pr_debug(KERN_WARNING "%s: unbelievable skb len: %i\n", dev->name, length); @@ -205,36 +348,72 @@ static irqreturn_t lguestnet_rcv(int irq, void *dev_id) continue; } + /* skb_put(), what a great function! I've ranted about this + * function before (http://lkml.org/lkml/1999/9/26/24). You + * call it after you've added data to the end of an skb (in + * this case, it was the Host which wrote the data). */ skb_put(skb, length); + + /* The ethernet header contains a protocol field: we use the + * standard helper to extract it, and place the result in + * skb->protocol. The helper also sets up skb->pkt_type and + * eats up the ethernet header from the front of the packet. */ skb->protocol = eth_type_trans(skb, dev); - /* This is a reliable transport. */ + + /* If this device doesn't need checksums for sending, we also + * don't need to check the packets when they come in. */ if (dev->features & NETIF_F_NO_CSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* As a last resort for debugging the driver or the lguest I/O + * subsystem, you can uncomment the "#define DEBUG" at the top + * of this file, which turns all the pr_debug() into printk() + * and floods the logs. */ pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); + /* Update the packet and byte counts (visible from ifconfig, + * and good for debugging). */ dev->stats.rx_bytes += skb->len; dev->stats.rx_packets++; + + /* Hand our fresh network packet into the stack's "network + * interface receive" routine. That will free the packet + * itself when it's finished. */ netif_rx(skb); } + + /* If we found any packets, we assume the interrupt was for us. */ return done ? IRQ_HANDLED : IRQ_NONE; } +/*D:550 This is where we start: when the device is brought up by dhcpd or + * ifconfig. At this point we advertise our MAC address to the rest of the + * network, and register receive buffers ready for incoming packets. */ static int lguestnet_open(struct net_device *dev) { int i; struct lguestnet_info *info = netdev_priv(dev); - /* Set up our MAC address */ + /* Copy our MAC address into the device page, so others on the network + * can find us. */ memcpy(info->peer[info->me].mac, dev->dev_addr, ETH_ALEN); - /* Turn on promisc mode if needed */ + /* We might already be in promisc mode (dev->flags & IFF_PROMISC). 
Our + * set_multicast callback handles this already, so we call it now. */ lguestnet_set_multicast(dev); + /* Allocate packets and put them into our "struct lguest_dma" array. + * If we fail to allocate all the packets we could still limp along, + * but it's a sign of real stress so we should probably give up now. */ for (i = 0; i < ARRAY_SIZE(info->dma); i++) { if (fill_slot(dev, i) != 0) goto cleanup; } + + /* Finally we tell the Host where our array of "struct lguest_dma" + * receive buffers is, binding it to the key corresponding to the + * device's physical memory plus our peerid. */ if (lguest_bind_dma(peer_key(info,info->me), info->dma, NUM_SKBS, lgdev_irq(info->lgdev)) != 0) goto cleanup; @@ -245,22 +424,29 @@ cleanup: dev_kfree_skb(info->skb[i]); return -ENOMEM; } +/*:*/ +/* The close routine is called when the device is no longer in use: we clean up + * elegantly. */ static int lguestnet_close(struct net_device *dev) { unsigned int i; struct lguestnet_info *info = netdev_priv(dev); - /* Clear all trace: others might deliver packets, we'll ignore it. */ + /* Clear all trace of our existence out of the device memory by setting + * the slot which held our MAC address to 0 (unused). */ memset(&info->peer[info->me], 0, sizeof(info->peer[info->me])); - /* Deregister sg lists. */ + /* Unregister our array of receive buffers */ lguest_unbind_dma(peer_key(info, info->me), info->dma); for (i = 0; i < ARRAY_SIZE(info->dma); i++) dev_kfree_skb(info->skb[i]); return 0; } +/*D:510 The network device probe function is basically a standard ethernet + * device setup. It reads the "struct lguest_device_desc" and sets the "struct + * net_device". Oh, the line-by-line excitement! Let's skip over it. :*/ static int lguestnet_probe(struct lguest_device *lgdev) { int err, irqf = IRQF_SHARED; @@ -290,10 +476,16 @@ static int lguestnet_probe(struct lguest_device *lgdev) dev->stop = lguestnet_close; dev->hard_start_xmit = lguestnet_start_xmit; - /* Turning on/off promisc will call dev->set_multicast_list. - * We don't actually support multicast yet */ + /* We don't actually support multicast yet, but turning on/off + * promisc also calls dev->set_multicast_list. */ dev->set_multicast_list = lguestnet_set_multicast; SET_NETDEV_DEV(dev, &lgdev->dev); + + /* The network code complains if you have "scatter-gather" capability + * if you don't also handle checksums (it seem that would be + * "illogical"). So we use a lie of omission and don't tell it that we + * can handle scattered packets unless we also don't want checksums, + * even though to us they're completely independent. */ if (desc->features & LGUEST_NET_F_NOCSUM) dev->features = NETIF_F_SG|NETIF_F_NO_CSUM; @@ -325,6 +517,9 @@ static int lguestnet_probe(struct lguest_device *lgdev) } pr_debug("lguestnet: registered device %s\n", dev->name); + /* Finally, we put the "struct net_device" in the generic "struct + * lguest_device"s private pointer. Again, it's not necessary, but + * makes sure the cool kernel kids don't tease us. */ lgdev->private = dev; return 0; @@ -352,3 +547,11 @@ module_init(lguestnet_init); MODULE_DESCRIPTION("Lguest network driver"); MODULE_LICENSE("GPL"); + +/*D:580 + * This is the last of the Drivers, and with this we have covered the many and + * wonderous and fine (and boring) details of the Guest. + * + * "make Launcher" beckons, where we answer questions like "Where do Guests + * come from?", and "What do you do when someone asks for optimization?" 
+ */ diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index 5c86e737f95..c429a5002dd 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c @@ -143,6 +143,52 @@ static void __NS8390_init(struct net_device *dev, int startp); * annoying the transmit function is called bh atomic. That places * restrictions on the user context callers as disable_irq won't save * them. + * + * Additional explanation of problems with locking by Alan Cox: + * + * "The author (me) didn't use spin_lock_irqsave because the slowness of the + * card means that approach caused horrible problems like losing serial data + * at 38400 baud on some chips. Rememeber many 8390 nics on PCI were ISA + * chips with FPGA front ends. + * + * Ok the logic behind the 8390 is very simple: + * + * Things to know + * - IRQ delivery is asynchronous to the PCI bus + * - Blocking the local CPU IRQ via spin locks was too slow + * - The chip has register windows needing locking work + * + * So the path was once (I say once as people appear to have changed it + * in the mean time and it now looks rather bogus if the changes to use + * disable_irq_nosync_irqsave are disabling the local IRQ) + * + * + * Take the page lock + * Mask the IRQ on chip + * Disable the IRQ (but not mask locally- someone seems to have + * broken this with the lock validator stuff) + * [This must be _nosync as the page lock may otherwise + * deadlock us] + * Drop the page lock and turn IRQs back on + * + * At this point an existing IRQ may still be running but we can't + * get a new one + * + * Take the lock (so we know the IRQ has terminated) but don't mask + * the IRQs on the processor + * Set irqlock [for debug] + * + * Transmit (slow as ****) + * + * re-enable the IRQ + * + * + * We have to use disable_irq because otherwise you will get delayed + * interrupts on the APIC bus deadlocking the transmit path. + * + * Quite hairy but the chip simply wasn't designed for SMP and you can't + * even ACK an interrupt without risking corrupting other parallel + * activities on the chip." [lkml, 25 Jul 2007] */ @@ -219,15 +265,6 @@ static void ei_tx_timeout(struct net_device *dev) int txsr, isr, tickssofar = jiffies - dev->trans_start; unsigned long flags; -#if defined(CONFIG_M32R) && defined(CONFIG_SMP) - unsigned long icucr; - - local_irq_save(flags); - icucr = inl(M32R_ICU_CR1_PORTL); - icucr |= M32R_ICUCR_ISMOD11; - outl(icucr, M32R_ICU_CR1_PORTL); - local_irq_restore(flags); -#endif ei_local->stat.tx_errors++; spin_lock_irqsave(&ei_local->page_lock, flags); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 6ba6ed2b480..5106c2328d1 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -229,7 +229,12 @@ struct net_device loopback_dev = { /* Setup and register the loopback device. 
*/ static int __init loopback_init(void) { - return register_netdev(&loopback_dev); + int err = register_netdev(&loopback_dev); + + if (err) + panic("loopback: Failed to register netdevice: %d\n", err); + + return err; }; module_init(loopback_init); diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index d0808fa3ec8..5b87183e62c 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c @@ -255,10 +255,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, int err; index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); - if (index == -1) { - err = -ENOMEM; - goto err; - } + if (index == -1) + return -ENOMEM; mr->iova = iova; mr->size = size; @@ -269,15 +267,8 @@ int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); if (err) - goto err_index; - - return 0; - -err_index: - mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); + mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); -err: - kfree(mr); return err; } EXPORT_SYMBOL_GPL(mlx4_mr_alloc); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index deca65330b0..ae9bb7b7fd6 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -191,6 +191,7 @@ struct myri10ge_priv { struct timer_list watchdog_timer; int watchdog_tx_done; int watchdog_tx_req; + int watchdog_pause; int watchdog_resets; int tx_linearized; int pause; @@ -2800,6 +2801,7 @@ static void myri10ge_watchdog(struct work_struct *work) static void myri10ge_watchdog_timer(unsigned long arg) { struct myri10ge_priv *mgp; + u32 rx_pause_cnt; mgp = (struct myri10ge_priv *)arg; @@ -2816,19 +2818,28 @@ static void myri10ge_watchdog_timer(unsigned long arg) myri10ge_fill_thresh) mgp->rx_big.watchdog_needed = 0; } + rx_pause_cnt = ntohl(mgp->fw_stats->dropped_pause); if (mgp->tx.req != mgp->tx.done && mgp->tx.done == mgp->watchdog_tx_done && - mgp->watchdog_tx_req != mgp->watchdog_tx_done) + mgp->watchdog_tx_req != mgp->watchdog_tx_done) { /* nic seems like it might be stuck.. */ - schedule_work(&mgp->watchdog_work); - else - /* rearm timer */ - mod_timer(&mgp->watchdog_timer, - jiffies + myri10ge_watchdog_timeout * HZ); - + if (rx_pause_cnt != mgp->watchdog_pause) { + if (net_ratelimit()) + printk(KERN_WARNING "myri10ge %s:" + "TX paused, check link partner\n", + mgp->dev->name); + } else { + schedule_work(&mgp->watchdog_work); + return; + } + } + /* rearm timer */ + mod_timer(&mgp->watchdog_timer, + jiffies + myri10ge_watchdog_timeout * HZ); mgp->watchdog_tx_done = mgp->tx.done; mgp->watchdog_tx_req = mgp->tx.req; + mgp->watchdog_pause = rx_pause_cnt; } static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 6bb48ba8096..b47a12d684f 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -2438,13 +2438,16 @@ static void netdev_error(struct net_device *dev, int intr_status) dev->name); } np->stats.rx_fifo_errors++; + np->stats.rx_errors++; } /* Hmmmmm, it's not clear how to recover from PCI faults. 
*/ if (intr_status & IntrPCIErr) { printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name, intr_status & IntrPCIErr); np->stats.tx_fifo_errors++; + np->stats.tx_errors++; np->stats.rx_fifo_errors++; + np->stats.rx_errors++; } spin_unlock(&np->lock); } diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c index cfdeaf7aa16..f81d9398d60 100644 --- a/drivers/net/ne2k-pci.c +++ b/drivers/net/ne2k-pci.c @@ -638,7 +638,6 @@ static const struct ethtool_ops ne2k_pci_ethtool_ops = { .get_drvinfo = ne2k_pci_get_drvinfo, .get_tx_csum = ethtool_op_get_tx_csum, .get_sg = ethtool_op_get_sg, - .get_perm_addr = ethtool_op_get_perm_addr, }; static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 0175f6c353f..a6138b474b4 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -755,5 +755,4 @@ struct ethtool_ops netxen_nic_ethtool_ops = { .get_strings = netxen_nic_get_strings, .get_stats_count = netxen_nic_get_stats_count, .get_ethtool_stats = netxen_nic_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 19e2fa940ac..08a62acde8b 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -335,7 +335,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->ahw.pdev = pdev; adapter->ahw.pci_func = pci_func_id; spin_lock_init(&adapter->tx_lock); - spin_lock_init(&adapter->lock); /* remap phys address */ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ @@ -895,8 +894,6 @@ static int netxen_nic_open(struct net_device *netdev) /* Done here again so that even if phantom sw overwrote it, * we set it */ - if (adapter->macaddr_set) - adapter->macaddr_set(adapter, netdev->dev_addr); if (adapter->init_port && adapter->init_port(adapter, adapter->portnum) != 0) { del_timer_sync(&adapter->watchdog_timer); @@ -904,6 +901,8 @@ static int netxen_nic_open(struct net_device *netdev) netxen_nic_driver_name, adapter->portnum); return -EIO; } + if (adapter->macaddr_set) + adapter->macaddr_set(adapter, netdev->dev_addr); netxen_nic_set_link_parameters(adapter); @@ -930,6 +929,8 @@ static int netxen_nic_close(struct net_device *netdev) netif_carrier_off(netdev); netif_stop_queue(netdev); + netxen_nic_disable_int(adapter); + cmd_buff = adapter->cmd_buf_arr; for (i = 0; i < adapter->max_tx_desc_count; i++) { buffrag = cmd_buff->frag_array; @@ -1226,15 +1227,12 @@ static void netxen_tx_timeout_task(struct work_struct *work) { struct netxen_adapter *adapter = container_of(work, struct netxen_adapter, tx_timeout_task); - unsigned long flags; printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", netxen_nic_driver_name, adapter->netdev->name); - spin_lock_irqsave(&adapter->lock, flags); netxen_nic_close(adapter->netdev); netxen_nic_open(adapter->netdev); - spin_unlock_irqrestore(&adapter->lock, flags); adapter->netdev->trans_start = jiffies; netif_wake_queue(adapter->netdev); } @@ -1243,28 +1241,12 @@ static int netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev) { u32 ret = 0; - u32 our_int = 0; DPRINTK(INFO, "Entered handle ISR\n"); adapter->stats.ints++; - if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { - our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); - /* not our interrupt */ - if ((our_int & (0x80 << adapter->portnum)) == 0) - return ret; - } - 
netxen_nic_disable_int(adapter); - if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { - /* claim interrupt */ - if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { - writel(our_int & ~((u32)(0x80 << adapter->portnum)), - NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); - } - } - if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) { if (netif_rx_schedule_prep(netdev)) { /* @@ -1298,6 +1280,7 @@ irqreturn_t netxen_intr(int irq, void *data) { struct netxen_adapter *adapter; struct net_device *netdev; + u32 our_int = 0; if (unlikely(!irq)) { return IRQ_NONE; /* Not our interrupt */ @@ -1305,7 +1288,22 @@ irqreturn_t netxen_intr(int irq, void *data) adapter = (struct netxen_adapter *)data; netdev = adapter->netdev; - /* process our status queue (for all 4 ports) */ + + if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { + our_int = readl(NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); + /* not our interrupt */ + if ((our_int & (0x80 << adapter->portnum)) == 0) + return IRQ_NONE; + } + + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { + /* claim interrupt */ + if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { + writel(our_int & ~((u32)(0x80 << adapter->portnum)), + NETXEN_CRB_NORMALIZE(adapter, CRB_INT_VECTOR)); + } + } + if (netif_running(netdev)) netxen_handle_int(adapter, netdev); diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 73da611fd53..997c2d0c83b 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -996,7 +996,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; mace_private *lp = netdev_priv(dev); - kio_addr_t ioaddr = dev->base_addr; + kio_addr_t ioaddr; int status; int IntrCnt = MACE_MAX_IR_ITERATIONS; @@ -1006,6 +1006,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) return IRQ_NONE; } + ioaddr = dev->base_addr; + if (lp->tx_irq_disabled) { printk( (lp->tx_irq_disabled? diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 7912dbd1425..af6728cb49c 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -1368,6 +1368,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) kio_addr_t ioaddr = dev->base_addr; u_short num_pages; short time_out, ir; + unsigned long flags; netif_stop_queue(dev); @@ -1395,6 +1396,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) /* A packet is now waiting. */ smc->packets_waiting++; + spin_lock_irqsave(&smc->lock, flags); SMC_SELECT_BANK(2); /* Paranoia, we should always be in window 2 */ /* need MC_RESET to keep the memory consistent. errata? */ @@ -1411,6 +1413,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Acknowledge the interrupt, send the packet. */ outw((ir&0xff00) | IM_ALLOC_INT, ioaddr + INTERRUPT); smc_hardware_send_packet(dev); /* Send the packet now.. */ + spin_unlock_irqrestore(&smc->lock, flags); return 0; } } @@ -1418,6 +1421,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Otherwise defer until the Tx-space-allocated interrupt. 
*/ DEBUG(2, "%s: memory allocation deferred.\n", dev->name); outw((IM_ALLOC_INT << 8) | (ir & 0xff00), ioaddr + INTERRUPT); + spin_unlock_irqrestore(&smc->lock, flags); return 0; } @@ -1523,6 +1527,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id) DEBUG(3, "%s: SMC91c92 interrupt %d at %#x.\n", dev->name, irq, ioaddr); + spin_lock(&smc->lock); smc->watchdog = 0; saved_bank = inw(ioaddr + BANK_SELECT); if ((saved_bank & 0xff00) != 0x3300) { @@ -1620,6 +1625,7 @@ irq_done: readb(smc->base+MEGAHERTZ_ISR); } #endif + spin_unlock(&smc->lock); return IRQ_RETVAL(handled); } @@ -1902,6 +1908,9 @@ static void media_check(u_long arg) kio_addr_t ioaddr = dev->base_addr; u_short i, media, saved_bank; u_short link; + unsigned long flags; + + spin_lock_irqsave(&smc->lock, flags); saved_bank = inw(ioaddr + BANK_SELECT); @@ -1934,6 +1943,7 @@ static void media_check(u_long arg) smc->media.expires = jiffies + HZ/100; add_timer(&smc->media); SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); return; } @@ -2007,6 +2017,7 @@ reschedule: smc->media.expires = jiffies + HZ; add_timer(&smc->media); SMC_SELECT_BANK(saved_bank); + spin_unlock_irqrestore(&smc->lock, flags); } static int smc_link_ok(struct net_device *dev) @@ -2094,14 +2105,14 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) u16 saved_bank = inw(ioaddr + BANK_SELECT); int ret; - SMC_SELECT_BANK(3); spin_lock_irq(&smc->lock); + SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) ret = mii_ethtool_gset(&smc->mii_if, ecmd); else ret = smc_netdev_get_ecmd(dev, ecmd); - spin_unlock_irq(&smc->lock); SMC_SELECT_BANK(saved_bank); + spin_unlock_irq(&smc->lock); return ret; } @@ -2112,14 +2123,14 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) u16 saved_bank = inw(ioaddr + BANK_SELECT); int ret; - SMC_SELECT_BANK(3); spin_lock_irq(&smc->lock); + SMC_SELECT_BANK(3); if (smc->cfg & CFG_MII_SELECT) ret = mii_ethtool_sset(&smc->mii_if, ecmd); else ret = smc_netdev_set_ecmd(dev, ecmd); - spin_unlock_irq(&smc->lock); SMC_SELECT_BANK(saved_bank); + spin_unlock_irq(&smc->lock); return ret; } @@ -2130,11 +2141,11 @@ static u32 smc_get_link(struct net_device *dev) u16 saved_bank = inw(ioaddr + BANK_SELECT); u32 ret; - SMC_SELECT_BANK(3); spin_lock_irq(&smc->lock); + SMC_SELECT_BANK(3); ret = smc_link_ok(dev); - spin_unlock_irq(&smc->lock); SMC_SELECT_BANK(saved_bank); + spin_unlock_irq(&smc->lock); return ret; } diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 465485a3fbc..e6a67531de9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -1515,7 +1515,6 @@ static const struct ethtool_ops pcnet32_ethtool_ops = { .phys_id = pcnet32_phys_id, .get_regs_len = pcnet32_get_regs_len, .get_regs = pcnet32_get_regs, - .get_perm_addr = ethtool_op_get_perm_addr, }; /* only probes for non-PCI devices, the rest are handled by diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index f71dab34766..e323efd4ed1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -261,7 +261,7 @@ void phy_sanitize_settings(struct phy_device *phydev) /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) - phydev->autoneg = 0; + phydev->autoneg = AUTONEG_DISABLE; idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex), features); @@ -374,7 +374,7 @@ int phy_mii_ioctl(struct phy_device *phydev, if (mii_data->phy_id == phydev->addr) { switch(mii_data->reg_num) { case MII_BMCR: - if (val & (BMCR_RESET|BMCR_ANENABLE)) 
+ if ((val & (BMCR_RESET|BMCR_ANENABLE)) == 0) phydev->autoneg = AUTONEG_DISABLE; else phydev->autoneg = AUTONEG_ENABLE; diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 6f98834e6ac..68631a5721a 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -108,19 +108,24 @@ static inline int cmp_addr(struct pppoe_addr *a, unsigned long sid, char *addr) (memcmp(a->remote,addr,ETH_ALEN) == 0)); } -static int hash_item(unsigned long sid, unsigned char *addr) +#if 8%PPPOE_HASH_BITS +#error 8 must be a multiple of PPPOE_HASH_BITS +#endif + +static int hash_item(unsigned int sid, unsigned char *addr) { - char hash = 0; - int i, j; + unsigned char hash = 0; + unsigned int i; - for (i = 0; i < ETH_ALEN ; ++i) { - for (j = 0; j < 8/PPPOE_HASH_BITS ; ++j) { - hash ^= addr[i] >> ( j * PPPOE_HASH_BITS ); - } + for (i = 0 ; i < ETH_ALEN ; i++) { + hash ^= addr[i]; + } + for (i = 0 ; i < sizeof(sid_t)*8 ; i += 8 ){ + hash ^= sid>>i; + } + for (i = 8 ; (i>>=1) >= PPPOE_HASH_BITS ; ) { + hash ^= hash>>i; } - - for (i = 0; i < (sizeof(unsigned long)*8) / PPPOE_HASH_BITS ; ++i) - hash ^= sid >> (i*PPPOE_HASH_BITS); return hash & ( PPPOE_HASH_SIZE - 1 ); } @@ -664,8 +669,8 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); - int val = 0; - int err = 0; + int val; + int err; switch (cmd) { case PPPIOCGMRU: @@ -754,8 +759,9 @@ static int pppoe_ioctl(struct socket *sock, unsigned int cmd, err = 0; break; - default:; - }; + default: + err = -ENOTTY; + } return err; } @@ -773,6 +779,7 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, struct net_device *dev; char *start; + lock_sock(sk); if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) { error = -ENOTCONN; goto end; @@ -783,8 +790,6 @@ static int pppoe_sendmsg(struct kiocb *iocb, struct socket *sock, hdr.code = 0; hdr.sid = po->num; - lock_sock(sk); - dev = po->pppoe_dev; error = -EMSGSIZE; diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index f87176055d0..266e8b38fe1 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c @@ -2054,7 +2054,7 @@ end: */ static int pppol2tp_tunnel_getsockopt(struct sock *sk, struct pppol2tp_tunnel *tunnel, - int optname, int __user *val) + int optname, int *val) { int err = 0; @@ -2077,7 +2077,7 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk, */ static int pppol2tp_session_getsockopt(struct sock *sk, struct pppol2tp_session *session, - int optname, int __user *val) + int optname, int *val) { int err = 0; diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c index f3e47d0c2b3..25c52b55c38 100644 --- a/drivers/net/pppox.c +++ b/drivers/net/pppox.c @@ -73,7 +73,7 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; struct pppox_sock *po = pppox_sk(sk); - int rc = 0; + int rc; lock_sock(sk); @@ -94,12 +94,9 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) break; } default: - if (pppox_protos[sk->sk_protocol]->ioctl) - rc = pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, - arg); - - break; - }; + rc = pppox_protos[sk->sk_protocol]->ioctl ? 
+ pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY; + } release_sock(sk); return rc; diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 8be8be451ad..69da95b5ad0 100755 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -1904,7 +1904,6 @@ static void ql_get_pauseparam(struct net_device *ndev, static const struct ethtool_ops ql3xxx_ethtool_ops = { .get_settings = ql_get_settings, .get_drvinfo = ql_get_drvinfo, - .get_perm_addr = ethtool_op_get_perm_addr, .get_link = ethtool_op_get_link, .get_msglevel = ql_get_msglevel, .set_msglevel = ql_set_msglevel, diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index bb6896ae315..b85ab4a8f2a 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -725,6 +725,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + if (tp->mac_version == RTL_GIGA_MAC_VER_12) { + /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */ + mdio_write(ioaddr, 0x1f, 0x0000); + mdio_write(ioaddr, 0x0e, 0x0000); + } + tp->phy_auto_nego_reg = auto_nego; tp->phy_1000_ctrl_reg = giga_ctrl; @@ -1066,7 +1072,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_strings = rtl8169_get_strings, .get_stats_count = rtl8169_get_stats_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, @@ -2761,14 +2766,16 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) rtl8169_check_link_status(dev, tp, ioaddr); #ifdef CONFIG_R8169_NAPI - RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); - tp->intr_mask = ~tp->napi_event; - - if (likely(netif_rx_schedule_prep(dev))) - __netif_rx_schedule(dev); - else if (netif_msg_intr(tp)) { - printk(KERN_INFO "%s: interrupt %04x taken in poll\n", - dev->name, status); + if (status & tp->napi_event) { + RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event); + tp->intr_mask = ~tp->napi_event; + + if (likely(netif_rx_schedule_prep(dev))) + __netif_rx_schedule(dev); + else if (netif_msg_intr(tp)) { + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } } break; #else diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h index 4cb710bbe72..cfa26791447 100644 --- a/drivers/net/s2io-regs.h +++ b/drivers/net/s2io-regs.h @@ -747,10 +747,9 @@ struct XENA_dev_config { #define MC_ERR_REG_MIRI_CRI_ERR_1 BIT(23) #define MC_ERR_REG_SM_ERR BIT(31) #define MC_ERR_REG_ECC_ALL_SNG (BIT(2) | BIT(3) | BIT(4) | BIT(5) |\ - BIT(6) | BIT(7) | BIT(17) | BIT(19)) + BIT(17) | BIT(19)) #define MC_ERR_REG_ECC_ALL_DBL (BIT(10) | BIT(11) | BIT(12) |\ - BIT(13) | BIT(14) | BIT(15) |\ - BIT(18) | BIT(20)) + BIT(13) | BIT(18) | BIT(20)) u64 mc_err_mask; u64 mc_err_alarm; diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index afef6c0c59f..24feb00600e 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -32,12 +32,12 @@ * rx_ring_sz: This defines the number of receive blocks each ring can have. * This is also an array of size 8. * rx_ring_mode: This defines the operation mode of all 8 rings. The valid - * values are 1, 2 and 3. + * values are 1, 2. * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. * tx_fifo_len: This too is an array of 8. Each element defines the number of * Tx descriptors that can be associated with each corresponding FIFO. * intr_type: This defines the type of interrupt. The values can be 0(INTA), - * 1(MSI), 2(MSI_X). 
Default value is '0(INTA)' + * 2(MSI_X). Default value is '0(INTA)' * lro: Specifies whether to enable Large Receive Offload (LRO) or not. * Possible values '1' for enable '0' for disable. Default is '0' * lro_max_pkts: This parameter defines maximum number of packets can be @@ -84,14 +84,14 @@ #include "s2io.h" #include "s2io-regs.h" -#define DRV_VERSION "2.0.23.1" +#define DRV_VERSION "2.0.25.1" /* S2io Driver name & version. */ static char s2io_driver_name[] = "Neterion"; static char s2io_driver_version[] = DRV_VERSION; -static int rxd_size[4] = {32,48,48,64}; -static int rxd_count[4] = {127,85,85,63}; +static int rxd_size[2] = {32,48}; +static int rxd_count[2] = {127,85}; static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) { @@ -282,6 +282,7 @@ static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { ("lro_flush_due_to_max_pkts"), ("lro_avg_aggr_pkts"), ("mem_alloc_fail_cnt"), + ("pci_map_fail_cnt"), ("watchdog_timer_cnt"), ("mem_allocated"), ("mem_freed"), @@ -426,7 +427,7 @@ S2IO_PARM_INT(bimodal, 0); S2IO_PARM_INT(l3l4hdr_size, 128); /* Frequency of Rx desc syncs expressed as power of 2 */ S2IO_PARM_INT(rxsync_frequency, 3); -/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */ +/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ S2IO_PARM_INT(intr_type, 0); /* Large receive offload feature */ S2IO_PARM_INT(lro, 0); @@ -701,7 +702,7 @@ static int init_shared_mem(struct s2io_nic *nic) (u64) tmp_p_addr_next; } } - if (nic->rxd_mode >= RXD_MODE_3A) { + if (nic->rxd_mode == RXD_MODE_3B) { /* * Allocation of Storages for buffer addresses in 2BUFF mode * and the buffers as well. @@ -870,7 +871,7 @@ static void free_shared_mem(struct s2io_nic *nic) } } - if (nic->rxd_mode >= RXD_MODE_3A) { + if (nic->rxd_mode == RXD_MODE_3B) { /* Freeing buffer storage addresses in 2BUFF mode. 
*/ for (i = 0; i < config->rx_ring_num; i++) { blk_cnt = config->rx_cfg[i].num_rxd / @@ -2233,44 +2234,6 @@ static void stop_nic(struct s2io_nic *nic) writeq(val64, &bar0->adapter_control); } -static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \ - sk_buff *skb) -{ - struct net_device *dev = nic->dev; - struct sk_buff *frag_list; - void *tmp; - - /* Buffer-1 receives L3/L4 headers */ - ((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single - (nic->pdev, skb->data, l3l4hdr_size + 4, - PCI_DMA_FROMDEVICE); - - /* skb_shinfo(skb)->frag_list will have L4 data payload */ - skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE); - if (skb_shinfo(skb)->frag_list == NULL) { - nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; - DBG_PRINT(INFO_DBG, "%s: dev_alloc_skb failed\n ", dev->name); - return -ENOMEM ; - } - frag_list = skb_shinfo(skb)->frag_list; - skb->truesize += frag_list->truesize; - nic->mac_control.stats_info->sw_stat.mem_allocated - += frag_list->truesize; - frag_list->next = NULL; - tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); - frag_list->data = tmp; - skb_reset_tail_pointer(frag_list); - - /* Buffer-2 receives L4 data payload */ - ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, - frag_list->data, dev->mtu, - PCI_DMA_FROMDEVICE); - rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); - rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu); - - return SUCCESS; -} - /** * fill_rx_buffers - Allocates the Rx side skbs * @nic: device private variable @@ -2307,6 +2270,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) unsigned long flags; struct RxD_t *first_rxdp = NULL; u64 Buffer0_ptr = 0, Buffer1_ptr = 0; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; + struct swStat *stats = &nic->mac_control.stats_info->sw_stat; mac_control = &nic->mac_control; config = &nic->config; @@ -2359,7 +2325,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; } if ((rxdp->Control_1 & RXD_OWN_XENA) && - ((nic->rxd_mode >= RXD_MODE_3A) && + ((nic->rxd_mode == RXD_MODE_3B) && (rxdp->Control_2 & BIT(0)))) { mac_control->rings[ring_no].rx_curr_put_info. offset = off; @@ -2370,10 +2336,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) HEADER_802_2_SIZE + HEADER_SNAP_SIZE; if (nic->rxd_mode == RXD_MODE_1) size += NET_IP_ALIGN; - else if (nic->rxd_mode == RXD_MODE_3B) - size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; else - size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; + size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; /* allocate skb */ skb = dev_alloc_skb(size); @@ -2392,33 +2356,35 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) += skb->truesize; if (nic->rxd_mode == RXD_MODE_1) { /* 1 buffer mode - normal operation mode */ + rxdp1 = (struct RxD1*)rxdp; memset(rxdp, 0, sizeof(struct RxD1)); skb_reserve(skb, NET_IP_ALIGN); - ((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single + rxdp1->Buffer0_ptr = pci_map_single (nic->pdev, skb->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); + if( (rxdp1->Buffer0_ptr == 0) || + (rxdp1->Buffer0_ptr == + DMA_ERROR_CODE)) + goto pci_map_failed; + rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); - } else if (nic->rxd_mode >= RXD_MODE_3A) { + } else if (nic->rxd_mode == RXD_MODE_3B) { /* - * 2 or 3 buffer mode - - * Both 2 buffer mode and 3 buffer mode provides 128 + * 2 buffer mode - + * 2 buffer mode provides 128 * byte aligned receive buffers. 
- * - * 3 buffer mode provides header separation where in - * skb->data will have L3/L4 headers where as - * skb_shinfo(skb)->frag_list will have the L4 data - * payload */ + rxdp3 = (struct RxD3*)rxdp; /* save buffer pointers to avoid frequent dma mapping */ - Buffer0_ptr = ((struct RxD3*)rxdp)->Buffer0_ptr; - Buffer1_ptr = ((struct RxD3*)rxdp)->Buffer1_ptr; + Buffer0_ptr = rxdp3->Buffer0_ptr; + Buffer1_ptr = rxdp3->Buffer1_ptr; memset(rxdp, 0, sizeof(struct RxD3)); /* restore the buffer pointers for dma sync*/ - ((struct RxD3*)rxdp)->Buffer0_ptr = Buffer0_ptr; - ((struct RxD3*)rxdp)->Buffer1_ptr = Buffer1_ptr; + rxdp3->Buffer0_ptr = Buffer0_ptr; + rxdp3->Buffer1_ptr = Buffer1_ptr; ba = &mac_control->rings[ring_no].ba[block_no][off]; skb_reserve(skb, BUF0_LEN); @@ -2428,14 +2394,18 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) skb->data = (void *) (unsigned long)tmp; skb_reset_tail_pointer(skb); - if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) - ((struct RxD3*)rxdp)->Buffer0_ptr = + if (!(rxdp3->Buffer0_ptr)) + rxdp3->Buffer0_ptr = pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); else pci_dma_sync_single_for_device(nic->pdev, - (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr, + (dma_addr_t) rxdp3->Buffer0_ptr, BUF0_LEN, PCI_DMA_FROMDEVICE); + if( (rxdp3->Buffer0_ptr == 0) || + (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) + goto pci_map_failed; + rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (nic->rxd_mode == RXD_MODE_3B) { /* Two buffer mode */ @@ -2444,33 +2414,30 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) * Buffer2 will have L3/L4 header plus * L4 payload */ - ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single + rxdp3->Buffer2_ptr = pci_map_single (nic->pdev, skb->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - /* Buffer-1 will be dummy buffer. 
Not used */ - if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) { - ((struct RxD3*)rxdp)->Buffer1_ptr = + if( (rxdp3->Buffer2_ptr == 0) || + (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) + goto pci_map_failed; + + rxdp3->Buffer1_ptr = pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, PCI_DMA_FROMDEVICE); + if( (rxdp3->Buffer1_ptr == 0) || + (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { + pci_unmap_single + (nic->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); + goto pci_map_failed; } rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3 (dev->mtu + 4); - } else { - /* 3 buffer mode */ - if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) { - nic->mac_control.stats_info->sw_stat.\ - mem_freed += skb->truesize; - dev_kfree_skb_irq(skb); - if (first_rxdp) { - wmb(); - first_rxdp->Control_1 |= - RXD_OWN_XENA; - } - return -ENOMEM ; - } } rxdp->Control_2 |= BIT(0); } @@ -2505,6 +2472,11 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) } return SUCCESS; +pci_map_failed: + stats->pci_map_fail_cnt++; + stats->mem_freed += skb->truesize; + dev_kfree_skb_irq(skb); + return -ENOMEM; } static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) @@ -2515,6 +2487,8 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) struct RxD_t *rxdp; struct mac_info *mac_control; struct buffAdd *ba; + struct RxD1 *rxdp1; + struct RxD3 *rxdp3; mac_control = &sp->mac_control; for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { @@ -2526,40 +2500,30 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) continue; } if (sp->rxd_mode == RXD_MODE_1) { + rxdp1 = (struct RxD1*)rxdp; pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD1*)rxdp)->Buffer0_ptr, - dev->mtu + - HEADER_ETHERNET_II_802_3_SIZE - + HEADER_802_2_SIZE + - HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + rxdp1->Buffer0_ptr, + dev->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + + HEADER_SNAP_SIZE, + PCI_DMA_FROMDEVICE); memset(rxdp, 0, sizeof(struct RxD1)); } else if(sp->rxd_mode == RXD_MODE_3B) { + rxdp3 = (struct RxD3*)rxdp; ba = &mac_control->rings[ring_no]. 
ba[blk][j]; pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer1_ptr, - BUF1_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); - memset(rxdp, 0, sizeof(struct RxD3)); - } else { - pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN, + rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer1_ptr, - l3l4hdr_size + 4, + rxdp3->Buffer1_ptr, + BUF1_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(sp->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu, + rxdp3->Buffer2_ptr, + dev->mtu + 4, PCI_DMA_FROMDEVICE); memset(rxdp, 0, sizeof(struct RxD3)); } @@ -2756,6 +2720,8 @@ static void rx_intr_handler(struct ring_info *ring_data) struct sk_buff *skb; int pkt_cnt = 0; int i; + struct RxD1* rxdp1; + struct RxD3* rxdp3; spin_lock(&nic->rx_lock); if (atomic_read(&nic->card_state) == CARD_DOWN) { @@ -2796,32 +2762,23 @@ static void rx_intr_handler(struct ring_info *ring_data) return; } if (nic->rxd_mode == RXD_MODE_1) { + rxdp1 = (struct RxD1*)rxdp; pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD1*)rxdp)->Buffer0_ptr, - dev->mtu + - HEADER_ETHERNET_II_802_3_SIZE + - HEADER_802_2_SIZE + - HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + rxdp1->Buffer0_ptr, + dev->mtu + + HEADER_ETHERNET_II_802_3_SIZE + + HEADER_802_2_SIZE + + HEADER_SNAP_SIZE, + PCI_DMA_FROMDEVICE); } else if (nic->rxd_mode == RXD_MODE_3B) { + rxdp3 = (struct RxD3*)rxdp; pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, - BUF0_LEN, PCI_DMA_FROMDEVICE); - pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); - } else { - pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer1_ptr, - l3l4hdr_size + 4, - PCI_DMA_FROMDEVICE); + rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); pci_unmap_single(nic->pdev, (dma_addr_t) - ((struct RxD3*)rxdp)->Buffer2_ptr, - dev->mtu, PCI_DMA_FROMDEVICE); + rxdp3->Buffer2_ptr, + dev->mtu + 4, + PCI_DMA_FROMDEVICE); } prefetch(skb->data); rx_osm_handler(ring_data, rxdp); @@ -3425,23 +3382,8 @@ static void s2io_reset(struct s2io_nic * sp) /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); - if (sp->device_type == XFRAME_II_DEVICE) { - int ret; - ret = pci_set_power_state(sp->pdev, 3); - if (!ret) - ret = pci_set_power_state(sp->pdev, 0); - else { - DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n", - __FUNCTION__); - goto old_way; - } - msleep(20); - goto new_way; - } -old_way: val64 = SW_RESET_ALL; writeq(val64, &bar0->sw_reset); -new_way: if (strstr(sp->product_name, "CX4")) { msleep(750); } @@ -3731,56 +3673,6 @@ static void store_xmsi_data(struct s2io_nic *nic) } } -int s2io_enable_msi(struct s2io_nic *nic) -{ - struct XENA_dev_config __iomem *bar0 = nic->bar0; - u16 msi_ctrl, msg_val; - struct config_param *config = &nic->config; - struct net_device *dev = nic->dev; - u64 val64, tx_mat, rx_mat; - int i, err; - - val64 = readq(&bar0->pic_control); - val64 &= ~BIT(1); - writeq(val64, &bar0->pic_control); - - err = pci_enable_msi(nic->pdev); - if 
(err) { - DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n", - nic->dev->name); - return err; - } - - /* - * Enable MSI and use MSI-1 in stead of the standard MSI-0 - * for interrupt handling. - */ - pci_read_config_word(nic->pdev, 0x4c, &msg_val); - msg_val ^= 0x1; - pci_write_config_word(nic->pdev, 0x4c, msg_val); - pci_read_config_word(nic->pdev, 0x4c, &msg_val); - - pci_read_config_word(nic->pdev, 0x42, &msi_ctrl); - msi_ctrl |= 0x10; - pci_write_config_word(nic->pdev, 0x42, msi_ctrl); - - /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */ - tx_mat = readq(&bar0->tx_mat0_n[0]); - for (i=0; i<config->tx_fifo_num; i++) { - tx_mat |= TX_MAT_SET(i, 1); - } - writeq(tx_mat, &bar0->tx_mat0_n[0]); - - rx_mat = readq(&bar0->rx_mat); - for (i=0; i<config->rx_ring_num; i++) { - rx_mat |= RX_MAT_SET(i, 1); - } - writeq(rx_mat, &bar0->rx_mat); - - dev->irq = nic->pdev->irq; - return 0; -} - static int s2io_enable_msi_x(struct s2io_nic *nic) { struct XENA_dev_config __iomem *bar0 = nic->bar0; @@ -4001,6 +3893,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) struct mac_info *mac_control; struct config_param *config; int offload_type; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; mac_control = &sp->mac_control; config = &sp->config; @@ -4085,11 +3978,18 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Buffer_Pointer = pci_map_single(sp->pdev, sp->ufo_in_band_v, sizeof(u64), PCI_DMA_TODEVICE); + if((txdp->Buffer_Pointer == 0) || + (txdp->Buffer_Pointer == DMA_ERROR_CODE)) + goto pci_map_failed; txdp++; } txdp->Buffer_Pointer = pci_map_single (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); + if((txdp->Buffer_Pointer == 0) || + (txdp->Buffer_Pointer == DMA_ERROR_CODE)) + goto pci_map_failed; + txdp->Host_Control = (unsigned long) skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); if (offload_type == SKB_GSO_UDP) @@ -4146,6 +4046,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_irqrestore(&sp->tx_lock, flags); return 0; +pci_map_failed: + stats->pci_map_fail_cnt++; + netif_stop_queue(dev); + stats->mem_freed += skb->truesize; + dev_kfree_skb(skb); + spin_unlock_irqrestore(&sp->tx_lock, flags); + return 0; } static void @@ -4186,39 +4093,6 @@ static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) return 0; } -static irqreturn_t s2io_msi_handle(int irq, void *dev_id) -{ - struct net_device *dev = (struct net_device *) dev_id; - struct s2io_nic *sp = dev->priv; - int i; - struct mac_info *mac_control; - struct config_param *config; - - atomic_inc(&sp->isr_cnt); - mac_control = &sp->mac_control; - config = &sp->config; - DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__); - - /* If Intr is because of Rx Traffic */ - for (i = 0; i < config->rx_ring_num; i++) - rx_intr_handler(&mac_control->rings[i]); - - /* If Intr is because of Tx Traffic */ - for (i = 0; i < config->tx_fifo_num; i++) - tx_intr_handler(&mac_control->fifos[i]); - - /* - * If the Rx buffer count is below the panic threshold then - * reallocate the buffers from the interrupt handler itself, - * else schedule a tasklet to reallocate the buffers. 
- */ - for (i = 0; i < config->rx_ring_num; i++) - s2io_chk_rx_buffers(sp, i); - - atomic_dec(&sp->isr_cnt); - return IRQ_HANDLED; -} - static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) { struct ring_info *ring = (struct ring_info *)dev_id; @@ -4927,19 +4801,17 @@ static void s2io_ethtool_gringparam(struct net_device *dev, ering->rx_max_pending = MAX_RX_DESC_1; else if (sp->rxd_mode == RXD_MODE_3B) ering->rx_max_pending = MAX_RX_DESC_2; - else if (sp->rxd_mode == RXD_MODE_3A) - ering->rx_max_pending = MAX_RX_DESC_3; ering->tx_max_pending = MAX_TX_DESC; - for (i = 0 ; i < sp->config.tx_fifo_num ; i++) { + for (i = 0 ; i < sp->config.tx_fifo_num ; i++) tx_desc_count += sp->config.tx_cfg[i].fifo_len; - } + DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds); ering->tx_pending = tx_desc_count; rx_desc_count = 0; - for (i = 0 ; i < sp->config.rx_ring_num ; i++) { + for (i = 0 ; i < sp->config.rx_ring_num ; i++) rx_desc_count += sp->config.rx_cfg[i].num_rxd; - } + ering->rx_pending = rx_desc_count; ering->rx_mini_max_pending = 0; @@ -5923,6 +5795,7 @@ static void s2io_get_ethtool_stats(struct net_device *dev, else tmp_stats[i++] = 0; tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt; + tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt; tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt; tmp_stats[i++] = stat_info->sw_stat.mem_allocated; tmp_stats[i++] = stat_info->sw_stat.mem_freed; @@ -6266,9 +6139,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, u64 *temp2, int size) { struct net_device *dev = sp->dev; - struct sk_buff *frag_list; + struct swStat *stats = &sp->mac_control.stats_info->sw_stat; if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) { + struct RxD1 *rxdp1 = (struct RxD1 *)rxdp; /* allocate skb */ if (*skb) { DBG_PRINT(INFO_DBG, "SKB is not NULL\n"); @@ -6277,7 +6151,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, * using same mapped address for the Rxd * buffer pointer */ - ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0; + rxdp1->Buffer0_ptr = *temp0; } else { *skb = dev_alloc_skb(size); if (!(*skb)) { @@ -6294,18 +6168,23 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, * such it will be used for next rxd whose * Host Control is NULL */ - ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 = + rxdp1->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, (*skb)->data, size - NET_IP_ALIGN, PCI_DMA_FROMDEVICE); + if( (rxdp1->Buffer0_ptr == 0) || + (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) { + goto memalloc_failed; + } rxdp->Host_Control = (unsigned long) (*skb); } } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { + struct RxD3 *rxdp3 = (struct RxD3 *)rxdp; /* Two buffer Mode */ if (*skb) { - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2; - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0; - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1; + rxdp3->Buffer2_ptr = *temp2; + rxdp3->Buffer0_ptr = *temp0; + rxdp3->Buffer1_ptr = *temp1; } else { *skb = dev_alloc_skb(size); if (!(*skb)) { @@ -6318,73 +6197,50 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, } sp->mac_control.stats_info->sw_stat.mem_allocated += (*skb)->truesize; - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 = + rxdp3->Buffer2_ptr = *temp2 = pci_map_single(sp->pdev, (*skb)->data, dev->mtu + 4, PCI_DMA_FROMDEVICE); - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 = + if( (rxdp3->Buffer2_ptr == 0) || + (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) { + goto memalloc_failed; + } + 
rxdp3->Buffer0_ptr = *temp0 = pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, PCI_DMA_FROMDEVICE); + if( (rxdp3->Buffer0_ptr == 0) || + (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) { + pci_unmap_single (sp->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, PCI_DMA_FROMDEVICE); + goto memalloc_failed; + } rxdp->Host_Control = (unsigned long) (*skb); /* Buffer-1 will be dummy buffer not used */ - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 = + rxdp3->Buffer1_ptr = *temp1 = pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); - } - } else if ((rxdp->Host_Control == 0)) { - /* Three buffer mode */ - if (*skb) { - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0; - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1; - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2; - } else { - *skb = dev_alloc_skb(size); - if (!(*skb)) { - DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); - DBG_PRINT(INFO_DBG, "memory to allocate "); - DBG_PRINT(INFO_DBG, "3 buf mode SKBs\n"); - sp->mac_control.stats_info->sw_stat. \ - mem_alloc_fail_cnt++; - return -ENOMEM; - } - sp->mac_control.stats_info->sw_stat.mem_allocated - += (*skb)->truesize; - ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 = - pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, - PCI_DMA_FROMDEVICE); - /* Buffer-1 receives L3/L4 headers */ - ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 = - pci_map_single( sp->pdev, (*skb)->data, - l3l4hdr_size + 4, PCI_DMA_FROMDEVICE); - /* - * skb_shinfo(skb)->frag_list will have L4 - * data payload - */ - skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu + - ALIGN_SIZE); - if (skb_shinfo(*skb)->frag_list == NULL) { - DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \ - failed\n ", dev->name); - sp->mac_control.stats_info->sw_stat. \ - mem_alloc_fail_cnt++; - return -ENOMEM ; + if( (rxdp3->Buffer1_ptr == 0) || + (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { + pci_unmap_single (sp->pdev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, PCI_DMA_FROMDEVICE); + pci_unmap_single (sp->pdev, + (dma_addr_t)rxdp3->Buffer2_ptr, + dev->mtu + 4, PCI_DMA_FROMDEVICE); + goto memalloc_failed; } - frag_list = skb_shinfo(*skb)->frag_list; - frag_list->next = NULL; - sp->mac_control.stats_info->sw_stat.mem_allocated - += frag_list->truesize; - /* - * Buffer-2 receives L4 data payload - */ - ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 = - pci_map_single( sp->pdev, frag_list->data, - dev->mtu, PCI_DMA_FROMDEVICE); } } return 0; + memalloc_failed: + stats->pci_map_fail_cnt++; + stats->mem_freed += (*skb)->truesize; + dev_kfree_skb(*skb); + return -ENOMEM; } + static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, int size) { @@ -6395,10 +6251,6 @@ static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp, rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4); - } else { - rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); - rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); - rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu); } } @@ -6420,8 +6272,6 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) size += NET_IP_ALIGN; else if (sp->rxd_mode == RXD_MODE_3B) size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; - else - size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; for (i = 0; i < config->rx_ring_num; i++) { blk_cnt = config->rx_cfg[i].num_rxd / @@ -6431,7 +6281,7 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp) for (k = 0; k < rxd_count[sp->rxd_mode]; k++) { rxdp = mac_control->rings[i]. 
rx_blocks[j].rxds[k].virt_addr; - if(sp->rxd_mode >= RXD_MODE_3A) + if(sp->rxd_mode == RXD_MODE_3B) ba = &mac_control->rings[i].ba[j][k]; if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,(u64 *)&temp0_64, @@ -6458,9 +6308,7 @@ static int s2io_add_isr(struct s2io_nic * sp) struct net_device *dev = sp->dev; int err = 0; - if (sp->intr_type == MSI) - ret = s2io_enable_msi(sp); - else if (sp->intr_type == MSI_X) + if (sp->intr_type == MSI_X) ret = s2io_enable_msi_x(sp); if (ret) { DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name); @@ -6471,16 +6319,6 @@ static int s2io_add_isr(struct s2io_nic * sp) store_xmsi_data(sp); /* After proper initialization of H/W, register ISR */ - if (sp->intr_type == MSI) { - err = request_irq((int) sp->pdev->irq, s2io_msi_handle, - IRQF_SHARED, sp->name, dev); - if (err) { - pci_disable_msi(sp->pdev); - DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n", - dev->name); - return -1; - } - } if (sp->intr_type == MSI_X) { int i, msix_tx_cnt=0,msix_rx_cnt=0; @@ -6567,14 +6405,6 @@ static void s2io_rem_isr(struct s2io_nic * sp) pci_disable_msix(sp->pdev); } else { free_irq(sp->pdev->irq, dev); - if (sp->intr_type == MSI) { - u16 val; - - pci_disable_msi(sp->pdev); - pci_read_config_word(sp->pdev, 0x4c, &val); - val ^= 0x1; - pci_write_config_word(sp->pdev, 0x4c, val); - } } /* Waiting till all Interrupt handlers are complete */ cnt = 0; @@ -6907,6 +6737,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) } /* Updating statistics */ + sp->stats.rx_packets++; rxdp->Host_Control = 0; if (sp->rxd_mode == RXD_MODE_1) { int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); @@ -6914,7 +6745,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) sp->stats.rx_bytes += len; skb_put(skb, len); - } else if (sp->rxd_mode >= RXD_MODE_3A) { + } else if (sp->rxd_mode == RXD_MODE_3B) { int get_block = ring_data->rx_curr_get_info.block_index; int get_off = ring_data->rx_curr_get_info.offset; int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); @@ -6924,18 +6755,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) struct buffAdd *ba = &ring_data->ba[get_block][get_off]; sp->stats.rx_bytes += buf0_len + buf2_len; memcpy(buff, ba->ba_0, buf0_len); - - if (sp->rxd_mode == RXD_MODE_3A) { - int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2); - - skb_put(skb, buf1_len); - skb->len += buf2_len; - skb->data_len += buf2_len; - skb_put(skb_shinfo(skb)->frag_list, buf2_len); - sp->stats.rx_bytes += buf1_len; - - } else - skb_put(skb, buf2_len); + skb_put(skb, buf2_len); } if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || @@ -7131,7 +6951,7 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) *dev_intr_type = INTA; } #else - if (*dev_intr_type > MSI_X) { + if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. 
" "Defaulting to INTA\n"); *dev_intr_type = INTA; @@ -7145,10 +6965,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) *dev_intr_type = INTA; } - if (rx_ring_mode > 3) { + if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) { DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n"); - DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n"); - rx_ring_mode = 3; + DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n"); + rx_ring_mode = 1; } return SUCCESS; } @@ -7240,28 +7060,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) pci_disable_device(pdev); return -ENOMEM; } - if (dev_intr_type != MSI_X) { - if (pci_request_regions(pdev, s2io_driver_name)) { - DBG_PRINT(ERR_DBG, "Request Regions failed\n"); - pci_disable_device(pdev); - return -ENODEV; - } - } - else { - if (!(request_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0), s2io_driver_name))) { - DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n"); - pci_disable_device(pdev); - return -ENODEV; - } - if (!(request_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2), s2io_driver_name))) { - DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n"); - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - pci_disable_device(pdev); - return -ENODEV; - } + if ((ret = pci_request_regions(pdev, s2io_driver_name))) { + DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret); + pci_disable_device(pdev); + return -ENODEV; } dev = alloc_etherdev(sizeof(struct s2io_nic)); @@ -7288,8 +7090,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) sp->rxd_mode = RXD_MODE_1; if (rx_ring_mode == 2) sp->rxd_mode = RXD_MODE_3B; - if (rx_ring_mode == 3) - sp->rxd_mode = RXD_MODE_3A; sp->intr_type = dev_intr_type; @@ -7565,10 +7365,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n", dev->name); break; - case RXD_MODE_3A: - DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n", - dev->name); - break; } if (napi) @@ -7577,9 +7373,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) case INTA: DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); break; - case MSI: - DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name); - break; case MSI_X: DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); break; @@ -7619,14 +7412,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) mem_alloc_failed: free_shared_mem(sp); pci_disable_device(pdev); - if (dev_intr_type != MSI_X) - pci_release_regions(pdev); - else { - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - release_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - } + pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(dev); @@ -7661,14 +7447,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev) free_shared_mem(sp); iounmap(sp->bar0); iounmap(sp->bar1); - if (sp->intr_type != MSI_X) - pci_release_regions(pdev); - else { - release_mem_region(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - release_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - } + pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); free_netdev(dev); pci_disable_device(pdev); diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 3887fe63a90..92983ee7df8 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h @@ -74,6 +74,10 @@ static int 
debug_level = ERR_DBG; /* DEBUG message print. */ #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) +#ifndef DMA_ERROR_CODE +#define DMA_ERROR_CODE (~(dma_addr_t)0x0) +#endif + /* Protocol assist features of the NIC */ #define L3_CKSUM_OK 0xFFFF #define L4_CKSUM_OK 0xFFFF @@ -97,6 +101,7 @@ struct swStat { unsigned long long num_aggregations; /* Other statistics */ unsigned long long mem_alloc_fail_cnt; + unsigned long long pci_map_fail_cnt; unsigned long long watchdog_timer_cnt; unsigned long long mem_allocated; unsigned long long mem_freed; @@ -575,8 +580,7 @@ struct RxD_block { #define SIZE_OF_BLOCK 4096 #define RXD_MODE_1 0 /* One Buffer mode */ -#define RXD_MODE_3A 1 /* Three Buffer mode */ -#define RXD_MODE_3B 2 /* Two Buffer mode */ +#define RXD_MODE_3B 1 /* Two Buffer mode */ /* Structure to hold virtual addresses of Buf0 and Buf1 in * 2buf mode. */ @@ -876,7 +880,6 @@ struct s2io_nic { u16 lro_max_aggr_per_sess; #define INTA 0 -#define MSI 1 #define MSI_X 2 u8 intr_type; @@ -1020,8 +1023,6 @@ static int s2io_poll(struct net_device *dev, int *budget); static void s2io_init_pci(struct s2io_nic * sp); static int s2io_set_mac_addr(struct net_device *dev, u8 * addr); static void s2io_alarm_handle(unsigned long data); -static int s2io_enable_msi(struct s2io_nic *nic); -static irqreturn_t s2io_msi_handle(int irq, void *dev_id); static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id); static irqreturn_t diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 5b7284c955d..872cb1cc9c4 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -1402,7 +1402,6 @@ static struct ethtool_ops sc92031_ethtool_ops = { .get_strings = sc92031_ethtool_get_strings, .get_stats_count = sc92031_ethtool_get_stats_count, .get_ethtool_stats = sc92031_ethtool_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, .get_ufo = ethtool_op_get_ufo, }; diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index ec2ad9f0efa..d470b19c081 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -1593,6 +1593,9 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, pci_name(pdev)); isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL); + if (!isa_bridge) + isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0966, NULL); + if (!isa_bridge) { net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n", pci_name(pdev)); diff --git a/drivers/net/skfp/pmf.c b/drivers/net/skfp/pmf.c index efc639c013f..ea85de91823 100644 --- a/drivers/net/skfp/pmf.c +++ b/drivers/net/skfp/pmf.c @@ -575,7 +575,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, int sp_len ; /* - * skip if errror + * skip if error */ if (pcon->pc_err) return ; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 77669294656..e3d8520209b 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -821,7 +821,6 @@ static const struct ethtool_ops skge_ethtool_ops = { .phys_id = skge_phys_id, .get_stats_count = skge_get_stats_count, .get_ethtool_stats = skge_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; /* diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 13f08a390e1..e7a2eadcc3b 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -3548,7 +3548,6 @@ static const struct ethtool_ops sky2_ethtool_ops = { .phys_id = sky2_phys_id, .get_stats_count = sky2_get_stats_count, .get_ethtool_stats = sky2_get_ethtool_stats, - .get_perm_addr = ethtool_op_get_perm_addr, }; #ifdef CONFIG_SKY2_DEBUG diff --git 
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index f8429449dc1..6ff3a1627af 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -299,7 +299,7 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 
 #define SMC_CAN_USE_8BIT 1
 #define SMC_CAN_USE_16BIT 1
-#define SMC_CAN_USE_32BIT 1
+#define SMC_CAN_USE_32BIT 0
 
 #define SMC_inb(a, r) inb((a) + (r))
 #define SMC_inw(a, r) inw((a) + (r))
@@ -310,8 +310,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 
 #endif  /* BOARDS */
 
-#define set_irq_type(irq, type) do {} while (0)
-
 #elif defined(CONFIG_M32R)
 
 #define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index af0c9831074..a8f2af8f778 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1586,7 +1586,6 @@ static const struct ethtool_ops ethtool_ops = {
 	.get_link = get_link,
 	.get_msglevel = get_msglevel,
 	.set_msglevel = set_msglevel,
-	.get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 053b7cb0d94..68e4f660367 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -99,8 +99,7 @@ static char lancestr[] = "LANCE";
 #include <asm/byteorder.h>	/* Used by the checksum routines */
 #include <asm/idprom.h>
 #include <asm/sbus.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
+#include <asm/prom.h>
 #include <asm/auxio.h>		/* For tpe-link-test? setting */
 #include <asm/irq.h>
 
@@ -1326,6 +1325,7 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
 					   struct sbus_dev *lebuffer)
 {
 	static unsigned version_printed;
+	struct device_node *dp = sdev->ofdev.node;
 	struct net_device *dev;
 	struct lance_private *lp;
 	int    i;
@@ -1389,54 +1389,46 @@ static int __devinit sparc_lance_probe_one(struct sbus_dev *sdev,
 		lp->rx = lance_rx_dvma;
 		lp->tx = lance_tx_dvma;
 	}
-	lp->busmaster_regval = prom_getintdefault(sdev->prom_node,
-						  "busmaster-regval",
-						  (LE_C3_BSWP | LE_C3_ACON |
-						   LE_C3_BCON));
+	lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval",
+						     (LE_C3_BSWP |
+						      LE_C3_ACON |
+						      LE_C3_BCON));
 
 	lp->name = lancestr;
 	lp->ledma = ledma;
 
 	lp->burst_sizes = 0;
 	if (lp->ledma) {
-		char prop[6];
+		struct device_node *ledma_dp = ledma->sdev->ofdev.node;
+		const char *prop;
 		unsigned int sbmask;
 		u32 csr;
 
 		/* Find burst-size property for ledma */
-		lp->burst_sizes = prom_getintdefault(ledma->sdev->prom_node,
-						     "burst-sizes", 0);
+		lp->burst_sizes = of_getintprop_default(ledma_dp,
+							"burst-sizes", 0);
 
 		/* ledma may be capable of fast bursts, but sbus may not. */
-		sbmask = prom_getintdefault(ledma->sdev->bus->prom_node,
-					    "burst-sizes", DMA_BURSTBITS);
+		sbmask = of_getintprop_default(ledma_dp, "burst-sizes",
					       DMA_BURSTBITS);
 		lp->burst_sizes &= sbmask;
 
 		/* Get the cable-selection property */
-		memset(prop, 0, sizeof(prop));
-		prom_getstring(ledma->sdev->prom_node, "cable-selection",
-			       prop, sizeof(prop));
-		if (prop[0] == 0) {
-			int topnd, nd;
+		prop = of_get_property(ledma_dp, "cable-selection", NULL);
+		if (!prop || prop[0] == '\0') {
+			struct device_node *nd;
 
-			printk(KERN_INFO "SunLance: using auto-carrier-detection.\n");
+			printk(KERN_INFO "SunLance: using "
+			       "auto-carrier-detection.\n");
 
-			/* Is this found at /options .attributes in all
-			 * Prom versions? XXX
-			 */
-			topnd = prom_getchild(prom_root_node);
-
-			nd = prom_searchsiblings(topnd, "options");
+			nd = of_find_node_by_path("/options");
 			if (!nd)
 				goto no_link_test;
 
-			if (!prom_node_has_property(nd, "tpe-link-test?"))
+			prop = of_get_property(nd, "tpe-link-test?", NULL);
+			if (!prop)
 				goto no_link_test;
 
-			memset(prop, 0, sizeof(prop));
-			prom_getstring(nd, "tpe-link-test?", prop,
-				       sizeof(prop));
-
 			if (strcmp(prop, "true")) {
 				printk(KERN_NOTICE "SunLance: warning: overriding option "
 				       "'tpe-link-test?'\n");
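The tg3 hunks above move pci_save_state()/pci_restore_state() ahead of the netif_running() checks, so PCI config space (and, per the new comment, the MSI address and data when MSI is in use) is handled even while the interface is down. A minimal sketch of the resulting ordering is below; example_suspend/example_resume are invented names, and the tg3 locking and power-state error handling are deliberately left out.

#include <linux/pci.h>
#include <linux/netdevice.h>

/* Sketch of the reordered flow, not the literal tg3 code. */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_save_state(pdev);		/* always, even if the NIC is down */

	if (!netif_running(dev))
		return 0;

	/* ... stop the device, then enter the requested power state ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_restore_state(pdev);	/* mirrors the suspend path */

	if (!netif_running(dev))
		return 0;

	/* ... bring the device back up ... */
	return 0;
}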
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index f87d76981ab..eca984f89bb 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1471,14 +1471,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
 			sa_offset = 2;		/* Grrr, damn Matrox boards. */
 			multiport_cnt = 4;
 		}
-#ifdef CONFIG_DDB5477
-		if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
-			/* DDB5477 MAC address in first EEPROM locations. */
-			sa_offset = 0;
-			/* No media table either */
-			tp->flags &= ~HAS_MEDIA_TABLE;
-		}
-#endif
 #ifdef CONFIG_MIPS_COBALT
 		if ((pdev->bus->number == 0) &&
 		    ((PCI_SLOT(pdev->devfn) == 7) ||
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index a8994c7b858..64bef7c1236 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -379,7 +379,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
 	.get_stats_count = uec_get_stats_count,
 	.get_strings = uec_get_strings,
 	.get_ethtool_stats = uec_get_ethtool_stats,
-	.get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index 5f8c2d30a32..6c257b88ce5 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -272,7 +272,8 @@ int __init uec_mdio_init(void)
 	return of_register_platform_driver(&uec_mdio_driver);
 }
 
-void __exit uec_mdio_exit(void)
+/* called from __init ucc_geth_init, therefore can not be __exit */
+void uec_mdio_exit(void)
 {
 	of_unregister_platform_driver(&uec_mdio_driver);
 }
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index a05fd97e5bc..04cba6bf3d5 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -768,11 +768,13 @@ done:
 static void write_bulk_callback(struct urb *urb)
 {
 	pegasus_t *pegasus = urb->context;
-	struct net_device *net = pegasus->net;
+	struct net_device *net;
 
 	if (!pegasus)
 		return;
 
+	net = pegasus->net;
+
 	if (!netif_device_present(net) || !netif_running(net))
 		return;
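The pegasus hunk above defers the pegasus->net dereference until after the NULL check on the context pointer; initializing the local in its declaration read through a pointer the very next statement tested for NULL. A tiny sketch of the same ordering rule, with made-up names (struct ctx, example_callback) standing in for the pegasus types:

#include <linux/usb.h>
#include <linux/netdevice.h>

struct ctx {
	struct net_device *net;
};

/* Hypothetical URB completion handler; only the ordering mirrors the fix. */
static void example_callback(struct urb *urb)
{
	struct ctx *ctx = urb->context;
	struct net_device *net;	/* not initialized from ctx yet */

	if (!ctx)
		return;

	net = ctx->net;		/* safe: ctx is known non-NULL here */

	if (!netif_device_present(net) || !netif_running(net))
		return;
	/* ... */
}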
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index f51c2c138f1..b56dff26772 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -42,7 +42,13 @@ static int max_interrupt_work = 20;
 
 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
    Setting to > 1518 effectively disables this feature. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+	|| defined(CONFIG_SPARC) || defined(__ia64__) \
+	|| defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
 static int rx_copybreak;
+#endif
 
 /* Work-around for broken BIOSes: they are unable to get the chip back out of
    power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
@@ -1805,7 +1811,6 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 	.set_wol = rhine_set_wol,
 	.get_sg = ethtool_op_get_sg,
 	.get_tx_csum = ethtool_op_get_tx_csum,
-	.get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f331843d110..93574add406 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1613,7 +1613,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
 	if (status & ISR_TXSTLI) {
 		struct mac_regs __iomem * regs = vptr->mac_regs;
 
-		printk(KERN_ERR "TD structure errror TDindex=%hx\n", readw(&regs->TDIdx[0]));
+		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
 		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
 		writew(TRDCSR_RUN, &regs->TDCSRClr);
 		netif_stop_queue(vptr->dev);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 15b6e07a438..071a64cacd5 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -212,14 +212,13 @@ static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
 		pvc_p = &(*pvc_p)->next;
 	}
 
-	pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC);
+	pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
 #ifdef DEBUG_PVC
 	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
 #endif
 	if (!pvc)
 		return NULL;
 
-	memset(pvc, 0, sizeof(pvc_device));
 	pvc->dlci = dlci;
 	pvc->frad = dev;
 	pvc->next = *pvc_p;	/* Put it in the chain */
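The hdlc_fr hunk folds the kmalloc()+memset() pair into a single kzalloc(), which returns already-zeroed memory (or NULL on failure, so the existing !pvc check still applies). A minimal sketch of the same conversion; struct foo and foo_alloc are made-up names, not taken from hdlc_fr:

#include <linux/slab.h>

struct foo {
	int dlci;
	struct foo *next;
};

static struct foo *foo_alloc(void)
{
	/* Before: p = kmalloc(sizeof(*p), GFP_ATOMIC);
	 *         if (p) memset(p, 0, sizeof(*p));
	 * After:  one call hands back zeroed memory.
	 */
	return kzalloc(sizeof(struct foo), GFP_ATOMIC);
}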
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index d779199c30d..b37f1e34870 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1638,7 +1638,7 @@ void bcm43xx_phy_set_baseband_attenuation(struct bcm43xx_private *bcm,
 		return;
 	}
 
-	if (phy->analog == 1) {
+	if (phy->analog > 1) {
 		value = bcm43xx_phy_read(bcm, 0x0060) & ~0x003C;
 		value |= (baseband_attenuation << 2) & 0x003C;
 	} else {
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index cea85894b7f..e61c6d5ba1a 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -466,7 +466,7 @@ static int rtl8187_add_interface(struct ieee80211_hw *dev,
 		return -EOPNOTSUPP;
 	}
 
-	priv->hwaddr = conf->mac_addr;
+	priv->hwaddr = conf->mac_addr ? conf->mac_addr : dev->wiphy->perm_addr;
 
 	return 0;
 }
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index f6c487aa824..26869d107e5 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -822,7 +822,7 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
 		cs->control |= ZD_CS_MULTICAST;
 
 	/* PS-POLL */
-	if (stype == IEEE80211_STYPE_PSPOLL)
+	if (ftype == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL)
 		cs->control |= ZD_CS_PS_POLL_FRAME;
 
 	/* Unicast data frames over the threshold should have RTS */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 489f69c5d6c..4445810335a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -566,6 +566,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (notify)
 		notify_remote_via_irq(np->netdev->irq);
 
+	np->stats.tx_bytes += skb->len;
+	np->stats.tx_packets++;
+
+	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
 	xennet_tx_buf_gc(dev);
 
 	if (!netfront_tx_slot_available(np))
@@ -573,9 +577,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_unlock_irq(&np->tx_lock);
 
-	np->stats.tx_bytes += skb->len;
-	np->stats.tx_packets++;
-
 	return 0;
 
 drop:
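The xen-netfront hunk moves the tx_bytes/tx_packets accounting to before xennet_tx_buf_gc(), because, as the new comment says, the garbage-collect pass may free the skb, after which skb->len must not be read. The general rule is sketched below with real skb/stats types but an invented helper name (example_finish_tx); it is an illustration of the ordering, not netfront code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_finish_tx(struct sk_buff *skb,
			      struct net_device_stats *stats)
{
	/* Account first: once the skb has been handed on or freed,
	 * skb->len is off limits. */
	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	dev_kfree_skb(skb);	/* after this point, do not touch skb */
}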