From 88393161210493e317ae391696ee8ef463cb3c23 Mon Sep 17 00:00:00 2001 From: Thomas Weber Date: Tue, 16 Mar 2010 11:47:56 +0100 Subject: Fix typos in comments [Ss]ytem => [Ss]ystem udpate => update paramters => parameters orginal => original Signed-off-by: Thomas Weber Acked-by: Randy Dunlap Signed-off-by: Jiri Kosina --- drivers/infiniband/hw/ipath/ipath_iba6110.c | 2 +- drivers/infiniband/hw/ipath/ipath_iba6120.c | 4 ++-- drivers/infiniband/hw/ipath/ipath_iba7220.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c index 37d12e5efa4..1d7aea132a0 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6110.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c @@ -1474,7 +1474,7 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd) /** * ipath_pe_put_tid - write a TID in chip * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to udpate + * @tidptr: pointer to the expected TID (in chip) to update * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing * diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c index fbf8c5379ea..4b4a30b0dab 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c @@ -1328,7 +1328,7 @@ bail: /** * ipath_pe_put_tid - write a TID in chip * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to udpate + * @tidptr: pointer to the expected TID (in chip) to update * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing * @@ -1394,7 +1394,7 @@ static void ipath_pe_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, /** * ipath_pe_put_tid_2 - write a TID in chip, Revision 2 or higher * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to udpate + * @tidptr: pointer to the expected TID (in chip) to update * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing * diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c index a805402dd4a..34b778ed97f 100644 --- a/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c @@ -1738,7 +1738,7 @@ bail: /** * ipath_7220_put_tid - write a TID to the chip * @dd: the infinipath device - * @tidptr: pointer to the expected TID (in chip) to udpate + * @tidptr: pointer to the expected TID (in chip) to update * @tidtype: 0 for eager, 1 for expected * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing * -- cgit v1.2.3-70-g09d2 From 3e4aa12f8a81506c44f04b4f0eb7663981c5a282 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 22 Mar 2010 03:21:39 +0000 Subject: ipoib: remove addrlen check for mc addresses Finally this bit can be removed. Now that the bonding driver has been changed/fixed (32a806c194ea112cfab00f558482dd97bee5e44e in net-next-2.6), it is no longer possible for an address with a length different from dev->addr_len to be present in the list. Remove this check; in the new mc_list there will be no addrlen in the record. Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d41ea27be5e..19eba3c877c 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -767,11 +767,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev) } } -static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen, - const u8 *broadcast) +static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast) { - if (addrlen != INFINIBAND_ALEN) - return 0; /* reserved QPN, prefix, scope */ if (memcmp(addr, broadcast, 6)) return 0; @@ -815,7 +812,6 @@ void ipoib_mcast_restart_task(struct work_struct *work) union ib_gid mgid; if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, - mclist->dmi_addrlen, dev->broadcast)) continue; -- cgit v1.2.3-70-g09d2 From 22bedad3ce112d5ca1eaf043d4990fa2ed698c87 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 1 Apr 2010 21:22:57 +0000 Subject: net: convert multicast list to list_head Converts the list, and the core code that manipulates it, to be the same as uc_list. +uses two functions for adding/removing mc addresses (a normal and a "global" variant) instead of a function parameter. +removes dev_mcast.c completely. +exposes netdev_hw_addr_list_* macros along with __hw_addr_* functions for manipulating lists in a sandbox (used in the bonding and 802.11 drivers) Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/infiniband/hw/nes/nes_nic.c | 7 +- drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 9 +- drivers/media/dvb/dvb-core/dvb_net.c | 10 +- drivers/net/3c505.c | 7 +- drivers/net/3c523.c | 7 +- drivers/net/3c527.c | 6 +- drivers/net/7990.c | 6 +- drivers/net/8139cp.c | 6 +- drivers/net/8139too.c | 6 +- drivers/net/82596.c | 6 +- drivers/net/a2065.c | 6 +- drivers/net/amd8111e.c | 6 +- drivers/net/arm/am79c961a.c | 6 +- drivers/net/arm/at91_ether.c | 6 +- drivers/net/arm/ixp4xx_eth.c | 8 +- drivers/net/arm/ks8695net.c | 10 +- drivers/net/at1700.c | 6 +- drivers/net/atl1c/atl1c_main.c | 6 +- drivers/net/atl1e/atl1e_main.c | 6 +- drivers/net/atlx/atl2.c | 6 +- drivers/net/atlx/atlx.c | 6 +- drivers/net/atp.c | 6 +- drivers/net/au1000_eth.c | 6 +- drivers/net/b44.c | 6 +- drivers/net/bcm63xx_enet.c | 6 +- drivers/net/benet/be_cmds.c | 6 +- drivers/net/bfin_mac.c | 6 +- drivers/net/bmac.c | 12 +- drivers/net/bnx2.c | 5 +- drivers/net/bnx2x_main.c | 18 +- drivers/net/bonding/bond_main.c | 157 +++++-------- drivers/net/bonding/bonding.h | 2 +- drivers/net/cassini.c | 12 +- drivers/net/chelsio/pm3393.c | 7 +- drivers/net/cpmac.c | 16 +- drivers/net/cris/eth_v10.c | 6 +- drivers/net/cxgb3/xgmac.c | 8 +- drivers/net/davinci_emac.c | 7 +- drivers/net/declance.c | 6 +- drivers/net/defxx.c | 6 +- drivers/net/depca.c | 6 +- drivers/net/dl2k.c | 6 +- drivers/net/dm9000.c | 6 +- drivers/net/e100.c | 6 +- drivers/net/e1000/e1000_main.c | 7 +- drivers/net/e1000e/netdev.c | 7 +- drivers/net/eepro.c | 6 +- drivers/net/eexpress.c | 6 +- drivers/net/ehea/ehea_main.c | 6 +- drivers/net/enic/enic_main.c | 6 +- drivers/net/epic100.c | 6 +- drivers/net/ethoc.c | 6 +- drivers/net/ewrk3.c | 6 +- drivers/net/fealnx.c | 6 +- drivers/net/fec.c | 10 +- drivers/net/fec_mpc52xx.c | 6 +- drivers/net/forcedeth.c | 10 +- drivers/net/fs_enet/mac-fcc.c | 6 +- drivers/net/fs_enet/mac-fec.c | 6 +- drivers/net/fs_enet/mac-scc.c | 6 +-
drivers/net/gianfar.c | 9 +- drivers/net/greth.c | 6 +- drivers/net/hamachi.c | 8 +- drivers/net/hp100.c | 6 +- drivers/net/ibm_newemac/core.c | 11 +- drivers/net/ibmlana.c | 8 +- drivers/net/ibmveth.c | 6 +- drivers/net/igb/igb_main.c | 6 +- drivers/net/igbvf/netdev.c | 6 +- drivers/net/ioc3-eth.c | 6 +- drivers/net/ipg.c | 6 +- drivers/net/iseries_veth.c | 6 +- drivers/net/ixgb/ixgb_main.c | 6 +- drivers/net/ixgbe/ixgbe_common.c | 6 +- drivers/net/ixgbevf/vf.c | 6 +- drivers/net/jme.c | 6 +- drivers/net/korina.c | 6 +- drivers/net/ks8851.c | 6 +- drivers/net/ks8851_mll.c | 12 +- drivers/net/ksz884x.c | 11 +- drivers/net/lib82596.c | 6 +- drivers/net/lib8390.c | 6 +- drivers/net/ll_temac_main.c | 16 +- drivers/net/lp486e.c | 6 +- drivers/net/macb.c | 6 +- drivers/net/mace.c | 6 +- drivers/net/macmace.c | 6 +- drivers/net/mlx4/en_netdev.c | 6 +- drivers/net/mv643xx_eth.c | 6 +- drivers/net/myri10ge/myri10ge.c | 8 +- drivers/net/natsemi.c | 6 +- drivers/net/netxen/netxen_nic_hw.c | 12 +- drivers/net/ni52.c | 6 +- drivers/net/niu.c | 5 +- drivers/net/octeon/octeon_mgmt.c | 6 +- drivers/net/pci-skeleton.c | 6 +- drivers/net/pcmcia/axnet_cs.c | 6 +- drivers/net/pcmcia/fmvj18x_cs.c | 6 +- drivers/net/pcmcia/nmclan_cs.c | 6 +- drivers/net/pcmcia/smc91c92_cs.c | 8 +- drivers/net/pcmcia/xirc2ps_cs.c | 6 +- drivers/net/pcnet32.c | 6 +- drivers/net/ps3_gelic_net.c | 6 +- drivers/net/qlcnic/qlcnic_hw.c | 6 +- drivers/net/qlge/qlge_main.c | 6 +- drivers/net/r6040.c | 10 +- drivers/net/r8169.c | 6 +- drivers/net/s2io.c | 8 +- drivers/net/sb1250-mac.c | 6 +- drivers/net/sc92031.c | 6 +- drivers/net/sfc/efx.c | 6 +- drivers/net/sis190.c | 6 +- drivers/net/sis900.c | 10 +- drivers/net/skfp/skfddi.c | 12 +- drivers/net/skge.c | 12 +- drivers/net/sky2.c | 6 +- drivers/net/smc911x.c | 13 +- drivers/net/smc9194.c | 13 +- drivers/net/smc91x.c | 8 +- drivers/net/smsc911x.c | 6 +- drivers/net/smsc9420.c | 6 +- drivers/net/sonic.c | 6 +- drivers/net/spider_net.c | 6 +- drivers/net/starfire.c | 10 +- drivers/net/stmmac/dwmac100.c | 6 +- drivers/net/stmmac/dwmac1000_core.c | 6 +- drivers/net/sun3_82586.c | 6 +- drivers/net/sunbmac.c | 6 +- drivers/net/sundance.c | 6 +- drivers/net/sungem.c | 6 +- drivers/net/sunhme.c | 12 +- drivers/net/sunlance.c | 6 +- drivers/net/sunqe.c | 6 +- drivers/net/sunvnet.c | 8 +- drivers/net/tc35815.c | 6 +- drivers/net/tehuti.c | 6 +- drivers/net/tg3.c | 6 +- drivers/net/tlan.c | 8 +- drivers/net/tokenring/3c359.c | 12 +- drivers/net/tokenring/ibmtr.c | 12 +- drivers/net/tokenring/lanstreamer.c | 12 +- drivers/net/tokenring/olympic.c | 12 +- drivers/net/tokenring/tms380tr.c | 12 +- drivers/net/tsi108_eth.c | 6 +- drivers/net/tulip/de2104x.c | 12 +- drivers/net/tulip/de4x5.c | 10 +- drivers/net/tulip/dmfe.c | 12 +- drivers/net/tulip/tulip_core.c | 27 ++- drivers/net/tulip/uli526x.c | 6 +- drivers/net/tulip/winbond-840.c | 12 +- drivers/net/typhoon.c | 6 +- drivers/net/ucc_geth.c | 8 +- drivers/net/usb/asix.c | 16 +- drivers/net/usb/catc.c | 6 +- drivers/net/usb/dm9601.c | 6 +- drivers/net/usb/mcs7830.c | 6 +- drivers/net/usb/smsc75xx.c | 6 +- drivers/net/usb/smsc95xx.c | 6 +- drivers/net/via-rhine.c | 6 +- drivers/net/via-velocity.c | 6 +- drivers/net/virtio_net.c | 5 +- drivers/net/vmxnet3/vmxnet3_drv.c | 6 +- drivers/net/vxge/vxge-main.c | 6 +- drivers/net/wireless/adm8211.c | 12 +- drivers/net/wireless/ath/ar9170/main.c | 14 +- drivers/net/wireless/ath/ath5k/base.c | 17 +- drivers/net/wireless/libertas/main.c | 12 +- drivers/net/wireless/libertas_tf/main.c | 14 +- 
drivers/net/wireless/mwl8k.c | 24 +- drivers/net/wireless/orinoco/hw.c | 6 +- drivers/net/wireless/orinoco/hw.h | 1 - drivers/net/wireless/ray_cs.c | 12 +- drivers/net/wireless/rndis_wlan.c | 6 +- drivers/net/wireless/rtl818x/rtl8180_dev.c | 6 +- drivers/net/wireless/rtl818x/rtl8187_dev.c | 4 +- drivers/net/wireless/wl12xx/wl1271_main.c | 25 +- drivers/net/wireless/zd1201.c | 6 +- drivers/net/wireless/zd1211rw/zd_mac.c | 13 +- drivers/net/yellowfin.c | 12 +- drivers/s390/net/qeth_l2_main.c | 5 +- drivers/scsi/fcoe/fcoe.c | 4 +- drivers/staging/arlan/arlan-main.c | 9 +- drivers/staging/et131x/et131x_netdev.c | 6 +- drivers/staging/slicoss/slicoss.c | 6 +- drivers/staging/vt6655/device_main.c | 6 +- drivers/staging/vt6656/main_usb.c | 6 +- drivers/staging/wavelan/wavelan.c | 10 +- drivers/staging/wavelan/wavelan_cs.c | 12 +- drivers/staging/winbond/wbusb.c | 6 +- drivers/staging/wlags49_h2/wl_netdev.c | 12 +- include/linux/netdevice.h | 82 +++---- include/net/mac80211.h | 2 +- net/802/garp.c | 4 +- net/appletalk/ddp.c | 2 +- net/bluetooth/bnep/netdev.c | 8 +- net/core/Makefile | 5 +- net/core/dev.c | 145 +----------- net/core/dev_addr_lists.c | 305 +++++++++++++++++++++++-- net/core/dev_mcast.c | 232 ------------------- net/decnet/dn_dev.c | 12 +- net/ipv4/igmp.c | 4 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 4 +- net/ipv6/mcast.c | 4 +- net/mac80211/driver-ops.h | 8 +- net/mac80211/ieee80211_i.h | 3 +- net/mac80211/iface.c | 6 +- net/mac80211/main.c | 2 +- net/packet/af_packet.c | 4 +- 208 files changed, 1137 insertions(+), 1327 deletions(-) delete mode 100644 net/core/dev_mcast.c (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 91fdde382e8..ce9ef6bc865 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -876,7 +876,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev) if (!mc_all_on) { char *addrs; int i; - struct dev_mc_list *mcaddr; + struct netdev_hw_addr *ha; addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC); if (!addrs) { @@ -884,9 +884,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev) goto unlock; } i = 0; - netdev_for_each_mc_addr(mcaddr, netdev) - memcpy(get_addr(addrs, i++), - mcaddr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN); perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + pft_entries_preallocated * 0x8; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 19eba3c877c..c8a0f7dab5b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, restart_task); struct net_device *dev = priv->dev; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct ipoib_mcast *mcast, *tmcast; LIST_HEAD(remove_list); unsigned long flags; @@ -808,14 +808,13 @@ void ipoib_mcast_restart_task(struct work_struct *work) clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); /* Mark all of the entries that are found or don't exist */ - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { union ib_gid mgid; - if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, - dev->broadcast)) + if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast)) continue; - memcpy(mgid.raw, mclist->dmi_addr + 4, 
sizeof mgid); + memcpy(mgid.raw, ha->addr + 4, sizeof mgid); mcast = __ipoib_mcast_find(dev, &mgid); if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index dba1c84058b..cccea412088 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -1109,14 +1109,14 @@ static int dvb_net_feed_stop(struct net_device *dev) } -static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc) +static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr) { struct dvb_net_priv *priv = netdev_priv(dev); if (priv->multi_num == DVB_NET_MULTICAST_MAX) return -ENOMEM; - memcpy(priv->multi_macs[priv->multi_num], mc->dmi_addr, 6); + memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN); priv->multi_num++; return 0; @@ -1140,7 +1140,7 @@ static void wq_set_multicast_list (struct work_struct *work) dprintk("%s: allmulti mode\n", dev->name); priv->rx_mode = RX_MODE_ALL_MULTI; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; dprintk("%s: set_mc_list, %d entries\n", dev->name, netdev_mc_count(dev)); @@ -1148,8 +1148,8 @@ static void wq_set_multicast_list (struct work_struct *work) priv->rx_mode = RX_MODE_MULTI; priv->multi_num = 0; - netdev_for_each_mc_addr(mc, dev) - dvb_set_mc_filter(dev, mc); + netdev_for_each_mc_addr(ha, dev) + dvb_set_mc_filter(dev, ha->addr); } netif_addr_unlock_bh(dev); diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 04b5bba1902..81c8b31e629 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c @@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev) static void elp_set_mc_list(struct net_device *dev) { elp_device *adapter = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; unsigned long flags; @@ -1231,8 +1231,9 @@ static void elp_set_mc_list(struct net_device *dev) adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; adapter->tx_pcb.length = 6 * netdev_mc_count(dev); i = 0; - netdev_for_each_mc_addr(dmi, dev) - memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) + memcpy(adapter->tx_pcb.data.multicast[i++], + ha->addr, 6); adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; if (!send_pcb(dev, &adapter->tx_pcb)) pr_err("%s: couldn't send set_multicast command\n", dev->name); diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index beed4fa10c6..966cb12e1e0 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -625,7 +625,7 @@ static int init586(struct net_device *dev) volatile struct iasetup_cmd_struct *ias_cmd; volatile struct tdr_cmd_struct *tdr_cmd; volatile struct mcsetup_cmd_struct *mc_cmd; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int num_addrs = netdev_mc_count(dev); ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); @@ -788,8 +788,9 @@ static int init586(struct net_device *dev) mc_cmd->cmd_link = 0xffff; mc_cmd->mc_cnt = num_addrs * 6; i = 0; - netdev_for_each_mc_addr(dmi, dev) - memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) + memcpy((char *) mc_cmd->mc_list[i++], + ha->addr, 6); p->scb->cbl_offset = make16(mc_cmd); p->scb->cmd = CUC_START; elmc_id_attn586(); diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 5c07b147ec9..38395dfa496 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry) { 
unsigned char block[62]; unsigned char *bp; - struct dev_mc_list *dmc; + struct netdev_hw_addr *ha; if(retry==0) lp->mc_list_valid = 0; @@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry) block[0]=netdev_mc_count(dev); bp=block+2; - netdev_for_each_mc_addr(dmc, dev) { - memcpy(bp, dmc->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) { + memcpy(bp, ha->addr, 6); bp+=6; } if(mc32_command_nowait(dev, 2, block, diff --git a/drivers/net/7990.c b/drivers/net/7990.c index 4e9a5a20b6a..818837d8ffe 100644 --- a/drivers/net/7990.c +++ b/drivers/net/7990.c @@ -595,7 +595,7 @@ static void lance_load_multicast (struct net_device *dev) struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -610,8 +610,8 @@ static void lance_load_multicast (struct net_device *dev) ib->filter [1] = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? */ if (!(*addrs & 1)) diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 3d4406b1665..e4e2aef1101 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -909,11 +909,11 @@ static void __cp_set_rx_mode (struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index b4efc913978..f61784c3c4a 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c @@ -2502,11 +2502,11 @@ static void __set_rx_mode (struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; diff --git a/drivers/net/82596.c b/drivers/net/82596.c index f94d17d78bb..3a28b1f451d 100644 --- a/drivers/net/82596.c +++ b/drivers/net/82596.c @@ -1542,7 +1542,7 @@ static void set_multicast_list(struct net_device *dev) } if (!netdev_mc_empty(dev)) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char *cp; struct mc_cmd *cmd; @@ -1552,10 +1552,10 @@ static void set_multicast_list(struct net_device *dev) cmd->cmd.command = CmdMulticastList; cmd->mc_cnt = cnt * ETH_ALEN; cp = cmd->mc_addrs; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (!cnt--) break; - memcpy(cp, dmi->dmi_addr, ETH_ALEN); + memcpy(cp, ha->addr, ETH_ALEN); if (i596_debug > 1) DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", dev->name, cp)); diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index bd4d829eca1..7cce643793c 100644 --- a/drivers/net/a2065.c +++ 
b/drivers/net/a2065.c @@ -603,7 +603,7 @@ static void lance_load_multicast (struct net_device *dev) struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -618,8 +618,8 @@ static void lance_load_multicast (struct net_device *dev) ib->filter [1] = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? */ if (!(*addrs & 1)) diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c index b8a59d255b4..d002c764a26 100644 --- a/drivers/net/amd8111e.c +++ b/drivers/net/amd8111e.c @@ -1377,7 +1377,7 @@ list to the device. */ static void amd8111e_set_multicast_list(struct net_device *dev) { - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; struct amd8111e_priv *lp = netdev_priv(dev); u32 mc_filter[2] ; int bit_num; @@ -1408,8 +1408,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev) /* load all the multicast addresses in the logic filter */ lp->options |= OPTION_MULTICAST_ENABLE; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mc_ptr, dev) { - bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f; mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); } amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index f1f58c5e27b..a4b5b08276f 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev) } else if (dev->flags & IFF_ALLMULTI) { memset(multi_hash, 0xff, sizeof(multi_hash)); } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; memset(multi_hash, 0x00, sizeof(multi_hash)); - netdev_for_each_mc_addr(dmi, dev) - am79c961_mc_hash(dmi->dmi_addr, multi_hash); + netdev_for_each_mc_addr(ha, dev) + am79c961_mc_hash(ha->addr, multi_hash); } spin_lock_irqsave(&priv->chip_lock, flags); diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 8b23d5a175b..f31e8b6cbf7 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -556,14 +556,14 @@ static int hash_get_index(__u8 *addr) */ static void at91ether_sethashtable(struct net_device *dev) { - struct dev_mc_list *curr; + struct netdev_hw_addr *ha; unsigned long mc_filter[2]; unsigned int bitnr; mc_filter[0] = mc_filter[1] = 0; - netdev_for_each_mc_addr(curr, dev) { - bitnr = hash_get_index(curr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 6e2ae1d06df..f9d168775d0 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -735,7 +735,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) static void eth_set_mcast_list(struct net_device *dev) { struct port *port = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u8 diffs[ETH_ALEN], *addr; int i; @@ -748,11 +748,11 @@ static void eth_set_mcast_list(struct net_device *dev) memset(diffs, 0, ETH_ALEN); addr = NULL; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (!addr) - addr = mclist->dmi_addr; /* first MAC 
address */ + addr = ha->addr; /* first MAC address */ for (i = 0; i < ETH_ALEN; i++) - diffs[i] |= addr[i] ^ mclist->dmi_addr[i]; + diffs[i] |= addr[i] ^ ha->addr[i]; } for (i = 0; i < ETH_ALEN; i++) { diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c index e7810b74f39..6ec245c6394 100644 --- a/drivers/net/arm/ks8695net.c +++ b/drivers/net/arm/ks8695net.c @@ -331,16 +331,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp, { u32 low, high; int i; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; i = 0; - netdev_for_each_mc_addr(dmi, ndev) { + netdev_for_each_mc_addr(ha, ndev) { /* Ran out of space in chip? */ BUG_ON(i == KS8695_NR_ADDRESSES); - low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) | - (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]); - high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]); + low = (ha->addr[2] << 24) | (ha->addr[3] << 16) | + (ha->addr[4] << 8) | (ha->addr[5]); + high = (ha->addr[0] << 8) | (ha->addr[1]); ks8695_writereg(ksp, KS8695_AAL_(i), low); ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c index 309843ab886..e2a549a60e2 100644 --- a/drivers/net/at1700.c +++ b/drivers/net/at1700.c @@ -848,12 +848,12 @@ set_rx_mode(struct net_device *dev) memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit = - ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; + ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << bit); } outb(0x02, ioaddr + RX_MODE); /* Use normal mode. 
*/ diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index a5508e1b261..3d7051135c3 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -354,7 +354,7 @@ static void atl1c_set_multi(struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); struct atl1c_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 mac_ctrl_data; u32 hash_value; @@ -377,8 +377,8 @@ static void atl1c_set_multi(struct net_device *netdev) AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1c_hash_mc_addr(hw, ha->addr); atl1c_hash_set(hw, hash_value); } } diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index 7231b577912..b6605d433e9 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); struct atl1e_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 mac_ctrl_data = 0; u32 hash_value; @@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev) AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1e_hash_mc_addr(hw, ha->addr); atl1e_hash_set(hw, hash_value); } } diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c index 199f2c9ce74..078d9d1b427 100644 --- a/drivers/net/atlx/atl2.c +++ b/drivers/net/atlx/atl2.c @@ -135,7 +135,7 @@ static void atl2_set_multi(struct net_device *netdev) { struct atl2_adapter *adapter = netdev_priv(netdev); struct atl2_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; @@ -157,8 +157,8 @@ static void atl2_set_multi(struct net_device *netdev) ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); /* comoute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl2_hash_mc_addr(hw, ha->addr); atl2_hash_set(hw, hash_value); } } diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c index 72f3306352e..f979ea2d6d3 100644 --- a/drivers/net/atlx/atlx.c +++ b/drivers/net/atlx/atlx.c @@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); struct atlx_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 rctl; u32 hash_value; @@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev) iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); /* compute mc addresses' hash value ,and put it into hash table */ - netdev_for_each_mc_addr(mc_ptr, netdev) { - hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atlx_hash_mc_addr(hw, ha->addr); atlx_hash_set(hw, hash_value); } } diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 6ad16205dc1..0d730c8329d 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c @@ -883,11 +883,11 @@ 
static void set_rx_mode_8012(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); new_mode = CMR2h_Normal; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); } new_mode = CMR2h_Normal; diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 4da191b87b0..29631593cc2 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -957,12 +957,12 @@ static void au1000_multicast_list(struct net_device *dev) aup->mac->control &= ~MAC_PROMISCUOUS; printk(KERN_INFO "%s: Pass all multicast\n", dev->name); } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 mc_filter[2]; /* Multicast hash filter */ mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) - set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26, + netdev_for_each_mc_addr(ha, dev) + set_bit(ether_crc(ETH_ALEN, ha->addr)>>26, (long *)mc_filter); aup->mac->multi_hash_high = mc_filter[1]; aup->mac->multi_hash_low = mc_filter[0]; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 332c6035628..b2c5fd7b63a 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -1680,15 +1680,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev) static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i, num_ents; num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); i = 0; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i == num_ents) break; - __b44_cam_write(bp, mclist->dmi_addr, i++ + 1); + __b44_cam_write(bp, ha->addr, i++ + 1); } return i+1; } diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c index 37eb8021de1..51733404478 100644 --- a/drivers/net/bcm63xx_enet.c +++ b/drivers/net/bcm63xx_enet.c @@ -603,7 +603,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p) static void bcm_enet_set_multicast_list(struct net_device *dev) { struct bcm_enet_priv *priv; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 val; int i; @@ -631,14 +631,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev) } i = 0; - netdev_for_each_mc_addr(mc_list, dev) { + netdev_for_each_mc_addr(ha, dev) { u8 *dmi_addr; u32 tmp; if (i == 3) break; /* update perfect match registers */ - dmi_addr = mc_list->dmi_addr; + dmi_addr = ha->addr; tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | (dmi_addr[4] << 8) | dmi_addr[5]; enet_writel(priv, tmp, ENET_PML_REG(i + 1)); diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 9f53d9e86e2..61a9afdb83f 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1159,13 +1159,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, req->interface_id = if_id; if (netdev) { int i; - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; req->num_mac = cpu_to_le16(netdev_mc_count(netdev)); i = 0; - netdev_for_each_mc_addr(mc, netdev) - memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); } else { req->promiscuous = 1; } diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 587f93cf03f..c488cea8f45 100644 --- 
a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -812,14 +812,14 @@ static void bfin_mac_timeout(struct net_device *dev) static void bfin_mac_multicast_hash(struct net_device *dev) { u32 emac_hashhi, emac_hashlo; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; emac_hashhi = emac_hashlo = 0; - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* skip non-multicast addresses */ if (!(*addrs & 1)) diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c index 119468e7632..1245e983576 100644 --- a/drivers/net/bmac.c +++ b/drivers/net/bmac.c @@ -971,7 +971,7 @@ bmac_remove_multi(struct net_device *dev, */ static void bmac_set_multicast(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct bmac_data *bp = netdev_priv(dev); int num_addrs = netdev_mc_count(dev); unsigned short rx_cfg; @@ -1000,8 +1000,8 @@ static void bmac_set_multicast(struct net_device *dev) rx_cfg = bmac_rx_on(dev, 0, 0); XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); } else { - netdev_for_each_mc_addr(dmi, dev) - bmac_addhash(bp, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + bmac_addhash(bp, ha->addr); bmac_update_hash_table_mask(dev, bp); rx_cfg = bmac_rx_on(dev, 1, 0); XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); @@ -1015,7 +1015,7 @@ static void bmac_set_multicast(struct net_device *dev) static void bmac_set_multicast(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; int i; unsigned short rx_cfg; @@ -1039,8 +1039,8 @@ static void bmac_set_multicast(struct net_device *dev) for(i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if(!(*addrs & 1)) continue; diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 381887ba677..0b69ffb7951 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -3544,7 +3544,6 @@ bnx2_set_rx_mode(struct net_device *dev) } else { /* Accept one or more multicast(s). */ - struct dev_mc_list *mclist; u32 mc_filter[NUM_MC_HASH_REGISTERS]; u32 regidx; u32 bit; @@ -3552,8 +3551,8 @@ bnx2_set_rx_mode(struct net_device *dev) memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); - netdev_for_each_mc_addr(mclist, dev) { - crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); bit = crc & 0xff; regidx = (bit & 0xe0) >> 5; bit &= 0x1f; diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c index f4ea99d06c7..fa9275c2ef5 100644 --- a/drivers/net/bnx2x_main.c +++ b/drivers/net/bnx2x_main.c @@ -11496,21 +11496,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev) else { /* some multicasts */ if (CHIP_IS_E1(bp)) { int i, old, offset; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct mac_configuration_cmd *config = bnx2x_sp(bp, mcast_config); i = 0; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { config->config_table[i]. cam_entry.msb_mac_addr = - swab16(*(u16 *)&mclist->dmi_addr[0]); + swab16(*(u16 *)&ha->addr[0]); config->config_table[i]. cam_entry.middle_mac_addr = - swab16(*(u16 *)&mclist->dmi_addr[2]); + swab16(*(u16 *)&ha->addr[2]); config->config_table[i]. cam_entry.lsb_mac_addr = - swab16(*(u16 *)&mclist->dmi_addr[4]); + swab16(*(u16 *)&ha->addr[4]); config->config_table[i].cam_entry.flags = cpu_to_le16(port); config->config_table[i]. 
@@ -11564,18 +11564,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev) 0); } else { /* E1H */ /* Accept one or more multicasts */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 mc_filter[MC_HASH_SIZE]; u32 crc, bit, regidx; int i; memset(mc_filter, 0, 4 * MC_HASH_SIZE); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", - mclist->dmi_addr); + ha->addr); - crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); + crc = crc32c_le(0, ha->addr, ETH_ALEN); bit = (crc >> 24) & 0xff; regidx = bit >> 5; bit &= 0x1f; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d6ae63b2cf0..22682f1c847 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -761,32 +761,6 @@ static int bond_check_dev_link(struct bonding *bond, /*----------------------------- Multicast list ------------------------------*/ -/* - * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise - */ -static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1, - const struct dev_mc_list *dmi2) -{ - return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 && - dmi1->dmi_addrlen == dmi2->dmi_addrlen; -} - -/* - * returns dmi entry if found, NULL otherwise - */ -static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi, - struct dev_mc_list *mc_list) -{ - struct dev_mc_list *idmi; - - for (idmi = mc_list; idmi; idmi = idmi->next) { - if (bond_is_dmi_same(dmi, idmi)) - return idmi; - } - - return NULL; -} - /* * Push the promiscuity flag down to appropriate slaves */ @@ -839,18 +813,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc) * Add a Multicast address to slaves * according to mode */ -static void bond_mc_add(struct bonding *bond, void *addr, int alen) +static void bond_mc_add(struct bonding *bond, void *addr) { if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) - dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0); + dev_mc_add(bond->curr_active_slave->dev, addr); } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) - dev_mc_add(slave->dev, addr, alen, 0); + dev_mc_add(slave->dev, addr); } } @@ -858,18 +832,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen) * Remove a multicast address from slave * according to mode */ -static void bond_mc_delete(struct bonding *bond, void *addr, int alen) +static void bond_mc_del(struct bonding *bond, void *addr) { if (USES_PRIMARY(bond->params.mode)) { /* write lock already acquired */ if (bond->curr_active_slave) - dev_mc_delete(bond->curr_active_slave->dev, addr, - alen, 0); + dev_mc_del(bond->curr_active_slave->dev, addr); } else { struct slave *slave; int i; bond_for_each_slave(bond, slave, i) { - dev_mc_delete(slave->dev, addr, alen, 0); + dev_mc_del(slave->dev, addr); } } } @@ -895,50 +868,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) rcu_read_unlock(); } -/* - * Totally destroys the mc_list in bond - */ -static void bond_mc_list_destroy(struct bonding *bond) -{ - struct dev_mc_list *dmi; - - dmi = bond->mc_list; - while (dmi) { - bond->mc_list = dmi->next; - kfree(dmi); - dmi = bond->mc_list; - } - - bond->mc_list = NULL; -} - -/* - * Copy all the Multicast addresses from src to the bonding device dst - */ -static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond, - gfp_t gfp_flag) -{ - struct dev_mc_list *dmi, *new_dmi; - - for (dmi = mc_list; dmi; 
dmi = dmi->next) { - new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag); - - if (!new_dmi) { - /* FIXME: Potential memory leak !!! */ - return -ENOMEM; - } - - new_dmi->next = bond->mc_list; - bond->mc_list = new_dmi; - new_dmi->dmi_addrlen = dmi->dmi_addrlen; - memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen); - new_dmi->dmi_users = dmi->dmi_users; - new_dmi->dmi_gusers = dmi->dmi_gusers; - } - - return 0; -} - /* * flush all members of flush->mc_list from device dev->mc_list */ @@ -946,16 +875,16 @@ static void bond_mc_list_flush(struct net_device *bond_dev, struct net_device *slave_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; - for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) - dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond_dev) + dev_mc_del(slave_dev, ha->addr); if (bond->params.mode == BOND_MODE_8023AD) { /* del lacpdu mc addr from mc list */ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0); + dev_mc_del(slave_dev, lacpdu_multicast); } } @@ -969,7 +898,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev, static void bond_mc_swap(struct bonding *bond, struct slave *new_active, struct slave *old_active) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; if (!USES_PRIMARY(bond->params.mode)) /* nothing to do - mc list is already up-to-date on @@ -984,9 +913,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(old_active->dev, -1); - for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) - dev_mc_delete(old_active->dev, dmi->dmi_addr, - dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond->dev) + dev_mc_del(old_active->dev, ha->addr); } if (new_active) { @@ -997,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_ALLMULTI) dev_set_allmulti(new_active->dev, 1); - for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) - dev_mc_add(new_active->dev, dmi->dmi_addr, - dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond->dev) + dev_mc_add(new_active->dev, ha->addr); bond_resend_igmp_join_requests(bond); } } @@ -1406,7 +1333,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) struct bonding *bond = netdev_priv(bond_dev); const struct net_device_ops *slave_ops = slave_dev->netdev_ops; struct slave *new_slave = NULL; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct sockaddr addr; int link_reporting; int old_features = bond_dev->features; @@ -1492,7 +1419,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* Flush unicast and multicast addresses */ dev_uc_flush(bond_dev); - dev_addr_discard(bond_dev); + dev_mc_flush(bond_dev); if (slave_dev->type != ARPHRD_ETHER) bond_setup_by_slave(bond_dev, slave_dev); @@ -1601,9 +1528,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) netif_addr_lock_bh(bond_dev); /* upload master's mc_list to new slave */ - for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) - dev_mc_add(slave_dev, dmi->dmi_addr, - dmi->dmi_addrlen, 0); + netdev_for_each_mc_addr(ha, bond_dev) + dev_mc_add(slave_dev, ha->addr); netif_addr_unlock_bh(bond_dev); } @@ -1611,7 +1537,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* add lacpdu mc addr to mc list */ u8 lacpdu_multicast[ETH_ALEN] = 
MULTICAST_LACPDU_ADDR; - dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0); + dev_mc_add(slave_dev, lacpdu_multicast); } bond_add_vlans_on_slave(bond, slave_dev); @@ -3913,10 +3839,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd return res; } +static bool bond_addr_in_mc_list(unsigned char *addr, + struct netdev_hw_addr_list *list, + int addrlen) +{ + struct netdev_hw_addr *ha; + + netdev_hw_addr_list_for_each(ha, list) + if (!memcmp(ha->addr, addr, addrlen)) + return true; + + return false; +} + static void bond_set_multicast_list(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; + bool found; /* * Do promisc before checking multicast_mode @@ -3951,20 +3891,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev) bond->flags = bond_dev->flags; /* looking for addresses to add to slaves' mc list */ - for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { - if (!bond_mc_list_find_dmi(dmi, bond->mc_list)) - bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen); + netdev_for_each_mc_addr(ha, bond_dev) { + found = bond_addr_in_mc_list(ha->addr, &bond->mc_list, + bond_dev->addr_len); + if (!found) + bond_mc_add(bond, ha->addr); } /* looking for addresses to delete from slaves' list */ - for (dmi = bond->mc_list; dmi; dmi = dmi->next) { - if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list)) - bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen); + netdev_hw_addr_list_for_each(ha, &bond->mc_list) { + found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc, + bond_dev->addr_len); + if (!found) + bond_mc_del(bond, ha->addr); } /* save master's multicast list */ - bond_mc_list_destroy(bond); - bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); + __hw_addr_flush(&bond->mc_list); + __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc, + bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST); read_unlock(&bond->lock); } @@ -4534,9 +4479,7 @@ static void bond_uninit(struct net_device *bond_dev) if (bond->wq) destroy_workqueue(bond->wq); - netif_addr_lock_bh(bond_dev); - bond_mc_list_destroy(bond); - netif_addr_unlock_bh(bond_dev); + __hw_addr_flush(&bond->mc_list); } /*------------------------- Module initialization ---------------------------*/ @@ -4908,6 +4851,8 @@ static int bond_init(struct net_device *bond_dev) list_add_tail(&bond->bond_list, &bn->dev_list); bond_prepare_sysfs_group(bond); + + __hw_addr_init(&bond->mc_list); return 0; } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 257a7a4dfce..2aa33672059 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -202,7 +202,7 @@ struct bonding { char proc_file_name[IFNAMSIZ]; #endif /* CONFIG_PROC_FS */ struct list_head bond_list; - struct dev_mc_list *mc_list; + struct netdev_hw_addr_list mc_list; int (*xmit_hash_policy)(struct sk_buff *, int); __be32 master_ip; u16 flags; diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 9bd155e4111..bd857a20a75 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -2957,20 +2957,20 @@ static void cas_process_mc_list(struct cas *cp) { u16 hash_table[16]; u32 crc; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i = 1; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, cp->dev) { + netdev_for_each_mc_addr(ha, cp->dev) { if (i <= CAS_MC_EXACT_MATCH_SIZE) { /* use the alternate mac address registers for the * first 15 multicast addresses */ - writel((dmi->dmi_addr[4] << 
8) | dmi->dmi_addr[5], + writel((ha->addr[4] << 8) | ha->addr[5], cp->regs + REG_MAC_ADDRN(i*3 + 0)); - writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], + writel((ha->addr[2] << 8) | ha->addr[3], cp->regs + REG_MAC_ADDRN(i*3 + 1)); - writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], + writel((ha->addr[0] << 8) | ha->addr[1], cp->regs + REG_MAC_ADDRN(i*3 + 2)); i++; } @@ -2978,7 +2978,7 @@ static void cas_process_mc_list(struct cas *cp) /* use hw hash table for the next series of * multicast addresses */ - crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); + crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c index a6eb30a6e2b..13fd9628db1 100644 --- a/drivers/net/chelsio/pm3393.c +++ b/drivers/net/chelsio/pm3393.c @@ -376,12 +376,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm) rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; } else if (t1_rx_mode_mc_cnt(rm)) { /* Accept one or more multicast(s). */ - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int bit; u16 mc_filter[4] = { 0, }; - netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) { - bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */ + netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) { + /* bit[23:28] */ + bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f; mc_filter[bit >> 4] |= 1 << (bit & 0xf); } pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c index 60777fd90b3..bdfff784645 100644 --- a/drivers/net/cpmac.c +++ b/drivers/net/cpmac.c @@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map) static void cpmac_set_multicast_list(struct net_device *dev) { - struct dev_mc_list *iter; + struct netdev_hw_addr *ha; u8 tmp; u32 mbp, bit, hash[2] = { 0, }; struct cpmac_priv *priv = netdev_priv(dev); @@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev) * cpmac uses some strange mac address hashing * (not crc32) */ - netdev_for_each_mc_addr(iter, dev) { + netdev_for_each_mc_addr(ha, dev) { bit = 0; - tmp = iter->dmi_addr[0]; + tmp = ha->addr[0]; bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = iter->dmi_addr[1]; + tmp = ha->addr[1]; bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = iter->dmi_addr[2]; + tmp = ha->addr[2]; bit ^= (tmp >> 6) ^ tmp; - tmp = iter->dmi_addr[3]; + tmp = ha->addr[3]; bit ^= (tmp >> 2) ^ (tmp << 4); - tmp = iter->dmi_addr[4]; + tmp = ha->addr[4]; bit ^= (tmp >> 4) ^ (tmp << 2); - tmp = iter->dmi_addr[5]; + tmp = ha->addr[5]; bit ^= (tmp >> 6) ^ tmp; bit &= 0x3f; hash[bit / 32] |= 1 << (bit % 32); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index dd24aadb778..59110bc119a 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1596,16 +1596,16 @@ set_multicast_list(struct net_device *dev) } else { /* MC mode, receive normal and MC packets */ char hash_ix; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *baddr; lo_bits = 0x00000000ul; hi_bits = 0x00000000ul; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Calculate the hash index for the GA registers */ hash_ix = 0; - baddr = dmi->dmi_addr; + baddr = ha->addr; hash_ix ^= (*baddr) & 0x3f; hash_ix ^= ((*baddr) >> 6) & 0x03; ++baddr; diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c index c142a2132e9..3af19a55037 100644 --- a/drivers/net/cxgb3/xgmac.c +++ b/drivers/net/cxgb3/xgmac.c @@ 
-311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev) if (dev->flags & IFF_ALLMULTI) hash_lo = hash_hi = 0xffffffff; else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int exact_addr_idx = mac->nucast; hash_lo = hash_hi = 0; - netdev_for_each_mc_addr(dmi, dev) + netdev_for_each_mc_addr(ha, dev) if (exact_addr_idx < EXACT_ADDR_FILTERS) set_addr_filter(mac, exact_addr_idx++, - dmi->dmi_addr); + ha->addr); else { - int hash = hash_hw_addr(dmi->dmi_addr); + int hash = hash_hw_addr(ha->addr); if (hash < 32) hash_lo |= (1 << hash); diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2b8edd2efbf..1f9df5c6a75 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev) emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); } if (!netdev_mc_empty(ndev)) { - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; + mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); /* program multicast address list into EMAC hardware */ - netdev_for_each_mc_addr(mc_ptr, ndev) { + netdev_for_each_mc_addr(ha, ndev) { emac_add_mcast(priv, EMAC_MULTICAST_ADD, - (u8 *) mc_ptr->dmi_addr); + (u8 *) ha->addr); } } else { mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 8cf3cc6f20e..fb3f0984c28 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -940,7 +940,7 @@ static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile u16 *ib = (volatile u16 *)dev->mem_start; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -959,8 +959,8 @@ static void lance_load_multicast(struct net_device *dev) *lib_ptr(ib, filter[3], lp->type) = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c index ed53a8d45f4..e5667c55844 100644 --- a/drivers/net/defxx.c +++ b/drivers/net/defxx.c @@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev) { DFX_board_t *bp = netdev_priv(dev); int i; /* used as index in for loop */ - struct dev_mc_list *dmi; /* ptr to multicast addr entry */ + struct netdev_hw_addr *ha; /* Enable LLC frame promiscuous mode, if necessary */ @@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev) /* Copy addresses to multicast address table, then update adapter CAM */ i = 0; - netdev_for_each_mc_addr(dmi, dev) + netdev_for_each_mc_addr(ha, dev) memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], - dmi->dmi_addr, FDDI_K_ALEN); + ha->addr, FDDI_K_ALEN); if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) { diff --git a/drivers/net/depca.c b/drivers/net/depca.c index 744c1928dfc..a88300a0d1e 100644 --- a/drivers/net/depca.c +++ b/drivers/net/depca.c @@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev) static void SetMulticastFilter(struct net_device *dev) { struct depca_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; int i, j, bit, byte; u16 hashcode; @@ -1287,8 +1287,8 @@ static void SetMulticastFilter(struct net_device *dev) lp->init_block.mcast_table[i] = 0; } /* Add multicast addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 1) { /* multicast address? */ crc = ether_crc(ETH_ALEN, addrs); hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index b05bad82982..6579225dbd9 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c @@ -1132,14 +1132,14 @@ set_multicast (struct net_device *dev) /* Receive broadcast and multicast frames */ rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* Receive broadcast frames and multicast frames filtering by Hashtable */ rx_mode = ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { int bit, index = 0; - int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); + int crc = ether_crc_le(ETH_ALEN, ha->addr); /* The inverted high significant 6 bits of CRC are used as an index to hashtable */ for (bit = 0; bit < 6; bit++) diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 1c67f1138ca..989f2beb123 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -724,7 +724,7 @@ static void dm9000_hash_table(struct net_device *dev) { board_info_t *db = netdev_priv(dev); - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; int i, oft; u32 hash_val; u16 hash_table[4]; @@ -752,8 +752,8 @@ dm9000_hash_table(struct net_device *dev) rcr |= RCR_ALL; /* the multicast address in Hash Table : 64 bits */ - netdev_for_each_mc_addr(mcptr, dev) { - hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + hash_val = ether_crc_le(6, ha->addr) & 0x3f; hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); } diff --git a/drivers/net/e100.c b/drivers/net/e100.c index c0cd5765668..3e8d0005540 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1545,16 +1545,16 @@ static int e100_hw_init(struct nic *nic) static void e100_multi(struct nic *nic, struct cb *cb, struct 
sk_buff *skb) { struct net_device *netdev = nic->netdev; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); cb->command = cpu_to_le16(cb_multi); cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); i = 0; - netdev_for_each_mc_addr(list, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == count) break; - memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr, + memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, ETH_ALEN); } } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 8be6faee43e..41330349b07 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2101,7 +2101,6 @@ static void e1000_set_rx_mode(struct net_device *netdev) struct e1000_hw *hw = &adapter->hw; struct netdev_hw_addr *ha; bool use_uc = false; - struct dev_addr_list *mc_ptr; u32 rctl; u32 hash_value; int i, rar_entries = E1000_RAR_ENTRIES; @@ -2161,17 +2160,17 @@ static void e1000_set_rx_mode(struct net_device *netdev) WARN_ON(i == rar_entries); - netdev_for_each_mc_addr(mc_ptr, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == rar_entries) { /* load any remaining addresses into the hash table */ u32 hash_reg, hash_bit, mta; - hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); + hash_value = e1000_hash_mc_addr(hw, ha->addr); hash_reg = (hash_value >> 5) & 0x7F; hash_bit = hash_value & 0x1F; mta = (1 << hash_bit); mcarray[hash_reg] |= mta; } else { - e1000_rar_set(hw, mc_ptr->da_addr, i++); + e1000_rar_set(hw, ha->addr, i++); } } diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 5304959ae1f..02f7d20f3c8 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -2567,7 +2567,7 @@ static void e1000_set_multi(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 *mta_list; u32 rctl; int i; @@ -2599,9 +2599,8 @@ static void e1000_set_multi(struct net_device *netdev) /* prepare a packed array of only addresses. 
*/ i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), - mc_ptr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); e1000_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 15d6266b80c..b01e6997403 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1287,7 +1287,7 @@ set_multicast_list(struct net_device *dev) struct eepro_local *lp = netdev_priv(dev); short ioaddr = dev->base_addr; unsigned short mode; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int mc_count = netdev_mc_count(dev); if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) @@ -1332,8 +1332,8 @@ set_multicast_list(struct net_device *dev) outw(0, ioaddr + IO_PORT); outw(6 * (mc_count + 1), ioaddr + IO_PORT); - netdev_for_each_mc_addr(dmi, dev) { - eaddrs = (unsigned short *) dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (unsigned short *) ha->addr; outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); outw(*eaddrs++, ioaddr + IO_PORT); diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c index 7013dc8a6cb..b3882fd8db6 100644 --- a/drivers/net/eexpress.c +++ b/drivers/net/eexpress.c @@ -1576,7 +1576,7 @@ static void eexp_hw_init586(struct net_device *dev) static void eexp_setup_filter(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned short ioaddr = dev->base_addr; int count = netdev_mc_count(dev); int i; @@ -1589,8 +1589,8 @@ static void eexp_setup_filter(struct net_device *dev) outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); i = 0; - netdev_for_each_mc_addr(dmi, dev) { - unsigned short *data = (unsigned short *) dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + unsigned short *data = (unsigned short *) ha->addr; if (i == count) break; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index b004eaba3d7..b97411aaa77 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -1966,7 +1966,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) static void ehea_set_multicast_list(struct net_device *dev) { struct ehea_port *port = netdev_priv(dev); - struct dev_mc_list *k_mcl_entry; + struct netdev_hw_addr *ha; int ret; if (dev->flags & IFF_PROMISC) { @@ -1997,8 +1997,8 @@ static void ehea_set_multicast_list(struct net_device *dev) goto out; } - netdev_for_each_mc_addr(k_mcl_entry, dev) - ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + ehea_add_multicast_entry(port, ha->addr); } out: diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index 6d70c349c95..1232887c243 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -822,7 +822,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr) static void enic_set_multicast_list(struct net_device *netdev) { struct enic *enic = netdev_priv(netdev); - struct dev_mc_list *list; + struct netdev_hw_addr *ha; int directed = 1; int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; int broadcast = (netdev->flags & IFF_BROADCAST) ? 
1 : 0; @@ -852,10 +852,10 @@ static void enic_set_multicast_list(struct net_device *netdev) */ i = 0; - netdev_for_each_mc_addr(list, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == mc_count) break; - memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN); + memcpy(mc_addr[i++], ha->addr, ETH_ALEN); } for (i = 0; i < enic->mc_count; i++) { diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c index 39c271b6be4..f6584a1ad3b 100644 --- a/drivers/net/epic100.c +++ b/drivers/net/epic100.c @@ -1400,12 +1400,12 @@ static void set_rx_mode(struct net_device *dev) outl(0x0004, ioaddr + RxCtrl); return; } else { /* Never executed, for now. */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit_nr = - ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; + ether_crc_le(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit_nr >> 3] |= (1 << bit_nr); } } diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 209742304e2..f6be5aeaf94 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -755,7 +755,7 @@ static void ethoc_set_multicast_list(struct net_device *dev) { struct ethoc *priv = netdev_priv(dev); u32 mode = ethoc_read(priv, MODER); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; u32 hash[2] = { 0, 0 }; /* set loopback mode if requested */ @@ -783,8 +783,8 @@ static void ethoc_set_multicast_list(struct net_device *dev) hash[0] = 0xffffffff; hash[1] = 0xffffffff; } else { - netdev_for_each_mc_addr(mc, dev) { - u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); int bit = (crc >> 26) & 0x3f; hash[bit >> 5] |= 1 << (bit & 0x1f); } diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c index a2bade58688..11ba70f4997 100644 --- a/drivers/net/ewrk3.c +++ b/drivers/net/ewrk3.c @@ -1169,7 +1169,7 @@ static void set_multicast_list(struct net_device *dev) static void SetMulticastFilter(struct net_device *dev) { struct ewrk3_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u_long iobase = dev->base_addr; int i; char *addrs, bit, byte; @@ -1213,8 +1213,8 @@ static void SetMulticastFilter(struct net_device *dev) } /* Update table */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 1) { /* multicast address? 
*/ crc = ether_crc_le(ETH_ALEN, addrs); hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */ diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c index 9d5ad08a119..e8a2705237b 100644 --- a/drivers/net/fealnx.c +++ b/drivers/net/fealnx.c @@ -1792,12 +1792,12 @@ static void __set_rx_mode(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = CR_W_AB | CR_W_AM; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit; - bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; + bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; mc_filter[bit >> 5] |= (1 << bit); } rx_mode = CR_W_AB | CR_W_AM; diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 848eb1968ab..2b1651aee13 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -954,7 +954,7 @@ fec_enet_close(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned int i, bit, data, crc, tmp; unsigned char hash; @@ -984,16 +984,16 @@ static void set_multicast_list(struct net_device *dev) writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Only support group multicast for now */ - if (!(dmi->dmi_addr[0] & 1)) + if (!(ha->addr[0] & 1)) continue; /* calculate crc32 value of mac address */ crc = 0xffffffff; - for (i = 0; i < dmi->dmi_addrlen; i++) { - data = dmi->dmi_addr[i]; + for (i = 0; i < dev->addr_len; i++) { + data = ha->addr[i]; for (bit = 0; bit < 8; bit++, data >>= 1) { crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 
CRC32_POLY : 0); diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index 704155e2bdd..667ba1391b9 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -574,12 +574,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev) out_be32(&fec->gaddr2, 0xffffffff); } else { u32 crc; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 gaddr1 = 0x00000000; u32 gaddr2 = 0x00000000; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc_le(6, dmi->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr) >> 26; if (crc >= 32) gaddr1 |= 1 << (crc-32); else diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index ca05e566202..6a2b64f0a7d 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -3103,12 +3103,14 @@ static void nv_set_multicast(struct net_device *dev) if (dev->flags & IFF_ALLMULTI) { alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; } else { - struct dev_mc_list *walk; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(walk, dev) { + netdev_for_each_mc_addr(ha, dev) { + unsigned char *addr = ha->addr; u32 a, b; - a = le32_to_cpu(*(__le32 *) walk->dmi_addr); - b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); + + a = le32_to_cpu(*(__le32 *) addr); + b = le16_to_cpu(*(__le16 *) (&addr[4])); alwaysOn[0] &= a; alwaysOff[0] &= ~a; alwaysOn[1] &= b; diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index cf4f674f9e2..b3bad7c15d0 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct dev_mc_list *pmc; + struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); - netdev_for_each_mc_addr(pmc, dev) - set_multicast_one(dev, pmc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index cd2c6cca5f2..75974c6d201 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c @@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct dev_mc_list *pmc; + struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); - netdev_for_each_mc_addr(pmc, dev) - set_multicast_one(dev, pmc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index c490a466cae..0ab6a346a19 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c @@ -224,12 +224,12 @@ static void set_multicast_finish(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct dev_mc_list *pmc; + struct netdev_hw_addr *ha; if ((dev->flags & IFF_PROMISC) == 0) { set_multicast_start(dev); - netdev_for_each_mc_addr(pmc, dev) - set_multicast_one(dev, pmc->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + set_multicast_one(dev, ha->addr); set_multicast_finish(dev); } else set_promiscuous_mode(dev); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b6715553cf1..fdd26c2b1a2 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2797,7 +2797,7 @@ static void 
adjust_link(struct net_device *dev) * whenever dev->flags is changed */ static void gfar_set_multi(struct net_device *dev) { - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; struct gfar_private *priv = netdev_priv(dev); struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 tempval; @@ -2870,13 +2870,12 @@ static void gfar_set_multi(struct net_device *dev) return; /* Parse the list, and set the appropriate bits */ - netdev_for_each_mc_addr(mc_ptr, dev) { + netdev_for_each_mc_addr(ha, dev) { if (idx < em_num) { - gfar_set_mac_for_addr(dev, idx, - mc_ptr->dmi_addr); + gfar_set_mac_for_addr(dev, idx, ha->addr); idx++; } else - gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); + gfar_set_hash_for_addr(dev, ha->addr); } } diff --git a/drivers/net/greth.c b/drivers/net/greth.c index c5e0d28a6de..fbe6ab6b919 100644 --- a/drivers/net/greth.c +++ b/drivers/net/greth.c @@ -988,7 +988,7 @@ static u32 greth_hash_get_index(__u8 *addr) static void greth_set_hash_filter(struct net_device *dev) { - struct dev_mc_list *curr; + struct netdev_hw_addr *ha; struct greth_private *greth = netdev_priv(dev); struct greth_regs *regs = (struct greth_regs *) greth->regs; u32 mc_filter[2]; @@ -996,8 +996,8 @@ static void greth_set_hash_filter(struct net_device *dev) mc_filter[0] = mc_filter[1] = 0; - netdev_for_each_mc_addr(curr, dev) { - bitnr = greth_hash_get_index(curr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + bitnr = greth_hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index 373546dd083..2bfcca6d180 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c @@ -1858,12 +1858,12 @@ static void set_rx_mode(struct net_device *dev) /* Too many to match, or accept all multicasts. */ writew(0x000B, ioaddr + AddrMode); } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i = 0; - netdev_for_each_mc_addr(mclist, dev) { - writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8); - writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]), + netdev_for_each_mc_addr(ha, dev) { + writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8); + writel(0x20000 | (*(u16 *)&ha->addr[4]), ioaddr + 0x104 + i*8); i++; } diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index b766a69bf0c..86b2b433234 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -2100,15 +2100,15 @@ static void hp100_set_multicast_list(struct net_device *dev) } else { int i, idx; u_char *addrs; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; memset(&lp->hash_bytes, 0x00, 8); #ifdef HP100_DEBUG printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, netdev_mc_count(dev)); #endif - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 0x01) { /* multicast address? 
*/ #ifdef HP100_DEBUG printk("hp100: %s: multicast = %pM, ", diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index d8533a4ef82..40c78507ef1 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -388,18 +388,19 @@ static void emac_hash_mc(struct emac_instance *dev) const int regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = emac_gaht_base(dev); u32 gaht_temp[regs]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev)); memset(gaht_temp, 0, sizeof (gaht_temp)); - netdev_for_each_mc_addr(dmi, dev->ndev) { + netdev_for_each_mc_addr(ha, dev->ndev) { int slot, reg, mask; - DBG2(dev, "mc %pM" NL, dmi->dmi_addr); + DBG2(dev, "mc %pM" NL, ha->addr); - slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr)); + slot = EMAC_XAHT_CRC_TO_SLOT(dev, + ether_crc(ETH_ALEN, ha->addr)); reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot); @@ -1176,7 +1177,7 @@ static int emac_open(struct net_device *ndev) netif_carrier_on(dev->ndev); /* Required for Pause packet support in EMAC */ - dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); + dev_mc_add_global(ndev, default_mcast_addr); emac_configure(dev); mal_poll_add(dev->mal, &dev->commac); diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c index b5d0f4e973f..76949e08ee8 100644 --- a/drivers/net/ibmlana.c +++ b/drivers/net/ibmlana.c @@ -385,7 +385,7 @@ static void InitBoard(struct net_device *dev) int camcnt; camentry_t cams[16]; u32 cammask; - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; u16 rcrval; /* reset the SONIC */ @@ -420,8 +420,8 @@ static void InitBoard(struct net_device *dev) /* start putting the multicast addresses into the CAM list. Stop if it is full. */ - netdev_for_each_mc_addr(mcptr, dev) { - putcam(cams, &camcnt, mcptr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + putcam(cams, &camcnt, ha->addr); if (camcnt == 16) break; } @@ -479,7 +479,7 @@ static void InitBoard(struct net_device *dev) /* if still multicast addresses left or ALLMULTI is set, set the multicast enable bit */ - if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL)) + if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt) rcrval |= RCREG_AMC; /* promiscous mode ? 
*/ diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 0bc777bac9b..f468590ed45 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c @@ -1072,7 +1072,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); } } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* clear the filter table & disable filtering */ lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastEnableRecv | @@ -1083,10 +1083,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); } /* add the addresses to the filter table */ - netdev_for_each_mc_addr(mclist, netdev) { + netdev_for_each_mc_addr(ha, netdev) { // add the multicast address to the filter table unsigned long mcast_addr = 0; - memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6); + memcpy(((char *)&mcast_addr)+2, ha->addr, 6); lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index ea875709f05..78cc742e233 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -2864,7 +2864,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 *mta_list; int i; @@ -2881,8 +2881,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev) /* The shared function expects a packed array of only addresses. */ i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); igb_update_mc_addr_list(hw, mta_list, i); kfree(mta_list); diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index a77afd8a14b..ea8abf5c1ef 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -1399,7 +1399,7 @@ static void igbvf_set_multi(struct net_device *netdev) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 *mta_list = NULL; int i; @@ -1414,8 +1414,8 @@ static void igbvf_set_multi(struct net_device *netdev) /* prepare a packed array of only addresses. 
*/ i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); kfree(mta_list); diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 70871b9b045..57d873da978 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1664,7 +1664,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) static void ioc3_set_multicast_list(struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct ioc3_private *ip = netdev_priv(dev); struct ioc3 *ioc3 = ip->regs; u64 ehar = 0; @@ -1688,8 +1688,8 @@ static void ioc3_set_multicast_list(struct net_device *dev) ip->ehar_h = 0xffffffff; ip->ehar_l = 0xffffffff; } else { - netdev_for_each_mc_addr(dmi, dev) { - char *addr = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + char *addr = ha->addr; if (!(*addr & 1)) continue; diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 0d7ad3f2d0f..67cfc7d9d89 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -569,7 +569,7 @@ static int ipg_config_autoneg(struct net_device *dev) static void ipg_nic_set_multicast_list(struct net_device *dev) { void __iomem *ioaddr = ipg_ioaddr(dev); - struct dev_mc_list *mc_list_ptr; + struct netdev_hw_addr *ha; unsigned int hashindex; u32 hashtable[2]; u8 receivemode; @@ -608,9 +608,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev) hashtable[1] = 0x00000000; /* Cycle through all multicast addresses to filter. */ - netdev_for_each_mc_addr(mc_list_ptr, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Calculate CRC result for each multicast address. */ - hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, + hashindex = crc32_le(0xffffffff, ha->addr, ETH_ALEN); /* Use only the least significant 6 bits. */ diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index e6e972d9b7c..cd65b8629bc 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c @@ -961,15 +961,15 @@ static void veth_set_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > VETH_MAX_MCAST)) { port->promiscuous = 1; } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; port->promiscuous = 0; /* Update table */ port->num_mcast = 0; - netdev_for_each_mc_addr(dmi, dev) { - u8 *addr = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + u8 *addr = ha->addr; u64 xaddr = 0; if (addr[0] & 0x01) {/* multicast address? 
*/ diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c9fef65cb98..912dd1d5772 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1058,7 +1058,7 @@ ixgb_set_multi(struct net_device *netdev) { struct ixgb_adapter *adapter = netdev_priv(netdev); struct ixgb_hw *hw = &adapter->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u32 rctl; int i; @@ -1089,9 +1089,9 @@ ixgb_set_multi(struct net_device *netdev) IXGB_WRITE_REG(hw, RCTL, rctl); i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) + netdev_for_each_mc_addr(ha, netdev) memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], - mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); + ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS); ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); } diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 4d1c3a42945..6eb5814ca7d 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -1494,7 +1494,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev) { - struct dev_addr_list *dmi; + struct netdev_hw_addr *ha; u32 i; /* @@ -1510,9 +1510,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); /* Add the new addresses */ - netdev_for_each_mc_addr(dmi, netdev) { + netdev_for_each_mc_addr(ha, netdev) { hw_dbg(hw, " Adding the multicast addresses:\n"); - ixgbe_set_mta(hw, dmi->dmi_addr); + ixgbe_set_mta(hw, ha->addr); } /* Enable mta */ diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c index f457c52b5ed..852e9c4fd93 100644 --- a/drivers/net/ixgbevf/vf.c +++ b/drivers/net/ixgbevf/vf.c @@ -259,7 +259,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, struct net_device *netdev) { - struct dev_addr_list *dmi; + struct netdev_hw_addr *ha; struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; u16 *vector_list = (u16 *)&msgbuf[1]; @@ -281,10 +281,10 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; i = 0; - netdev_for_each_mc_addr(dmi, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == cnt) break; - vector_list[i++] = ixgbevf_mta_vector(hw, dmi->dmi_addr); + vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); diff --git a/drivers/net/jme.c b/drivers/net/jme.c index c0b59a55538..10e816d2caf 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -2009,12 +2009,12 @@ jme_set_multi(struct net_device *netdev) } else if (netdev->flags & IFF_ALLMULTI) { jme->reg_rxmcs |= RXMCS_ALLMULFRAME; } else if (netdev->flags & IFF_MULTICAST) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int bit_nr; jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; - netdev_for_each_mc_addr(mclist, netdev) { - bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; + netdev_for_each_mc_addr(ha, netdev) { + bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F; mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); } diff --git a/drivers/net/korina.c b/drivers/net/korina.c index edaedc7aa03..26bf1b76b99 100644 --- a/drivers/net/korina.c +++ b/drivers/net/korina.c @@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); unsigned long flags; - struct dev_mc_list *dmi; + struct 
netdev_hw_addr *ha; u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ int i; @@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev) for (i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - char *addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + char *addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 66be4e449f0..4dcd61f81ec 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -956,13 +956,13 @@ static void ks8851_set_rx_mode(struct net_device *dev) rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA | RXCR1_RXMAFMA); } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) { - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; u32 crc; /* accept some multicast */ - netdev_for_each_mc_addr(mcptr, dev) { - crc = ether_crc(ETH_ALEN, mcptr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); crc >>= (32 - 6); /* get top six bits */ rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c index ee3fe30b2ad..70a3d98f4bd 100644 --- a/drivers/net/ks8851_mll.c +++ b/drivers/net/ks8851_mll.c @@ -362,7 +362,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; #define MAX_MCAST_LST 32 #define HW_MCAST_SIZE 8 -#define MAC_ADDR_LEN 6 /** * union ks_tx_hdr - tx header data @@ -450,7 +449,7 @@ struct ks_net { u16 promiscuous; u16 all_mcast; u16 mcast_lst_size; - u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN]; + u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN]; u8 mcast_bits[HW_MCAST_SIZE]; u8 mac_addr[6]; u8 fid; @@ -1170,7 +1169,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast) static void ks_set_rx_mode(struct net_device *netdev) { struct ks_net *ks = netdev_priv(netdev); - struct dev_mc_list *ptr; + struct netdev_hw_addr *ha; /* Turn on/off promiscuous mode. */ if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC) @@ -1187,13 +1186,12 @@ static void ks_set_rx_mode(struct net_device *netdev) if (netdev_mc_count(netdev) <= MAX_MCAST_LST) { int i = 0; - netdev_for_each_mc_addr(ptr, netdev) { - if (!(*ptr->dmi_addr & 1)) + netdev_for_each_mc_addr(ha, netdev) { + if (!(*ha->addr & 1)) continue; if (i >= MAX_MCAST_LST) break; - memcpy(ks->mcast_lst[i++], ptr->dmi_addr, - MAC_ADDR_LEN); + memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN); } ks->mcast_lst_size = (u8)i; ks_set_grpaddr(ks); diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c index b843bf7d8c1..34876952161 100644 --- a/drivers/net/ksz884x.c +++ b/drivers/net/ksz884x.c @@ -5763,7 +5763,7 @@ static void netdev_set_rx_mode(struct net_device *dev) struct dev_priv *priv = netdev_priv(dev); struct dev_info *hw_priv = priv->adapter; struct ksz_hw *hw = &hw_priv->hw; - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; int multicast = (dev->flags & IFF_ALLMULTI); dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC)); @@ -5780,7 +5780,7 @@ static void netdev_set_rx_mode(struct net_device *dev) int i = 0; /* List too big to support so turn on all multicast mode. 
*/ - if (dev->mc_count > MAX_MULTICAST_LIST) { + if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) { if (MAX_MULTICAST_LIST != hw->multi_list_size) { hw->multi_list_size = MAX_MULTICAST_LIST; ++hw->all_multi; @@ -5789,13 +5789,12 @@ static void netdev_set_rx_mode(struct net_device *dev) return; } - netdev_for_each_mc_addr(mc_ptr, dev) { - if (!(*mc_ptr->dmi_addr & 1)) + netdev_for_each_mc_addr(ha, dev) { + if (!(*ha->addr & 1)) continue; if (i >= MAX_MULTICAST_LIST) break; - memcpy(hw->multi_list[i++], mc_ptr->dmi_addr, - MAC_ADDR_LEN); + memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN); } hw->multi_list_size = (u8) i; hw_set_grp_addr(hw); diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c index 443c39a3732..fddaf921885 100644 --- a/drivers/net/lib82596.c +++ b/drivers/net/lib82596.c @@ -1388,7 +1388,7 @@ static void set_multicast_list(struct net_device *dev) } if (!netdev_mc_empty(dev)) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char *cp; struct mc_cmd *cmd; @@ -1396,10 +1396,10 @@ static void set_multicast_list(struct net_device *dev) cmd->cmd.command = SWAP16(CmdMulticastList); cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); cp = cmd->mc_addrs; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (!cnt--) break; - memcpy(cp, dmi->dmi_addr, 6); + memcpy(cp, ha->addr, 6); if (i596_debug > 1) DEB(DEB_MULTI, printk(KERN_DEBUG diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c index 56f66f48540..526dc9cbc3c 100644 --- a/drivers/net/lib8390.c +++ b/drivers/net/lib8390.c @@ -905,10 +905,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev) static inline void make_mc_bits(u8 *bits, struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(dmi, dev) { - u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc(ETH_ALEN, ha->addr); /* * The 8390 uses the 6 most significant bits of the * CRC to index the multicast table. 
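The hunks on either side of this point all apply the same mechanical conversion: the driver-local struct dev_mc_list cursor becomes a struct netdev_hw_addr, netdev_for_each_mc_addr() supplies the iteration, and dmi_addr/da_addr reads become ha->addr. A minimal sketch of the resulting loop shape follows; foo_set_rx_mode() and foo_write_hash() are hypothetical names invented for illustration, while netdev_for_each_mc_addr(), struct netdev_hw_addr, ether_crc_le() and ETH_ALEN are the kernel interfaces the converted drivers actually use.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

/* hypothetical: a real driver would program its filter registers here */
static void foo_write_hash(struct net_device *dev, const u32 *hash)
{
}

static void foo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;	/* was: struct dev_mc_list *dmi */
	u32 hash[2] = { 0, 0 };

	netdev_for_each_mc_addr(ha, dev) {	/* was: a dev->mc_list walk */
		/* low 6 bits of the little-endian CRC select the filter bit */
		u32 bit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;

		hash[bit >> 5] |= 1 << (bit & 0x1f);
	}
	foo_write_hash(dev, hash);
}

Because the cursor is now the generic netdev_hw_addr shared with the unicast list, drivers no longer see a per-entry dmi_addrlen; where a length is still needed (as in the fec.c hunk above), dev->addr_len is read instead.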
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index a18e3485476..30474d6b15c 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -250,20 +250,20 @@ static void temac_set_multicast_list(struct net_device *ndev) temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); } else if (!netdev_mc_empty(ndev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; i = 0; - netdev_for_each_mc_addr(mclist, ndev) { + netdev_for_each_mc_addr(ha, ndev) { if (i >= MULTICAST_CAM_TABLE_NUM) break; - multi_addr_msw = ((mclist->dmi_addr[3] << 24) | - (mclist->dmi_addr[2] << 16) | - (mclist->dmi_addr[1] << 8) | - (mclist->dmi_addr[0])); + multi_addr_msw = ((ha->addr[3] << 24) | + (ha->addr[2] << 16) | + (ha->addr[1] << 8) | + (ha->addr[0])); temac_indirect_out32(lp, XTE_MAW0_OFFSET, multi_addr_msw); - multi_addr_lsw = ((mclist->dmi_addr[5] << 8) | - (mclist->dmi_addr[4]) | (i << 16)); + multi_addr_lsw = ((ha->addr[5] << 8) | + (ha->addr[4]) | (i << 16)); temac_indirect_out32(lp, XTE_MAW1_OFFSET, multi_addr_lsw); i++; diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 3e3cc04defd..72379c5439d 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c @@ -1256,7 +1256,7 @@ static void set_multicast_list(struct net_device *dev) { dev->name, netdev_mc_count(dev)); if (!netdev_mc_empty(dev)) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *cp; cmd = kmalloc(sizeof(struct i596_cmd) + 2 + netdev_mc_count(dev) * 6, GFP_ATOMIC); @@ -1267,8 +1267,8 @@ static void set_multicast_list(struct net_device *dev) { cmd->command = CmdMulticastList; *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6; cp = ((char *)(cmd + 1))+2; - netdev_for_each_mc_addr(dmi, dev) { - memcpy(cp, dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) { + memcpy(cp, ha->addr, 6); cp += 6; } if (i596_debug & LOG_SRCDST) diff --git a/drivers/net/macb.c b/drivers/net/macb.c index c8a18a6203c..eab121945d7 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -882,15 +882,15 @@ static int hash_get_index(__u8 *addr) */ static void macb_sethashtable(struct net_device *dev) { - struct dev_mc_list *curr; + struct netdev_hw_addr *ha; unsigned long mc_filter[2]; unsigned int bitnr; struct macb *bp = netdev_priv(dev); mc_filter[0] = mc_filter[1] = 0; - netdev_for_each_mc_addr(curr, dev) { - bitnr = hash_get_index(curr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + bitnr = hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } diff --git a/drivers/net/mace.c b/drivers/net/mace.c index ab5f0bf6d1a..2328a7399dc 100644 --- a/drivers/net/mace.c +++ b/drivers/net/mace.c @@ -598,7 +598,7 @@ static void mace_set_multicast(struct net_device *dev) mp->maccc |= PROM; } else { unsigned char multicast_filter[8]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < 8; i++) @@ -606,8 +606,8 @@ static void mace_set_multicast(struct net_device *dev) } else { for (i = 0; i < 8; i++) multicast_filter[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc_le(6, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); i = crc >> 26; /* bit number in multicast_filter */ multicast_filter[i >> 3] |= 1 << (i & 7); } diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 13ba8f4afb7..8a50c67e592 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -508,7 +508,7 @@ static void 
mace_set_multicast(struct net_device *dev) mb->maccc |= PROM; } else { unsigned char multicast_filter[8]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; if (dev->flags & IFF_ALLMULTI) { for (i = 0; i < 8; i++) { @@ -517,8 +517,8 @@ static void mace_set_multicast(struct net_device *dev) } else { for (i = 0; i < 8; i++) multicast_filter[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc_le(6, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(6, ha->addr); /* bit number in multicast_filter */ i = crc >> 26; multicast_filter[i >> 3] |= 1 << (i & 7); diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 5eb96fe6ec5..455464223b4 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c @@ -168,7 +168,7 @@ static void mlx4_en_clear_list(struct net_device *dev) static void mlx4_en_cache_mclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; char *mc_addrs; int mc_addrs_cnt = netdev_mc_count(dev); int i; @@ -179,8 +179,8 @@ static void mlx4_en_cache_mclist(struct net_device *dev) return; } i = 0; - netdev_for_each_mc_addr(mclist, dev) - memcpy(mc_addrs + i++ * ETH_ALEN, mclist->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) + memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); priv->mc_addrs = mc_addrs; priv->mc_addrs_cnt = mc_addrs_cnt; } diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index c97b6e4365a..d5ebe43b0e6 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -1769,7 +1769,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev) struct mv643xx_eth_private *mp = netdev_priv(dev); u32 *mc_spec; u32 *mc_other; - struct dev_addr_list *addr; + struct netdev_hw_addr *ha; int i; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { @@ -1794,8 +1794,8 @@ oom: memset(mc_spec, 0, 0x100); memset(mc_other, 0, 0x100); - netdev_for_each_mc_addr(addr, dev) { - u8 *a = addr->da_addr; + netdev_for_each_mc_addr(ha, dev) { + u8 *a = ha->addr; u32 *table; int entry; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e84dd3ee9c5..6d33adf988d 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -3001,7 +3001,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) { struct myri10ge_priv *mgp = netdev_priv(dev); struct myri10ge_cmd cmd; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; __be32 data[2] = { 0, 0 }; int err; @@ -3038,8 +3038,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev) } /* Walk the multicast list, and add each address */ - netdev_for_each_mc_addr(mc_list, dev) { - memcpy(data, &mc_list->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) { + memcpy(data, &ha->addr, 6); cmd.data0 = ntohl(data[0]); cmd.data1 = ntohl(data[1]); err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, @@ -3047,7 +3047,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev) if (err != 0) { netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", - err, mc_list->dmi_addr); + err, ha->addr); goto abort; } } diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index e5203878324..9250bf6573e 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -2493,12 +2493,12 @@ static void __set_rx_mode(struct net_device *dev) rx_mode = RxFilterEnable | AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys; } else { - struct dev_mc_list 
*mclist; + struct netdev_hw_addr *ha; int i; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; + netdev_for_each_mc_addr(ha, dev) { + int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff; mc_filter[b/8] |= (1 << (b & 0x07)); } rx_mode = RxFilterEnable | AcceptBroadcast diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index a945591298a..781ca893ee0 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -537,7 +537,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter, void netxen_p2_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 null_addr[6]; int i; @@ -571,8 +571,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev) netxen_nic_enable_mcast_filter(adapter); i = 0; - netdev_for_each_mc_addr(mc_ptr, netdev) - netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) + netxen_nic_set_mcast_addr(adapter, i++, ha->addr); /* Clear out remaining addresses */ while (i < adapter->max_mc_count) @@ -680,7 +680,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, void netxen_p3_nic_set_multi(struct net_device *netdev) { struct netxen_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; LIST_HEAD(del_list); @@ -707,8 +707,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) } if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(mc_ptr, netdev) - nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list); + netdev_for_each_mc_addr(ha, netdev) + nx_p3_nic_add_mac(adapter, ha->addr, &del_list); } send_fw_cmd: diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index 05c29c2cef2..a76fabe2629 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -596,7 +596,7 @@ static int init586(struct net_device *dev) struct iasetup_cmd_struct __iomem *ias_cmd; struct tdr_cmd_struct __iomem *tdr_cmd; struct mcsetup_cmd_struct __iomem *mc_cmd; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int num_addrs = netdev_mc_count(dev); ptr = p->scb + 1; @@ -725,8 +725,8 @@ static int init586(struct net_device *dev) writew(num_addrs * 6, &mc_cmd->mc_cnt); i = 0; - netdev_for_each_mc_addr(dmi, dev) - memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6); + netdev_for_each_mc_addr(ha, dev) + memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6); writew(make16(mc_cmd), &p->scb->cbl_offset); writeb(CUC_START, &p->scb->cmd_cuc); diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 0678f3106cb..7b52c466cf4 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -6313,7 +6313,6 @@ static void niu_set_rx_mode(struct net_device *dev) { struct niu *np = netdev_priv(dev); int i, alt_cnt, err; - struct dev_addr_list *addr; struct netdev_hw_addr *ha; unsigned long flags; u16 hash[16] = { 0, }; @@ -6365,8 +6364,8 @@ static void niu_set_rx_mode(struct net_device *dev) for (i = 0; i < 16; i++) hash[i] = 0xffff; } else if (!netdev_mc_empty(dev)) { - netdev_for_each_mc_addr(addr, dev) { - u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); diff --git a/drivers/net/octeon/octeon_mgmt.c 
b/drivers/net/octeon/octeon_mgmt.c index be368e5cbf7..ee894ed35f7 100644 --- a/drivers/net/octeon/octeon_mgmt.c +++ b/drivers/net/octeon/octeon_mgmt.c @@ -474,7 +474,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ struct octeon_mgmt_cam_state cam_state; - struct dev_addr_list *list; + struct netdev_hw_addr *ha; struct list_head *pos; int available_cam_entries; @@ -510,8 +510,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) } } if (multicast_mode == 0) { - netdev_for_each_mc_addr(list, netdev) - octeon_mgmt_cam_state_add(&cam_state, list->da_addr); + netdev_for_each_mc_addr(ha, netdev) + octeon_mgmt_cam_state_add(&cam_state, ha->addr); } diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c index 36785853a14..dc3b4c7914f 100644 --- a/drivers/net/pci-skeleton.c +++ b/drivers/net/pci-skeleton.c @@ -1813,12 +1813,12 @@ static void netdrv_set_rx_mode(struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 09291e60d30..333cb3344d5 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -1623,11 +1623,11 @@ static struct net_device_stats *get_stats(struct net_device *dev) static inline void make_mc_bits(u8 *bits, struct net_device *dev) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 crc; - netdev_for_each_mc_addr(dmi, dev) { - crc = ether_crc(ETH_ALEN, dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc(ETH_ALEN, ha->addr); /* * The 8390 uses the 6 most significant bits of the * CRC to index the multicast table. diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index b9dc80b9d04..6734f7d6da9 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -1196,11 +1196,11 @@ static void set_rx_mode(struct net_device *dev) memset(mc_filter, 0x00, sizeof(mc_filter)); outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26; mc_filter[bit >> 3] |= (1 << (bit & 7)); } outb(2, ioaddr + RX_MODE); /* Use normal mode. 
*/ diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index c717b143f11..c516c199635 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -1475,7 +1475,7 @@ static void set_multicast_list(struct net_device *dev) { mace_private *lp = netdev_priv(dev); int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; #ifdef PCMCIA_DEBUG { @@ -1495,8 +1495,8 @@ static void set_multicast_list(struct net_device *dev) if (num_addrs > 0) { /* Calculate multicast logical address filter */ memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); - netdev_for_each_mc_addr(dmi, dev) { - memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy(adr, ha->addr, ETHER_ADDR_LEN); BuildLAF(lp->multicast_ladrf, adr); } } diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 5adc662c4bf..f45c626003a 100644 --- a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -1616,12 +1616,12 @@ static void set_rx_mode(struct net_device *dev) rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; else { if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mc_addr; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mc_addr, dev) { - u_int position = ether_crc(6, mc_addr->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u_int position = ether_crc(6, ha->addr); #ifndef final_version /* Verify multicast address. */ - if ((mc_addr->dmi_addr[0] & 1) == 0) + if ((ha->addr[0] & 1) == 0) continue; #endif multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 4d1802e457b..656be931207 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -1398,7 +1398,7 @@ static void set_addresses(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; local_info_t *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct set_address_info sa_info; int i; @@ -1413,10 +1413,10 @@ static void set_addresses(struct net_device *dev) set_address(&sa_info, dev->dev_addr); i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i++ == 9) break; - set_address(&sa_info, dmi->dmi_addr); + set_address(&sa_info, ha->addr); } while (i++ < 9) set_address(&sa_info, dev->dev_addr); diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 084d78dd163..a2254f749a9 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c @@ -2590,7 +2590,7 @@ static void pcnet32_load_multicast(struct net_device *dev) struct pcnet32_private *lp = netdev_priv(dev); volatile struct pcnet32_init_block *ib = lp->init_block; volatile __le16 *mcast_table = (__le16 *)ib->filter; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned long ioaddr = dev->base_addr; char *addrs; int i; @@ -2611,8 +2611,8 @@ static void pcnet32_load_multicast(struct net_device *dev) ib->filter[1] = 0; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? 
*/ if (!(*addrs & 1)) diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c index a602da6418c..fed5df9c5ea 100644 --- a/drivers/net/ps3_gelic_net.c +++ b/drivers/net/ps3_gelic_net.c @@ -546,7 +546,7 @@ out: void gelic_net_set_multi(struct net_device *netdev) { struct gelic_card *card = netdev_card(netdev); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; unsigned int i; uint8_t *p; u64 addr; @@ -580,9 +580,9 @@ void gelic_net_set_multi(struct net_device *netdev) } /* set multicast addresses */ - netdev_for_each_mc_addr(mc, netdev) { + netdev_for_each_mc_addr(ha, netdev) { addr = 0; - p = mc->dmi_addr; + p = ha->addr; for (i = 0; i < ETH_ALEN; i++) { addr <<= 8; addr |= *p++; diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c index 14c999ab422..9a1daa4ebe2 100644 --- a/drivers/net/qlcnic/qlcnic_hw.c +++ b/drivers/net/qlcnic/qlcnic_hw.c @@ -415,7 +415,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr) void qlcnic_set_multi(struct net_device *netdev) { struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; u32 mode = VPORT_MISS_MODE_DROP; @@ -434,8 +434,8 @@ void qlcnic_set_multi(struct net_device *netdev) } if (!netdev_mc_empty(netdev)) { - netdev_for_each_mc_addr(mc_ptr, netdev) { - qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + qlcnic_nic_add_mac(adapter, ha->addr); } } diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index fd34f266c0a..76df96813a7 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device static void qlge_set_multicast_list(struct net_device *ndev) { struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); - struct dev_mc_list *mc_ptr; + struct netdev_hw_addr *ha; int i, status; status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); @@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev) if (status) goto exit; i = 0; - netdev_for_each_mc_addr(mc_ptr, ndev) { - if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, + netdev_for_each_mc_addr(ha, ndev) { + if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr, MAC_ADDR_TYPE_MULTI_MAC, i)) { netif_err(qdev, hw, qdev->ndev, "Failed to loadmulticast address.\n"); diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 15d5373dc8f..f5a0e963e68 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -938,7 +938,7 @@ static void r6040_multicast_list(struct net_device *dev) u16 *adrp; u16 reg; unsigned long flags; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; /* MAC Address */ @@ -973,8 +973,8 @@ static void r6040_multicast_list(struct net_device *dev) for (i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - char *addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + char *addrs = ha->addr; if (!(*addrs & 1)) continue; @@ -994,9 +994,9 @@ static void r6040_multicast_list(struct net_device *dev) } /* Multicast Address 1~4 case */ i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i < MCAST_MAX) { - adrp = (u16 *) dmi->dmi_addr; + adrp = (u16 *) ha->addr; iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 
f7ffa5d8ffe..64cd250f642 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -4785,12 +4785,12 @@ static void rtl_set_rx_mode(struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 2eb7f8a0d92..ad5a6a873b2 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c @@ -4964,7 +4964,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) static void s2io_set_multicast(struct net_device *dev) { int i, j, prev_cnt; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct s2io_nic *sp = netdev_priv(dev); struct XENA_dev_config __iomem *bar0 = sp->bar0; u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = @@ -5093,12 +5093,12 @@ static void s2io_set_multicast(struct net_device *dev) /* Create the new Rx filter list and update the same in H/W. */ i = 0; - netdev_for_each_mc_addr(mclist, dev) { - memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, + netdev_for_each_mc_addr(ha, dev) { + memcpy(sp->usr_addrs[i].addr, ha->addr, ETH_ALEN); mac_addr = 0; for (j = 0; j < ETH_ALEN; j++) { - mac_addr |= mclist->dmi_addr[j]; + mac_addr |= ha->addr[j]; mac_addr <<= 8; } mac_addr >>= 8; diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 9944e5d662c..f7de960ca06 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c @@ -2112,7 +2112,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc) uint64_t reg; void __iomem *port; int idx; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct net_device *dev = sc->sbm_dev; /* @@ -2161,10 +2161,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc) * XXX if the table overflows */ idx = 1; /* skip station address */ - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (idx == MAC_ADDR_COUNT) break; - reg = sbmac_addr2reg(mclist->dmi_addr); + reg = sbmac_addr2reg(ha->addr); port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); __raw_writeq(reg, port); idx++; diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index d87c4787fff..1b326058893 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c @@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev) (dev->flags & IFF_ALLMULTI)) mar0 = mar1 = 0xffffffff; else if (dev->flags & IFF_MULTICAST) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mc_list, dev) { + netdev_for_each_mc_addr(ha, dev) { u32 crc; unsigned bit = 0; - crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr); + crc = ~ether_crc(ETH_ALEN, ha->addr); crc >>= 24; if (crc & 0x01) bit |= 0x02; diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 88f2fb193ab..e07b82b266d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -1602,7 +1602,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) static void efx_set_multicast_list(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; union efx_multicast_hash *mc_hash = 
&efx->multicast_hash; u32 crc; int bit; @@ -1614,8 +1614,8 @@ static void efx_set_multicast_list(struct net_device *net_dev) memset(mc_hash, 0xff, sizeof(*mc_hash)); } else { memset(mc_hash, 0x00, sizeof(*mc_hash)); - netdev_for_each_mc_addr(mc_list, net_dev) { - crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, net_dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); set_bit_le(bit, mc_hash->byte); } diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 760d9e83a46..6a05b93ae6c 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c @@ -848,13 +848,13 @@ static void sis190_set_rx_mode(struct net_device *dev) rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { int bit_nr = - ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; + ether_crc(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index bc7ff411aad..6293592635b 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c @@ -2298,12 +2298,14 @@ static void set_rx_mode(struct net_device *net_dev) /* Accept Broadcast packet, destination address matchs our * MAC address, use Receive Filter to reject unwanted MCAST * packets */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; rx_mode = RFAAB; - netdev_for_each_mc_addr(mclist, net_dev) { - unsigned int bit_nr = - sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev); + netdev_for_each_mc_addr(ha, net_dev) { + unsigned int bit_nr; + + bit_nr = sis900_mcast_bitnr(ha->addr, + sis_priv->chipset_rev); mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf)); } } diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c index 1921a54ea99..7e5b9f310fb 100644 --- a/drivers/net/skfp/skfddi.c +++ b/drivers/net/skfp/skfddi.c @@ -852,7 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev) static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) { struct s_smc *smc = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; /* Enable promiscuous mode, if necessary */ if (dev->flags & IFF_PROMISC) { @@ -876,13 +876,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) /* use exact filtering */ // point to first multicast addr - netdev_for_each_mc_addr(dmi, dev) { - mac_add_multicast(smc, - (struct fddi_addr *)dmi->dmi_addr, - 1); + netdev_for_each_mc_addr(ha, dev) { + mac_add_multicast(smc, + (struct fddi_addr *)ha->addr, + 1); pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n", - dmi->dmi_addr); + ha->addr); } } else { // more MC addresses than HW supports diff --git a/drivers/net/skge.c b/drivers/net/skge.c index bd8c5e8413b..de5ef3877ba 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2917,7 +2917,7 @@ static void genesis_set_multicast(struct net_device *dev) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; u32 mode; u8 filter[8]; @@ -2937,8 +2937,8 @@ static void genesis_set_multicast(struct net_device *dev) skge->flow_status == FLOW_STAT_SYMMETRIC) genesis_add_filter(filter, pause_mc_addr); - netdev_for_each_mc_addr(list, dev) - 
genesis_add_filter(filter, list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); } xm_write32(hw, port, XM_MODE, mode); @@ -2956,7 +2956,7 @@ static void yukon_set_multicast(struct net_device *dev) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || skge->flow_status == FLOW_STAT_SYMMETRIC); u16 reg; @@ -2979,8 +2979,8 @@ static void yukon_set_multicast(struct net_device *dev) if (rx_pause) yukon_add_filter(filter, pause_mc_addr); - netdev_for_each_mc_addr(list, dev) - yukon_add_filter(filter, list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); } diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index f1c0ec308bf..5b97edb7a35 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -3627,7 +3627,7 @@ static void sky2_set_multicast(struct net_device *dev) struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; unsigned port = sky2->port; - struct dev_mc_list *list; + struct netdev_hw_addr *ha; u16 reg; u8 filter[8]; int rx_pause; @@ -3651,8 +3651,8 @@ static void sky2_set_multicast(struct net_device *dev) if (rx_pause) sky2_add_filter(filter, pause_mc_addr); - netdev_for_each_mc_addr(list, dev) - sky2_add_filter(filter, list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + sky2_add_filter(filter, ha->addr); } gma_write16(hw, port, GM_MC_ADDR_H1, diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c index 0f97b5af91e..6278734c735 100644 --- a/drivers/net/smc911x.c +++ b/drivers/net/smc911x.c @@ -1341,7 +1341,7 @@ static void smc911x_set_multicast_list(struct net_device *dev) * within that register. */ else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; /* Set the Hash perfec mode */ mcr |= MAC_CR_HPFILT_; @@ -1349,19 +1349,16 @@ static void smc911x_set_multicast_list(struct net_device *dev) /* start with a table of all zeros: reject all */ memset(multicast_table, 0, sizeof(multicast_table)); - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { u32 position; - /* do we have a pointer here? */ - if (!cur_addr) - break; /* make sure this is a multicast address - shouldn't this be a given if we have it here ? */ - if (!(*cur_addr->dmi_addr & 1)) - continue; + if (!(*ha->addr & 1)) + continue; /* upper 6 bits are used as hash index */ - position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26; + position = ether_crc(ETH_ALEN, ha->addr)>>26; multicast_table[position>>5] |= 1 << (position&0x1f); } diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c index ee1487791d5..9e5e9e792e8 100644 --- a/drivers/net/smc9194.c +++ b/drivers/net/smc9194.c @@ -417,7 +417,7 @@ static void smc_shutdown( int ioaddr ) /* - . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds ) + . Function: smc_setmulticast( int ioaddr, struct net_device *dev ) . Purpose: . This sets the internal hardware table to filter out unwanted multicast . packets before they take up memory. 
@@ -438,26 +438,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev) { int i; unsigned char multicast_table[ 8 ]; - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; /* table for flipping the order of 3 bits */ unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; /* start with a table of all zeros: reject all */ memset( multicast_table, 0, sizeof( multicast_table ) ); - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { int position; - /* do we have a pointer here? */ - if ( !cur_addr ) - break; /* make sure this is a multicast address - shouldn't this be a given if we have it here ? */ - if ( !( *cur_addr->dmi_addr & 1 ) ) + if (!(*ha->addr & 1)) continue; /* only use the low order bits */ - position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f; + position = ether_crc_le(6, ha->addr) & 0x3f; /* do some messy swapping to put the bit in the right spot */ multicast_table[invert3[position&7]] |= diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 2e8133809dc..20c75657763 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -1413,7 +1413,7 @@ static void smc_set_multicast_list(struct net_device *dev) * within that register. */ else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; /* table for flipping the order of 3 bits */ static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7}; @@ -1421,16 +1421,16 @@ static void smc_set_multicast_list(struct net_device *dev) /* start with a table of all zeros: reject all */ memset(multicast_table, 0, sizeof(multicast_table)); - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { int position; /* make sure this is a multicast address - shouldn't this be a given if we have it here ? 
*/ - if (!(*cur_addr->dmi_addr & 1)) + if (!(*ha->addr & 1)) continue; /* only use the low order bits */ - position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; + position = crc32_le(~0, ha->addr, 6) & 0x3f; /* do some messy swapping to put the bit in the right spot */ multicast_table[invert3[position&7]] |= diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 4fd1d8b3878..d6e3a12d65d 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -1383,13 +1383,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev) /* Enabling specific multicast addresses */ unsigned int hash_high = 0; unsigned int hash_low = 0; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; pdata->set_bits_mask = MAC_CR_HPFILT_; pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); - netdev_for_each_mc_addr(mc_list, dev) { - unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + unsigned int bitnum = smsc911x_hash(ha->addr); unsigned int mask = 0x01 << (bitnum & 0x1F); if (bitnum & 0x20) diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index 34fa10d8ad4..5409ec1a5fd 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c @@ -1063,12 +1063,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev) mac_cr |= MAC_CR_MCPAS_; mac_cr &= (~MAC_CR_HPFILT_); } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 hash_lo = 0, hash_hi = 0; smsc_dbg(HW, "Multicast filter enabled"); - netdev_for_each_mc_addr(mc_list, dev) { - u32 bit_num = smsc9420_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + u32 bit_num = smsc9420_hash(ha->addr); u32 mask = 1 << (bit_num & 0x1F); if (bit_num & 0x20) diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c index 287c251075e..e5d67327d70 100644 --- a/drivers/net/sonic.c +++ b/drivers/net/sonic.c @@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); unsigned int rcr; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char *addr; int i; @@ -550,8 +550,8 @@ static void sonic_multicast_list(struct net_device *dev) netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ i = 1; - netdev_for_each_mc_addr(dmi, dev) { - addr = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addr = ha->addr; sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 5ba9d989f8f..c04c8f9a2d9 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c @@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) static void spider_net_set_multi(struct net_device *netdev) { - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; u8 hash; int i; u32 reg; @@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev) hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ set_bit(0xfd, bitmask); - netdev_for_each_mc_addr(mc, netdev) { - hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + hash = spider_net_get_multicast_hash(netdev, ha->addr); set_bit(hash, bitmask); } diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c index 6dfa6989901..8a6d27cdc0b 100644 --- a/drivers/net/starfire.c +++ b/drivers/net/starfire.c @@ -1766,7 
+1766,7 @@ static void set_rx_mode(struct net_device *dev) struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u32 rx_mode = MinVLANPrio; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; #ifdef VLAN_SUPPORT @@ -1804,8 +1804,8 @@ static void set_rx_mode(struct net_device *dev) /* Use the 16 element perfect filter, skip first two entries. */ void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; __be16 *eaddrs; - netdev_for_each_mc_addr(mclist, dev) { - eaddrs = (__be16 *)mclist->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (__be16 *) ha->addr; writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; @@ -1825,10 +1825,10 @@ static void set_rx_mode(struct net_device *dev) __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { /* The chip uses the upper 9 CRC bits as index into the hash table */ - int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; + int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23; __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; *fptr |= cpu_to_le32(1 << (bit_nr & 31)); diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c index a183de29c39..c0a1c9df3ac 100644 --- a/drivers/net/stmmac/dwmac100.c +++ b/drivers/net/stmmac/dwmac100.c @@ -316,7 +316,7 @@ static void dwmac100_set_filter(struct net_device *dev) MAC_CONTROL_HO | MAC_CONTROL_HP); } else { u32 mc_filter[2]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* Perfect filter mode for physical address and Hash filter for multicast */ @@ -325,11 +325,11 @@ static void dwmac100_set_filter(struct net_device *dev) MAC_CONTROL_IF | MAC_CONTROL_HO); memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { /* The upper 6 bits of the calculated CRC are used to * index the contens of the hash table */ int bit_nr = - ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + ether_crc(ETH_ALEN, ha->addr) >> 26; /* The most significant bit determines the register to * use (H/L) while the other 5 bits determine the bit * within the register. */ diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c index a6538ae4694..bf73fda6adf 100644 --- a/drivers/net/stmmac/dwmac1000_core.c +++ b/drivers/net/stmmac/dwmac1000_core.c @@ -94,17 +94,17 @@ static void dwmac1000_set_filter(struct net_device *dev) writel(0xffffffff, ioaddr + GMAC_HASH_LOW); } else if (!netdev_mc_empty(dev)) { u32 mc_filter[2]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; /* Hash filter for multicast */ value = GMAC_FRAME_FILTER_HMC; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { /* The upper 6 bits of the calculated CRC are used to index the contens of the hash table */ int bit_nr = - bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; + bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; /* The most significant bit determines the register to * use (H/L) while the other 5 bits determine the bit * within the register. 
*/ diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 2f6a760e5f2..2cfa065c39c 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -413,7 +413,7 @@ static int init586(struct net_device *dev) volatile struct iasetup_cmd_struct *ias_cmd; volatile struct tdr_cmd_struct *tdr_cmd; volatile struct mcsetup_cmd_struct *mc_cmd; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int num_addrs=netdev_mc_count(dev); ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); @@ -537,9 +537,9 @@ static int init586(struct net_device *dev) mc_cmd->mc_cnt = swab16(num_addrs * 6); i = 0; - netdev_for_each_mc_addr(dmi, dev) + netdev_for_each_mc_addr(ha, dev) memcpy((char *) mc_cmd->mc_list[i++], - dmi->dmi_addr, ETH_ALEN); + ha->addr, ETH_ALEN); p->scb->cbl_offset = make16(mc_cmd); p->scb->cmd_cuc = CUC_START; diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index a0bd361d5ec..4dd159b4116 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev) { struct bigmac *bp = netdev_priv(dev); void __iomem *bregs = bp->bregs; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; int i; u32 tmp, crc; @@ -1028,8 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev) for (i = 0; i < 4; i++) hash_table[i] = 0; - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c index a855934dfc3..47e8cce10d1 100644 --- a/drivers/net/sundance.c +++ b/drivers/net/sundance.c @@ -1523,13 +1523,13 @@ static void set_rx_mode(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int bit; int index; int crc; memset (mc_filter, 0, sizeof (mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + crc = ether_crc_le(ETH_ALEN, ha->addr); for (index=0, bit=0; bit < 6; bit++, crc <<= 1) if (crc & 0x80000000) index |= 1 << bit; mc_filter[index/16] |= (1 << (index % 16)); diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 70196bc5fe6..58a27541ae3 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c @@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp) } else { u16 hash_table[16]; u32 crc; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, gp->dev) { - char *addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, gp->dev) { + char *addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index b17dbb11bd6..20deb14e98f 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp) hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); } else if ((hp->dev->flags & IFF_PROMISC) == 0) { u16 hash_table[4]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, hp->dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, hp->dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; @@ -2362,7 +2362,7 @@ static void happy_meal_set_multicast(struct net_device *dev) { 
struct happy_meal *hp = netdev_priv(dev); void __iomem *bregs = hp->bigmacregs; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; @@ -2380,8 +2380,8 @@ static void happy_meal_set_multicast(struct net_device *dev) u16 hash_table[4]; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index d7c73f478ef..674570584bd 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1170,7 +1170,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) static void lance_load_multicast(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; char *addrs; u32 crc; u32 val; @@ -1195,8 +1195,8 @@ static void lance_load_multicast(struct net_device *dev) return; /* Add addresses */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; /* multicast address? */ if (!(*addrs & 1)) diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index be637dce944..239f0977219 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) static void qe_set_multicast(struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u8 new_mconfig = qep->mconfig; char *addrs; int i; @@ -651,8 +651,8 @@ static void qe_set_multicast(struct net_device *dev) u8 *hbytes = (unsigned char *) &hash_table[0]; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if (!(*addrs & 1)) continue; diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index 6b1b7cea7f6..6cf8b06be5c 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c @@ -763,12 +763,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) static void __update_mc_list(struct vnet *vp, struct net_device *dev) { - struct dev_addr_list *p; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(p, dev) { + netdev_for_each_mc_addr(ha, dev) { struct vnet_mcast_entry *m; - m = __vnet_mc_find(vp, p->dmi_addr); + m = __vnet_mc_find(vp, ha->addr); if (m) { m->hit = 1; continue; @@ -778,7 +778,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev) m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) continue; - memcpy(m->addr, p->dmi_addr, ETH_ALEN); + memcpy(m->addr, ha->addr, ETH_ALEN); m->hit = 1; m->next = vp->mcast_list; diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index 49bd84c0d58..36149ddace4 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -1954,16 +1954,16 @@ tc35815_set_multicast_list(struct net_device *dev) /* Disable promiscuous mode, use normal mode. */ tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *cur_addr; + struct netdev_hw_addr *ha; int i; int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); tc_writel(0, &tr->CAM_Ctl); /* Walk the address list, and load the filter */ i = 0; - netdev_for_each_mc_addr(cur_addr, dev) { + netdev_for_each_mc_addr(ha, dev) { /* entry 0,1 is reserved. 
*/ - tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr); + tc35815_set_cam_entry(dev, i + 2, ha->addr); ena_bits |= CAM_Ena_Bit(i + 2); i++; } diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 068a47174fc..a38aede5c8d 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev) WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); } else if (!netdev_mc_empty(ndev)) { u8 hash; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 reg, val; /* set IMF to deny all multicast frames */ @@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev) * into RX_MAC_MCST regs. we skip this phase now and accept ALL * multicast frames throu IMF */ /* accept the rest of addresses throu IMF */ - netdev_for_each_mc_addr(mclist, ndev) { + netdev_for_each_mc_addr(ha, ndev) { hash = 0; for (i = 0; i < ETH_ALEN; i++) - hash ^= mclist->dmi_addr[i]; + hash ^= ha->addr[i]; reg = regRX_MCST_HASH0 + ((hash >> 5) << 2); val = READ_REG(priv, reg); val |= (1 << (hash % 32)); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 22cf1c446de..aff11f84f52 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -9457,14 +9457,14 @@ static void __tg3_set_rx_mode(struct net_device *dev) tg3_set_multi (tp, 0); } else { /* Accept one or more multicast(s). */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u32 mc_filter[4] = { 0, }; u32 regidx; u32 bit; u32 crc; - netdev_for_each_mc_addr(mclist, dev) { - crc = calc_crc (mclist->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); bit = ~crc & 0x7f; regidx = (bit & 0x60) >> 5; bit &= 0x1f; diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index 05ea30a94e8..8ffec22b74b 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c @@ -1314,7 +1314,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev ) static void TLan_SetMulticastList( struct net_device *dev ) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u32 hash1 = 0; u32 hash2 = 0; int i; @@ -1336,12 +1336,12 @@ static void TLan_SetMulticastList( struct net_device *dev ) TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); } else { i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if ( i < 3 ) { TLan_SetMac( dev, i + 1, - (char *) &dmi->dmi_addr ); + (char *) &ha->addr); } else { - offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); + offset = TLan_HashFunc((u8 *)&ha->addr); if ( offset < 32 ) hash1 |= ( 1 << offset ); else diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 8c54d89e60d..4673e38c52a 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -1390,7 +1390,7 @@ static int xl_close(struct net_device *dev) static void xl_set_rx_mode(struct net_device *dev) { struct xl_private *xl_priv = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char dev_mc_address[4] ; u16 options ; @@ -1407,11 +1407,11 @@ static void xl_set_rx_mode(struct net_device *dev) dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - netdev_for_each_mc_addr(dmi, dev) { - dev_mc_address[0] |= dmi->dmi_addr[2] ; - dev_mc_address[1] |= dmi->dmi_addr[3] ; - dev_mc_address[2] |= dmi->dmi_addr[4] ; - dev_mc_address[3] |= dmi->dmi_addr[5] ; + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= 
ha->addr[5]; } if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */ diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 1a0967246e2..eebdaae2432 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev) static void tok_set_multicast_list(struct net_device *dev) { struct tok_info *ti = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; unsigned char address[4]; int i; @@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev) /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; address[0] = address[1] = address[2] = address[3] = 0; - netdev_for_each_mc_addr(mclist, dev) { - address[0] |= mclist->dmi_addr[2]; - address[1] |= mclist->dmi_addr[3]; - address[2] |= mclist->dmi_addr[4]; - address[3] |= mclist->dmi_addr[5]; + netdev_for_each_mc_addr(ha, dev) { + address[0] |= ha->addr[2]; + address[1] |= ha->addr[3]; + address[2] |= ha->addr[4]; + address[3] |= ha->addr[5]; } SET_PAGE(ti->srb_page); for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c index 01c780f25e9..88c893100c2 100644 --- a/drivers/net/tokenring/lanstreamer.c +++ b/drivers/net/tokenring/lanstreamer.c @@ -1266,7 +1266,7 @@ static void streamer_set_rx_mode(struct net_device *dev) netdev_priv(dev); __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; __u8 options = 0; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char dev_mc_address[5]; writel(streamer_priv->srb, streamer_mmio + LAPA); @@ -1302,11 +1302,11 @@ static void streamer_set_rx_mode(struct net_device *dev) writel(streamer_priv->srb,streamer_mmio+LAPA); dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - netdev_for_each_mc_addr(dmi, dev) { - dev_mc_address[0] |= dmi->dmi_addr[2] ; - dev_mc_address[1] |= dmi->dmi_addr[3] ; - dev_mc_address[2] |= dmi->dmi_addr[4] ; - dev_mc_address[3] |= dmi->dmi_addr[5] ; + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; } writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC); diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c index 3decaf4b6cf..3d2fbe60b46 100644 --- a/drivers/net/tokenring/olympic.c +++ b/drivers/net/tokenring/olympic.c @@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev) u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; u8 options = 0; u8 __iomem *srb; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; unsigned char dev_mc_address[4] ; writel(olympic_priv->srb,olympic_mmio+LAPA); @@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev) dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; - netdev_for_each_mc_addr(dmi, dev) { - dev_mc_address[0] |= dmi->dmi_addr[2] ; - dev_mc_address[1] |= dmi->dmi_addr[3] ; - dev_mc_address[2] |= dmi->dmi_addr[4] ; - dev_mc_address[3] |= dmi->dmi_addr[5] ; + netdev_for_each_mc_addr(ha, dev) { + dev_mc_address[0] |= ha->addr[2]; + dev_mc_address[1] |= ha->addr[3]; + dev_mc_address[2] |= ha->addr[4]; + dev_mc_address[3] |= ha->addr[5]; } 
writeb(SRB_SET_FUNC_ADDRESS,srb+0); diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c index b545e20d289..9fa2c36ff90 100644 --- a/drivers/net/tokenring/tms380tr.c +++ b/drivers/net/tokenring/tms380tr.c @@ -1212,17 +1212,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev) } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { ((char *)(&tp->ocpl.FunctAddr))[0] |= - mclist->dmi_addr[2]; + ha->addr[2]; ((char *)(&tp->ocpl.FunctAddr))[1] |= - mclist->dmi_addr[3]; + ha->addr[3]; ((char *)(&tp->ocpl.FunctAddr))[2] |= - mclist->dmi_addr[4]; + ha->addr[4]; ((char *)(&tp->ocpl.FunctAddr))[3] |= - mclist->dmi_addr[5]; + ha->addr[5]; } } tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index 80333a4d328..1366541c30a 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev) if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { int i; - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; memset(data->mc_hash, 0, sizeof(data->mc_hash)); - netdev_for_each_mc_addr(mc, dev) { + netdev_for_each_mc_addr(ha, dev) { u32 hash, crc; - crc = ether_crc(6, mc->dmi_addr); + crc = ether_crc(6, ha->addr); hash = crc >> 23; __set_bit(hash, &data->mc_hash[0]); } diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index cb429723b2c..a68b9176462 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -670,15 +670,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); u16 hash_table[32]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well. */ - netdev_for_each_mc_addr(mclist, dev) { - int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; + netdev_for_each_mc_addr(ha, dev) { + int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; set_bit_le(index, hash_table); } @@ -699,13 +699,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct de_private *de = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. 
*/ - netdev_for_each_mc_addr(mclist, dev) { - eaddrs = (u16 *)mclist->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (u16 *) ha->addr; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 55ade8949a6..cac4bcc8318 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -1951,7 +1951,7 @@ static void SetMulticastFilter(struct net_device *dev) { struct de4x5_private *lp = netdev_priv(dev); - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; u_long iobase = dev->base_addr; int i, bit, byte; u16 hashcode; @@ -1966,8 +1966,8 @@ SetMulticastFilter(struct net_device *dev) if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { omr |= OMR_PM; /* Pass all multicasts */ } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; if ((*addrs & 0x01) == 1) { /* multicast address? */ crc = ether_crc_le(ETH_ALEN, addrs); hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ @@ -1983,8 +1983,8 @@ SetMulticastFilter(struct net_device *dev) } } } else { /* Perfect filtering */ - netdev_for_each_mc_addr(dmi, dev) { - addrs = dmi->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrs = ha->addr; for (i=0; i<ETH_ALEN; i++) { /* Cycle through all MC addresses */ *(pa + (i&0x1)) = *addrs++; if (i & 0x01) pa += 4; diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -1457,7 +1457,7 @@ static void dm9132_id_table(struct DEVICE *dev) { - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; u16 * addrptr; unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */ u32 hash_val; @@ -1478,8 +1478,8 @@ static void dm9132_id_table(struct DEVICE *dev) hash_table[3] = 0x8000; /* the multicast address in Hash Table : 64 bits */ - netdev_for_each_mc_addr(mcptr, dev) { - hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); } @@ -1497,7 +1497,7 @@ static void dm9132_id_table(struct DEVICE *dev) static void send_filter_frame(struct DEVICE *dev) { struct dmfe_board_info *db = netdev_priv(dev); - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; struct tx_desc *txptr; u16 * addrptr; u32 * suptr; @@ -1520,8 +1520,8 @@ static void send_filter_frame(struct DEVICE *dev) *suptr++ = 0xffff; /* fit the multicast address */ - netdev_for_each_mc_addr(mcptr, dev) { - addrptr = (u16 *) mcptr->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrptr = (u16 *) ha->addr; *suptr++ = addrptr[0]; *suptr++ = addrptr[1]; *suptr++ = addrptr[2]; diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 7f544ef2f5f..c4b7cd726b6 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -990,15 +990,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); u16 hash_table[32]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; u16 *eaddrs; memset(hash_table, 0, sizeof(hash_table)); set_bit_le(255, hash_table); /* Broadcast entry */ /* This should work on big-endian machines as well.
*/ - netdev_for_each_mc_addr(mclist, dev) { - int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; + netdev_for_each_mc_addr(ha, dev) { + int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; set_bit_le(index, hash_table); } @@ -1018,13 +1018,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u16 *eaddrs; /* We have <= 14 addresses so we can use the wonderful 16 address perfect filtering of the Tulip. */ - netdev_for_each_mc_addr(mclist, dev) { - eaddrs = (u16 *)mclist->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + eaddrs = (u16 *) ha->addr; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; @@ -1061,7 +1061,7 @@ static void set_rx_mode(struct net_device *dev) } else if (tp->flags & MC_HASH_ONLY) { /* Some work-alikes have only a 64-entry hash filter table. */ /* Should verify correctness on big-endian/__powerpc__ */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; if (netdev_mc_count(dev) > 64) { /* Arbitrary non-effective limit. */ tp->csr6 |= AcceptAllMulticast; @@ -1069,18 +1069,21 @@ static void set_rx_mode(struct net_device *dev) } else { u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ int filterbit; - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { if (tp->flags & COMET_MAC_ADDR) - filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr); + filterbit = ether_crc_le(ETH_ALEN, + ha->addr); else - filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + filterbit = ether_crc(ETH_ALEN, + ha->addr) >> 26; filterbit &= 0x3f; mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); if (tulip_debug > 2) dev_info(&dev->dev, "Added filter for %pM %08x bit %d\n", - mclist->dmi_addr, - ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); + ha->addr, + ether_crc(ETH_ALEN, ha->addr), + filterbit); } if (mc_filter[0] == tp->mc_filter[0] && mc_filter[1] == tp->mc_filter[1]) diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 0ab05af237e..b79d908fe34 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c @@ -1392,7 +1392,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr) static void send_filter_frame(struct net_device *dev, int mc_cnt) { struct uli526x_board_info *db = netdev_priv(dev); - struct dev_mc_list *mcptr; + struct netdev_hw_addr *ha; struct tx_desc *txptr; u16 * addrptr; u32 * suptr; @@ -1415,8 +1415,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) *suptr++ = 0xffff << FLT_SHIFT; /* fit the multicast address */ - netdev_for_each_mc_addr(mcptr, dev) { - addrptr = (u16 *) mcptr->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addrptr = (u16 *) ha->addr; *suptr++ = addrptr[0] << FLT_SHIFT; *suptr++ = addrptr[1] << FLT_SHIFT; *suptr++ = addrptr[2] << FLT_SHIFT; diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index 304f43866c4..3e3822c98a0 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c @@ -1367,13 +1367,15 @@ static u32 __set_rx_mode(struct net_device *dev) memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - 
netdev_for_each_mc_addr(mclist, dev) { - int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; - filterbit &= 0x3f; - mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); + netdev_for_each_mc_addr(ha, dev) { + int filbit; + + filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; + filbit &= 0x3f; + mc_filter[filbit >> 5] |= 1 << (filbit & 31); } rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; } diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index cd24e5f2b2a..3053f85aa06 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -921,11 +921,11 @@ typhoon_set_rx_mode(struct net_device *dev) /* Too many to match, or accept all multicasts. */ filter |= TYPHOON_RX_FILTER_ALL_MCAST; } else if (!netdev_mc_empty(dev)) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; + netdev_for_each_mc_addr(ha, dev) { + int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit >> 5] |= 1 << (bit & 0x1f); } diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 75743a76bbe..081f76bff34 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) static void ucc_geth_set_multi(struct net_device *dev) { struct ucc_geth_private *ugeth; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; struct ucc_fast __iomem *uf_regs; struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; @@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev) out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { /* Only support group multicast for now. */ - if (!(dmi->dmi_addr[0] & 1)) + if (!(ha->addr[0] & 1)) continue; /* Ask CPM to run CRC and set bit in * filter mask. */ - hw_add_addr_in_hash(ugeth, dmi->dmi_addr); + hw_add_addr_in_hash(ugeth, ha->addr); } } } diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 9e05639435f..763364f0972 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -557,16 +557,14 @@ static void asix_set_multicast(struct net_device *net) * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 crc_bits; memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); /* Build the multicast hash filter. */ - netdev_for_each_mc_addr(mc_list, net) { - crc_bits = - ether_crc(ETH_ALEN, - mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } @@ -793,16 +791,14 @@ static void ax88172_set_multicast(struct net_device *net) * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 crc_bits; memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); /* Build the multicast hash filter. 
*/ - netdev_for_each_mc_addr(mc_list, net) { - crc_bits = - ether_crc(ETH_ALEN, - mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 96f1ebe0d34..0c48ff97f47 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast) static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; u8 broadcast[6]; u8 rx = RxEnable | RxPolarity | RxMultiCast; @@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev) if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { - netdev_for_each_mc_addr(mc, netdev) { - u32 crc = ether_crc_le(6, mc->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 269339769f4..4eb1fb31ff0 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -386,10 +386,10 @@ static void dm9601_set_multicast(struct net_device *net) netdev_mc_count(net) > DM_MAX_MCAST) { rx_ctl |= 0x04; } else if (!netdev_mc_empty(net)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(mc_list, net) { - u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26; hashes[crc >> 3] |= 1 << (crc & 0x7); } } diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c index 70978219e98..0dc92c8ba4b 100644 --- a/drivers/net/usb/mcs7830.c +++ b/drivers/net/usb/mcs7830.c @@ -452,12 +452,12 @@ static void mcs7830_data_set_multicast(struct net_device *net) * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; u32 crc_bits; /* Build the multicast hash filter. 
*/ - netdev_for_each_mc_addr(mc_list, net) { - crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } } diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 300e3e764fa..b8b00d06ea7 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -444,14 +444,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev) netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; } else if (!netdev_mc_empty(dev->net)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; - netdev_for_each_mc_addr(mc_list, netdev) { - u32 bitnum = smsc75xx_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + u32 bitnum = smsc75xx_hash(ha->addr); pdata->multicast_hash_table[bitnum / 32] |= (1 << (bitnum % 32)); } diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 73f9a31cf94..ccd55ca3a06 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -384,13 +384,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev) pdata->mac_cr |= MAC_CR_MCPAS_; pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_); } else if (!netdev_mc_empty(dev->net)) { - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; pdata->mac_cr |= MAC_CR_HPFILT_; pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); - netdev_for_each_mc_addr(mc_list, netdev) { - u32 bitnum = smsc95xx_hash(mc_list->dmi_addr); + netdev_for_each_mc_addr(ha, netdev) { + u32 bitnum = smsc95xx_hash(ha->addr); u32 mask = 0x01 << (bitnum & 0x1F); if (bitnum & 0x20) hash_hi |= mask; diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 50f881aa393..d9133c62a2f 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -1704,11 +1704,11 @@ static void rhine_set_rx_mode(struct net_device *dev) iowrite32(0xffffffff, ioaddr + MulticastFilter1); rx_mode = 0x0C; } else { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); } diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 3a486f3bad3..91f3b841288 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev) struct mac_regs __iomem *regs = vptr->mac_regs; u8 rx_mode; int i; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ writel(0xffffffff, &regs->MARCAM[0]); @@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev) mac_get_cam_mask(regs, vptr->mCAMmask); i = 0; - netdev_for_each_mc_addr(mclist, dev) { - mac_set_cam(regs, i + offset, mclist->dmi_addr); + netdev_for_each_mc_addr(ha, dev) { + mac_set_cam(regs, i + offset, ha->addr); vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); i++; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3f5be35dea2..ecec9a8527f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -721,7 +721,6 @@ static void virtnet_set_rx_mode(struct net_device *dev) struct scatterlist sg[2]; u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; - struct dev_addr_list *addr; struct netdev_hw_addr *ha; int uc_count; int mc_count; @@ -778,8 +777,8 @@ static void virtnet_set_rx_mode(struct net_device *dev) mac_data->entries = mc_count; i = 0; - netdev_for_each_mc_addr(addr, dev) - memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) + memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); sg_set_buf(&sg[1], mac_data, sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index cff3485d967..58dfa367bf8 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1675,11 +1675,11 @@ vmxnet3_copy_mc(struct net_device *netdev) /* We may be called with BH disabled */ buf = kmalloc(sz, GFP_ATOMIC); if (buf) { - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; int i = 0; - netdev_for_each_mc_addr(mc, netdev) - memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr, + netdev_for_each_mc_addr(ha, netdev) + memcpy(buf + i++ * ETH_ALEN, ha->addr, ETH_ALEN); } } diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c index e5f2d3ee0df..37836a10d09 100644 --- a/drivers/net/vxge/vxge-main.c +++ b/drivers/net/vxge/vxge-main.c @@ -1117,7 +1117,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) */ static void vxge_set_multicast(struct net_device *dev) { - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct vxgedev *vdev; int i, mcast_cnt = 0; struct __vxge_hw_device *hldev; @@ -1217,8 +1217,8 @@ static void vxge_set_multicast(struct net_device *dev) } /* Add new ones */ - netdev_for_each_mc_addr(mclist, dev) { - memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy(mac_info.macaddr, ha->addr, ETH_ALEN); for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) { mac_info.vpath_no = vpath_idx; diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c index 547912e6843..2277998b726 100644 --- a/drivers/net/wireless/adm8211.c +++ b/drivers/net/wireless/adm8211.c @@ -1317,21 +1317,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev, } static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { - unsigned int bit_nr, i; + unsigned int bit_nr; u32 mc_filter[2]; + struct netdev_hw_addr *ha; mc_filter[1] = mc_filter[0] = 0; - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; - bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_hw_addr_list_for_each(ha, mc_list) { - bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; bit_nr &= 0x3F; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); - mclist = mclist->next; } return mc_filter[0] | ((u64)(mc_filter[1]) << 32); diff --git
a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c index 257c734733d..b43d4b006d7 100644 --- a/drivers/net/wireless/ath/ar9170/main.c +++ b/drivers/net/wireless/ath/ar9170/main.c @@ -2045,21 +2045,17 @@ out: return err; } -static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, - struct dev_addr_list *mclist) +static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { u64 mchash; - int i; + struct netdev_hw_addr *ha; /* always get broadcast frames */ mchash = 1ULL << (0xff >> 2); - for (i = 0; i < mc_count; i++) { - if (WARN_ON(!mclist)) - break; - mchash |= 1ULL << (mclist->dmi_addr[5] >> 2); - mclist = mclist->next; - } + netdev_hw_addr_list_for_each(ha, mc_list) + mchash |= 1ULL << (ha->addr[5] >> 2); return mchash; } diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index b142a78ed1e..53a2340f52b 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -230,7 +230,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif); static int ath5k_config(struct ieee80211_hw *hw, u32 changed); static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mc_list); + struct netdev_hw_addr_list *mc_list); static void ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, @@ -2999,22 +2999,20 @@ unlock: } static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { u32 mfilt[2], val; - int i; u8 pos; + struct netdev_hw_addr *ha; mfilt[0] = 0; mfilt[1] = 1; - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; + netdev_hw_addr_list_for_each(ha, mc_list) { /* calculate XOR of eight 6-bit values */ - val = get_unaligned_le32(mclist->dmi_addr + 0); + val = get_unaligned_le32(ha->addr + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; - val = get_unaligned_le32(mclist->dmi_addr + 3); + val = get_unaligned_le32(ha->addr + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); @@ -3022,8 +3020,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, * but not sure, needs testing, if we do use this we'd * neet to inform below to not reset the mcast */ /* ath5k_hw_set_mcast_filterindex(ah, - * mclist->dmi_addr[5]); */ - mclist = mclist->next; + * ha->addr[5]); */ } return ((u64)(mfilt[1]) << 32) | mfilt[0]; diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 3c889f43d90..ff7b0d0cca5 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -318,7 +318,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd, struct net_device *dev, int nr_addrs) { int i = nr_addrs; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; int cnt; if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) @@ -326,19 +326,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd, netif_addr_lock_bh(dev); cnt = netdev_mc_count(dev); - netdev_for_each_mc_addr(mc_list, dev) { - if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { + netdev_for_each_mc_addr(ha, dev) { + if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) { lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, - mc_list->dmi_addr); + ha->addr); cnt--; continue; } if (i == 
MRVDRV_MAX_MULTICAST_LIST_SIZE) break; - memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN); + memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN); lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, - mc_list->dmi_addr); + ha->addr); i++; cnt--; } diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c index 6ab30033c26..c20eef6b95c 100644 --- a/drivers/net/wireless/libertas_tf/main.c +++ b/drivers/net/wireless/libertas_tf/main.c @@ -367,22 +367,20 @@ static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed) } static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct lbtf_private *priv = hw->priv; int i; + struct netdev_hw_addr *ha; + int mc_count = netdev_hw_addr_list_count(mc_list); if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) return mc_count; priv->nr_of_multicastmacaddr = mc_count; - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; - memcpy(&priv->multicastlist[i], mclist->da_addr, - ETH_ALEN); - mclist = mclist->next; - } + i = 0; + netdev_hw_addr_list_for_each(ha, mc_list) + memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN); return mc_count; } diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c index ac65e13eb0d..6599fd15e67 100644 --- a/drivers/net/wireless/mwl8k.c +++ b/drivers/net/wireless/mwl8k.c @@ -1938,11 +1938,15 @@ struct mwl8k_cmd_mac_multicast_adr { static struct mwl8k_cmd_pkt * __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct mwl8k_priv *priv = hw->priv; struct mwl8k_cmd_mac_multicast_adr *cmd; int size; + int mc_count = 0; + + if (mc_list) + mc_count = netdev_hw_addr_list_count(mc_list); if (allmulti || mc_count > priv->num_mcaddrs) { allmulti = 1; @@ -1963,17 +1967,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, if (allmulti) { cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST); } else if (mc_count) { - int i; + struct netdev_hw_addr *ha; + int i = 0; cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); cmd->numaddr = cpu_to_le16(mc_count); - for (i = 0; i < mc_count && mclist; i++) { - if (mclist->da_addrlen != ETH_ALEN) { - kfree(cmd); - return NULL; - } - memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN); - mclist = mclist->next; + netdev_hw_addr_list_for_each(ha, mc_list) { + memcpy(cmd->addr[i], ha->addr, ETH_ALEN); } } @@ -3552,7 +3552,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct mwl8k_cmd_pkt *cmd; @@ -3563,7 +3563,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, * we'll end up throwing this packet away and creating a new * one in mwl8k_configure_filter(). 
*/ - cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist); + cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list); return (unsigned long)cmd; } @@ -3686,7 +3686,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw, */ if (*total_flags & FIF_ALLMULTI) { kfree(cmd); - cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL); + cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL); } if (cmd != NULL) { diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c index 883b8f86862..9f657afaa3e 100644 --- a/drivers/net/wireless/orinoco/hw.c +++ b/drivers/net/wireless/orinoco/hw.c @@ -1056,14 +1056,14 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, * group address if either we want to multicast, or if we were * multicasting and want to stop */ if (!promisc && (mc_count || priv->mc_count)) { - struct dev_mc_list *p; + struct netdev_hw_addr *ha; struct hermes_multicast mclist; int i = 0; - netdev_for_each_mc_addr(p, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i == mc_count) break; - memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN); + memcpy(mclist.addr[i++], ha->addr, ETH_ALEN); } err = hermes_write_ltv(hw, USER_BAP, diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h index 9799a1d14a6..97af71e7995 100644 --- a/drivers/net/wireless/orinoco/hw.h +++ b/drivers/net/wireless/orinoco/hw.h @@ -22,7 +22,6 @@ /* Forward declarations */ struct orinoco_private; -struct dev_addr_list; int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name, size_t fw_name_len, u32 *hw_ver); diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 4f5bdb528ef..54680a3a5ac 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -1875,17 +1875,17 @@ static void ray_update_multi_list(struct net_device *dev, int all) writeb(0xff, &pccs->var); local->num_multi = 0xff; } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i = 0; /* Copy the kernel's list of MC addresses to card */ - netdev_for_each_mc_addr(dmi, dev) { - memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) { + memcpy_toio(p, ha->addr, ETH_ALEN); dev_dbg(&link->dev, "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", - dmi->dmi_addr[0], dmi->dmi_addr[1], - dmi->dmi_addr[2], dmi->dmi_addr[3], - dmi->dmi_addr[4], dmi->dmi_addr[5]); + ha->addr[0], ha->addr[1], + ha->addr[2], ha->addr[3], + ha->addr[4], ha->addr[5]); p += ETH_ALEN; i++; } diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index aceb95ef727..8ab9f094747 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -1545,7 +1545,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid) static void set_multicast_list(struct usbnet *usbdev) { struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; __le32 filter, basefilter; int ret; char *mc_addrs = NULL; @@ -1584,9 +1584,9 @@ static void set_multicast_list(struct usbnet *usbdev) return; } - netdev_for_each_mc_addr(mclist, usbdev->net) + netdev_for_each_mc_addr(ha, usbdev->net) memcpy(mc_addrs + i++ * ETH_ALEN, - mclist->dmi_addr, ETH_ALEN); + ha->addr, ETH_ALEN); } netif_addr_unlock_bh(usbdev->net); diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index 2b928ecf47b..fb8a62f2b9b 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ 
b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -723,10 +723,10 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev, priv->rf->conf_erp(dev, info); } -static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count, - struct dev_addr_list *mc_list) +static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, + struct netdev_hw_addr_list *mc_list) { - return mc_count; + return netdev_hw_addr_list_count(mc_list); } static void rtl8180_configure_filter(struct ieee80211_hw *dev, diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c index 0fb850e0c65..441d817ed48 100644 --- a/drivers/net/wireless/rtl818x/rtl8187_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c @@ -1193,9 +1193,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev, } static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev, - int mc_count, struct dev_addr_list *mc_list) + struct netdev_hw_addr_list *mc_list) { - return mc_count; + return netdev_hw_addr_list_count(mc_list); } static void rtl8187_configure_filter(struct ieee80211_hw *dev, diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c index 3daba6c0c77..6036d0206fe 100644 --- a/drivers/net/wireless/wl12xx/wl1271_main.c +++ b/drivers/net/wireless/wl12xx/wl1271_main.c @@ -1266,11 +1266,11 @@ struct wl1271_filter_params { u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; }; -static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, - struct dev_addr_list *mc_list) +static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { struct wl1271_filter_params *fp; - int i; + struct netdev_hw_addr *ha; fp = kzalloc(sizeof(*fp), GFP_ATOMIC); if (!fp) { @@ -1279,21 +1279,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, } /* update multicast filtering parameters */ - fp->enabled = true; - if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) { - mc_count = 0; - fp->enabled = false; - } - fp->mc_list_length = 0; - for (i = 0; i < mc_count; i++) { - if (mc_list->da_addrlen == ETH_ALEN) { + if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) { + fp->enabled = false; + } else { + fp->enabled = true; + netdev_hw_addr_list_for_each(ha, mc_list) { memcpy(fp->mc_list[fp->mc_list_length], - mc_list->da_addr, ETH_ALEN); + ha->addr, ETH_ALEN); fp->mc_list_length++; - } else - wl1271_warning("Unknown mc address length."); - mc_list = mc_list->next; + } } return (u64)(unsigned long)fp; diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 6917286edca..74530b2d672 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -875,7 +875,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev) static void zd1201_set_multicast(struct net_device *dev) { struct zd1201 *zd = netdev_priv(dev); - struct dev_mc_list *mc; + struct netdev_hw_addr *ha; unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; int i; @@ -883,8 +883,8 @@ static void zd1201_set_multicast(struct net_device *dev) return; i = 0; - netdev_for_each_mc_addr(mc, dev) - memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN); + netdev_for_each_mc_addr(ha, dev) + memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN); zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, netdev_mc_count(dev) * ETH_ALEN, 0); } diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 00e09e26c82..6d95e4d74d7 100644 
--- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -947,20 +947,17 @@ static void set_rx_filter_handler(struct work_struct *work) } static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mclist) + struct netdev_hw_addr_list *mc_list) { struct zd_mac *mac = zd_hw_mac(hw); struct zd_mc_hash hash; - int i; + struct netdev_hw_addr *ha; zd_mc_clear(&hash); - for (i = 0; i < mc_count; i++) { - if (!mclist) - break; - dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr); - zd_mc_add_addr(&hash, mclist->dmi_addr); - mclist = mclist->next; + netdev_hw_addr_list_for_each(ha, mc_list) { + dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr); + zd_mc_add_addr(&hash, ha->addr); } return hash.low | ((u64)hash.high << 32); diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 7d4107f5eeb..34c91cf5d83 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c @@ -1300,25 +1300,25 @@ static void set_rx_mode(struct net_device *dev) /* Too many to filter well, or accept all multicasts. */ iowrite16(0x000B, ioaddr + AddrMode); } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */ - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; u16 hash_table[4]; int i; memset(hash_table, 0, sizeof(hash_table)); - netdev_for_each_mc_addr(mclist, dev) { + netdev_for_each_mc_addr(ha, dev) { unsigned int bit; /* Due to a bug in the early chip versions, multiple filter slots must be set for each address. */ if (yp->drv_flags & HasMulticastBug) { - bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); - bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); - bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); } - bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f; + bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); } /* Copy the hash table to the chip. 
*/ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 7576ad5a833..945f3e0a9f0 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -607,7 +607,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) static void qeth_l2_set_multicast_list(struct net_device *dev) { struct qeth_card *card = dev->ml_priv; - struct dev_addr_list *dm; struct netdev_hw_addr *ha; if (card->info.type == QETH_CARD_TYPE_OSN) @@ -619,8 +618,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev) return; qeth_l2_del_all_mc(card); spin_lock_bh(&card->mclock); - netdev_for_each_mc_addr(dm, dev) - qeth_l2_add_mc(card, dm->da_addr, 0); + netdev_for_each_mc_addr(ha, dev) + qeth_l2_add_mc(card, ha->addr, 0); netdev_for_each_uc_addr(ha, dev) qeth_l2_add_mc(card, ha->addr, 1); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index de33e38a405..37de40e0168 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -311,7 +311,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, dev_uc_add(netdev, flogi_maddr); if (fip->spma) dev_uc_add(netdev, fip->ctl_src_addr); - dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + dev_mc_add(netdev, FIP_ALL_ENODE_MACS); /* * setup the receive function from ethernet driver @@ -397,7 +397,7 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe) dev_uc_del(netdev, flogi_maddr); if (fip->spma) dev_uc_del(netdev, fip->ctl_src_addr); - dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); + dev_mc_del(netdev, FIP_ALL_ENODE_MACS); /* Tell the LLD we are done w/ FCoE */ ops = netdev->netdev_ops; diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c index 88fdd53cf5d..80284522c42 100644 --- a/drivers/staging/arlan/arlan-main.c +++ b/drivers/staging/arlan/arlan-main.c @@ -1458,7 +1458,7 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short !netdev_mc_empty(dev)) { char hw_dst_addr[6]; - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i; memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6); @@ -1469,12 +1469,13 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short printk(KERN_ERR "%s mcast 0x0100 \n", dev->name); else if (hw_dst_addr[1] == 0x40) printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name); - netdev_for_each_mc_entry(dmi, dev) { + netdev_for_each_mc_entry(ha, dev) { if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP) printk(KERN_ERR "%s mcl %pM\n", - dev->name, dmi->dmi_addr); + dev->name, + ha->addr); for (i = 0; i < 6; i++) - if (dmi->dmi_addr[i] != hw_dst_addr[i]) + if (ha->addr[i] != hw_dst_addr[i]) break; if (i == 6) break; diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c index 40f8954dde4..2fb89cddef1 100644 --- a/drivers/staging/et131x/et131x_netdev.c +++ b/drivers/staging/et131x/et131x_netdev.c @@ -405,7 +405,7 @@ void et131x_multicast(struct net_device *netdev) struct et131x_adapter *adapter = netdev_priv(netdev); uint32_t PacketFilter = 0; unsigned long flags; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; int i; spin_lock_irqsave(&adapter->Lock, flags); @@ -450,10 +450,10 @@ void et131x_multicast(struct net_device *netdev) /* Set values in the private adapter struct */ i = 0; - netdev_for_each_mc_addr(mclist, netdev) { + netdev_for_each_mc_addr(ha, netdev) { if (i == NIC_MAX_MCAST_LIST) break; - memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN); + memcpy(adapter->MCList[i++], ha->addr, 
ETH_ALEN); } adapter->MCAddressCount = i; diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c index 7daeced317c..bebf0fd2af8 100644 --- a/drivers/staging/slicoss/slicoss.c +++ b/drivers/staging/slicoss/slicoss.c @@ -1367,12 +1367,12 @@ static void slic_mcast_set_list(struct net_device *dev) struct adapter *adapter = netdev_priv(dev); int status = STATUS_SUCCESS; char *addresses; - struct dev_mc_list *mc_list; + struct netdev_hw_addr *ha; ASSERT(adapter); - netdev_for_each_mc_addr(mc_list, dev) { - addresses = (char *) &mc_list->dmi_addr; + netdev_for_each_mc_addr(ha, dev) { + addresses = (char *) &ha->addr; status = slic_mcast_add_list(adapter, addresses); if (status != STATUS_SUCCESS) break; diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 1d643653a7e..b698de40a60 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -3079,7 +3079,7 @@ static void device_set_multi(struct net_device *dev) { PSMgmtObject pMgmt = pDevice->pMgmt; u32 mc_filter[2]; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode)); @@ -3099,8 +3099,8 @@ static void device_set_multi(struct net_device *dev) { } else { memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); } MACvSelectPage1(pDevice->PortOffset); diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index a8e1adbc959..49270db98fb 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -1596,7 +1596,7 @@ static void device_set_multi(struct net_device *dev) { PSMgmtObject pMgmt = &(pDevice->sMgmtObj); u32 mc_filter[2]; int ii; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; BYTE byTmpMode = 0; int rc; @@ -1632,8 +1632,8 @@ static void device_set_multi(struct net_device *dev) { } else { memset(mc_filter, 0, sizeof(mc_filter)); - netdev_for_each_mc_addr(mclist, dev) { - int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); } for (ii = 0; ii < 4; ii++) { diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c index 54ca63196fd..f44ef351647 100644 --- a/drivers/staging/wavelan/wavelan.c +++ b/drivers/staging/wavelan/wavelan.c @@ -3419,7 +3419,7 @@ static void wv_82586_config(struct net_device * dev) ac_cfg_t cfg; /* Configure action */ ac_ias_t ias; /* IA-setup action */ ac_mcs_t mcs; /* Multicast setup */ - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; #ifdef DEBUG_CONFIG_TRACE printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name); @@ -3531,16 +3531,16 @@ static void wv_82586_config(struct net_device * dev) /* Any address to set? 
*/ if (lp->mc_count) { - netdev_for_each_mc_addr(dmi, dev) - outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr, + netdev_for_each_mc_addr(ha, dev) + outsw(PIOP1(ioaddr), (u16 *) ha->addr, WAVELAN_ADDR_SIZE >> 1); #ifdef DEBUG_CONFIG_INFO printk(KERN_DEBUG "%s: wv_82586_config(): set %d multicast addresses:\n", dev->name, lp->mc_count); - netdev_for_each_mc_addr(dmi, dev) - printk(KERN_DEBUG " %pM\n", dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + printk(KERN_DEBUG " %pM\n", ha->addr); #endif } diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c index 04f691d127b..a90132a204e 100644 --- a/drivers/staging/wavelan/wavelan_cs.c +++ b/drivers/staging/wavelan/wavelan_cs.c @@ -3591,20 +3591,20 @@ wv_82593_config(struct net_device * dev) /* If roaming is enabled, join the "Beacon Request" multicast group... */ /* But only if it's not in there already! */ if(do_roaming) - dev_mc_add(dev,WAVELAN_BEACON_ADDRESS, WAVELAN_ADDR_SIZE, 1); + dev_mc_add(dev, WAVELAN_BEACON_ADDRESS); #endif /* WAVELAN_ROAMING */ /* If any multicast address to set */ if(lp->mc_count) { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count; #ifdef DEBUG_CONFIG_INFO printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n", dev->name, lp->mc_count); - netdev_for_each_mc_addr(dmi, dev) - printk(KERN_DEBUG " %pM\n", dmi->dmi_addr); + netdev_for_each_mc_addr(ha, dev) + printk(KERN_DEBUG " %pM\n", ha->addr); #endif /* Initialize adapter's ethernet multicast addresses */ @@ -3612,8 +3612,8 @@ wv_82593_config(struct net_device * dev) outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base)); outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */ outb((addrs_len >> 8), PIOP(base)); /* byte count msb */ - netdev_for_each_mc_addr(dmi, dev) - outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen); + netdev_for_each_mc_addr(ha, dev) + outsb(PIOP(base), ha->addr, dev->addr_len); /* reset transmit DMA pointer */ hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET); diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c index 3482eec1865..5d9499bba9c 100644 --- a/drivers/staging/winbond/wbusb.c +++ b/drivers/staging/winbond/wbusb.c @@ -92,10 +92,10 @@ static int wbsoft_get_stats(struct ieee80211_hw *hw, return 0; } -static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, int mc_count, - struct dev_addr_list *mc_list) +static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) { - return mc_count; + return netdev_hw_addr_list_count(mc_list); } static void wbsoft_configure_filter(struct ieee80211_hw *dev, diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c index fa082d90fca..d7532e89f5f 100644 --- a/drivers/staging/wlags49_h2/wl_netdev.c +++ b/drivers/staging/wlags49_h2/wl_netdev.c @@ -1049,7 +1049,7 @@ void wl_multicast( struct net_device *dev ) //;?seems reasonable that even an AP-only driver could afford this small additional footprint int x; - struct dev_mc_list *mclist; + struct netdev_hw_addr *ha; struct wl_private *lp = wl_priv(dev); unsigned long flags; /*------------------------------------------------------------------------*/ @@ -1072,9 +1072,9 @@ void wl_multicast( struct net_device *dev ) DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev)); - netdev_for_each_mc_addr(mclist, dev) - DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr), - mclist->dmi_addrlen ); + netdev_for_each_mc_addr(ha, dev) + DBG_PRINT(" %s 
(%d)\n", DbgHwAddr(ha->addr), + dev->addr_len); } #endif /* DBG */ @@ -1119,9 +1119,9 @@ void wl_multicast( struct net_device *dev ) lp->ltvRecord.typ = CFG_GROUP_ADDR; x = 0; - netdev_for_each_mc_addr(mclist, dev) + netdev_for_each_mc_addr(ha, dev) memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]), - mclist->dmi_addr, ETH_ALEN); + ha->addr, ETH_ALEN); DBG_PRINT( "Setting multicast list\n" ); hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord )); } else { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 60f0c83192f..a343a21ba8b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -228,25 +228,6 @@ struct netif_rx_stats { DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); -struct dev_addr_list { - struct dev_addr_list *next; - u8 da_addr[MAX_ADDR_LEN]; - u8 da_addrlen; - u8 da_synced; - int da_users; - int da_gusers; -}; - -/* - * We tag multicasts with these structures. - */ - -#define dev_mc_list dev_addr_list -#define dmi_addr da_addr -#define dmi_addrlen da_addrlen -#define dmi_users da_users -#define dmi_gusers da_gusers - struct netdev_hw_addr { struct list_head list; unsigned char addr[MAX_ADDR_LEN]; @@ -255,8 +236,10 @@ struct netdev_hw_addr { #define NETDEV_HW_ADDR_T_SAN 2 #define NETDEV_HW_ADDR_T_SLAVE 3 #define NETDEV_HW_ADDR_T_UNICAST 4 +#define NETDEV_HW_ADDR_T_MULTICAST 5 int refcount; bool synced; + bool global_use; struct rcu_head rcu_head; }; @@ -265,16 +248,20 @@ struct netdev_hw_addr_list { int count; }; -#define netdev_uc_count(dev) ((dev)->uc.count) -#define netdev_uc_empty(dev) ((dev)->uc.count == 0) -#define netdev_for_each_uc_addr(ha, dev) \ - list_for_each_entry(ha, &dev->uc.list, list) +#define netdev_hw_addr_list_count(l) ((l)->count) +#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) +#define netdev_hw_addr_list_for_each(ha, l) \ + list_for_each_entry(ha, &(l)->list, list) -#define netdev_mc_count(dev) ((dev)->mc_count) -#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) +#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) +#define netdev_for_each_uc_addr(ha, dev) \ + netdev_hw_addr_list_for_each(ha, &(dev)->uc) +#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) +#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) #define netdev_for_each_mc_addr(mclist, dev) \ - for (mclist = dev->mc_list; mclist; mclist = mclist->next) + netdev_hw_addr_list_for_each(ha, &(dev)->mc) struct hh_cache { struct hh_cache *hh_next; /* Next entry */ @@ -862,12 +849,10 @@ struct net_device { unsigned char addr_len; /* hardware address length */ unsigned short dev_id; /* for shared network cards */ - struct netdev_hw_addr_list uc; /* Secondary unicast - mac addresses */ - int uc_promisc; spinlock_t addr_list_lock; - struct dev_addr_list *mc_list; /* Multicast mac addresses */ - int mc_count; /* Number of installed mcasts */ + struct netdev_hw_addr_list uc; /* Unicast mac addresses */ + struct netdev_hw_addr_list mc; /* Multicast mac addresses */ + int uc_promisc; unsigned int promiscuity; unsigned int allmulti; @@ -1980,6 +1965,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, extern int register_netdev(struct net_device *dev); extern void unregister_netdev(struct net_device *dev); +/* General hardware address lists handling functions */ +extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, 
unsigned char addr_type); +extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type); +extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len); +extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len); +extern void __hw_addr_flush(struct netdev_hw_addr_list *list); +extern void __hw_addr_init(struct netdev_hw_addr_list *list); + /* Functions used for device addresses handling */ extern int dev_addr_add(struct net_device *dev, unsigned char *addr, unsigned char addr_type); @@ -2002,18 +2003,19 @@ extern void dev_uc_unsync(struct net_device *to, struct net_device *from); extern void dev_uc_flush(struct net_device *dev); extern void dev_uc_init(struct net_device *dev); +/* Functions used for multicast addresses handling */ +extern int dev_mc_add(struct net_device *dev, unsigned char *addr); +extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); +extern int dev_mc_del(struct net_device *dev, unsigned char *addr); +extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); +extern int dev_mc_sync(struct net_device *to, struct net_device *from); +extern void dev_mc_unsync(struct net_device *to, struct net_device *from); +extern void dev_mc_flush(struct net_device *dev); +extern void dev_mc_init(struct net_device *dev); + /* Functions used for secondary unicast and multicast support */ extern void dev_set_rx_mode(struct net_device *dev); extern void __dev_set_rx_mode(struct net_device *dev); -extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); -extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); -extern int dev_mc_sync(struct net_device *to, struct net_device *from); -extern void dev_mc_unsync(struct net_device *to, struct net_device *from); -extern void dev_addr_discard(struct net_device *dev); -extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all); -extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); -extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); -extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); extern int dev_set_promiscuity(struct net_device *dev, int inc); extern int dev_set_allmulti(struct net_device *dev, int inc); extern void netdev_state_change(struct net_device *dev); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 1a8f50af49a..20823d04e03 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1626,7 +1626,7 @@ struct ieee80211_ops { struct ieee80211_bss_conf *info, u32 changed); u64 (*prepare_multicast)(struct ieee80211_hw *hw, - int mc_count, struct dev_addr_list *mc_list); + struct netdev_hw_addr_list *mc_list); void (*configure_filter)(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, diff --git a/net/802/garp.c b/net/802/garp.c index 1dcb0660c49..78cff9ec2cb 100644 --- a/net/802/garp.c +++ b/net/802/garp.c @@ -575,7 +575,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl) if (!app) goto err2; - err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0); + err = dev_mc_add(dev, appl->proto.group_address); if (err < 0) goto err3; @@ 
-615,7 +615,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl garp_pdu_queue(app); garp_queue_xmit(app); - dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0); + dev_mc_del(dev, appl->proto.group_address); kfree(app); garp_release_port(dev); } diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 9fc4da56fb1..1d15a60b23a 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -781,7 +781,7 @@ static int atif_ioctl(int cmd, void __user *arg) atrtr_create(&rtdef, dev); } } - dev_mc_add(dev, aarp_mcast, 6, 1); + dev_mc_add_global(dev, aarp_mcast); return 0; case SIOCGIFADDR: diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c index 326ab453edb..260a9507e54 100644 --- a/net/bluetooth/bnep/netdev.c +++ b/net/bluetooth/bnep/netdev.c @@ -87,7 +87,7 @@ static void bnep_net_set_mc_list(struct net_device *dev) memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); r->len = htons(ETH_ALEN * 2); } else { - struct dev_mc_list *dmi; + struct netdev_hw_addr *ha; int i, len = skb->len; if (dev->flags & IFF_BROADCAST) { @@ -98,11 +98,11 @@ static void bnep_net_set_mc_list(struct net_device *dev) /* FIXME: We should group addresses here. */ i = 0; - netdev_for_each_mc_addr(dmi, dev) { + netdev_for_each_mc_addr(ha, dev) { if (i == BNEP_MAX_MULTICAST_FILTERS) break; - memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); - memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); + memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); + memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); } r->len = htons(skb->len - len); } diff --git a/net/core/Makefile b/net/core/Makefile index 0a899f1aadb..51c3eec850e 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -7,9 +7,8 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o -obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \ - neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ - dev_addr_lists.o +obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ + neighbour.o rtnetlink.o utils.o link_watch.o filter.o obj-$(CONFIG_XFRM) += flow.o obj-y += net-sysfs.o diff --git a/net/core/dev.c b/net/core/dev.c index 949c62dba71..2a9b7dd0bb6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3968,140 +3968,6 @@ void dev_set_rx_mode(struct net_device *dev) netif_addr_unlock_bh(dev); } -/* multicast addresses handling functions */ - -int __dev_addr_delete(struct dev_addr_list **list, int *count, - void *addr, int alen, int glbl) -{ - struct dev_addr_list *da; - - for (; (da = *list) != NULL; list = &da->next) { - if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && - alen == da->da_addrlen) { - if (glbl) { - int old_glbl = da->da_gusers; - da->da_gusers = 0; - if (old_glbl == 0) - break; - } - if (--da->da_users) - return 0; - - *list = da->next; - kfree(da); - (*count)--; - return 0; - } - } - return -ENOENT; -} - -int __dev_addr_add(struct dev_addr_list **list, int *count, - void *addr, int alen, int glbl) -{ - struct dev_addr_list *da; - - for (da = *list; da != NULL; da = da->next) { - if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && - da->da_addrlen == alen) { - if (glbl) { - int old_glbl = da->da_gusers; - da->da_gusers = 1; - if (old_glbl) - return 0; - } - da->da_users++; - return 0; - } - } - - da = kzalloc(sizeof(*da), GFP_ATOMIC); - if (da == NULL) - return -ENOMEM; - memcpy(da->da_addr, addr, alen); - da->da_addrlen = alen; - da->da_users = 1; - da->da_gusers = glbl ? 
1 : 0; - da->next = *list; - *list = da; - (*count)++; - return 0; -} - - -int __dev_addr_sync(struct dev_addr_list **to, int *to_count, - struct dev_addr_list **from, int *from_count) -{ - struct dev_addr_list *da, *next; - int err = 0; - - da = *from; - while (da != NULL) { - next = da->next; - if (!da->da_synced) { - err = __dev_addr_add(to, to_count, - da->da_addr, da->da_addrlen, 0); - if (err < 0) - break; - da->da_synced = 1; - da->da_users++; - } else if (da->da_users == 1) { - __dev_addr_delete(to, to_count, - da->da_addr, da->da_addrlen, 0); - __dev_addr_delete(from, from_count, - da->da_addr, da->da_addrlen, 0); - } - da = next; - } - return err; -} -EXPORT_SYMBOL_GPL(__dev_addr_sync); - -void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, - struct dev_addr_list **from, int *from_count) -{ - struct dev_addr_list *da, *next; - - da = *from; - while (da != NULL) { - next = da->next; - if (da->da_synced) { - __dev_addr_delete(to, to_count, - da->da_addr, da->da_addrlen, 0); - da->da_synced = 0; - __dev_addr_delete(from, from_count, - da->da_addr, da->da_addrlen, 0); - } - da = next; - } -} -EXPORT_SYMBOL_GPL(__dev_addr_unsync); - -static void __dev_addr_discard(struct dev_addr_list **list) -{ - struct dev_addr_list *tmp; - - while (*list != NULL) { - tmp = *list; - *list = tmp->next; - if (tmp->da_users > tmp->da_gusers) - printk("__dev_addr_discard: address leakage! " - "da_users=%d\n", tmp->da_users); - kfree(tmp); - } -} - -void dev_addr_discard(struct net_device *dev) -{ - netif_addr_lock_bh(dev); - - __dev_addr_discard(&dev->mc_list); - netdev_mc_count(dev) = 0; - - netif_addr_unlock_bh(dev); -} -EXPORT_SYMBOL(dev_addr_discard); - /** * dev_get_flags - get flags reported to userspace * @dev: device @@ -4412,8 +4278,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, - dev->addr_len, 1); + return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCDELMULTI: if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || @@ -4421,8 +4286,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, - dev->addr_len, 1); + return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) @@ -4730,7 +4594,7 @@ static void rollback_registered_many(struct list_head *head) * Flush the unicast and multicast chains */ dev_uc_flush(dev); - dev_addr_discard(dev); + dev_mc_flush(dev); if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); @@ -5310,6 +5174,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, if (dev_addr_init(dev)) goto free_rx; + dev_mc_init(dev); dev_uc_init(dev); dev_net_set(dev, &init_net); @@ -5545,7 +5410,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char * Flush the unicast and multicast chains */ dev_uc_flush(dev); - dev_addr_discard(dev); + dev_mc_flush(dev); netdev_unregister_kobject(dev); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 7e52b6d18ad..37d5975e18a 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -19,8 +19,9 @@ * General list handling functions */ -static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, - int addr_len, unsigned char addr_type) +static int __hw_addr_add_ex(struct 
netdev_hw_addr_list *list, + unsigned char *addr, int addr_len, + unsigned char addr_type, bool global) { struct netdev_hw_addr *ha; int alloc_size; @@ -31,6 +32,13 @@ static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && ha->type == addr_type) { + if (global) { + /* check if addr is already used as global */ + if (ha->global_use) + return 0; + else + ha->global_use = true; + } ha->refcount++; return 0; } @@ -46,12 +54,19 @@ static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, memcpy(ha->addr, addr, addr_len); ha->type = addr_type; ha->refcount = 1; + ha->global_use = global; ha->synced = false; list_add_tail_rcu(&ha->list, &list->list); list->count++; return 0; } +static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, + int addr_len, unsigned char addr_type) +{ + return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); +} + static void ha_rcu_free(struct rcu_head *head) { struct netdev_hw_addr *ha; @@ -60,14 +75,21 @@ static void ha_rcu_free(struct rcu_head *head) kfree(ha); } -static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, - int addr_len, unsigned char addr_type) +static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, + unsigned char *addr, int addr_len, + unsigned char addr_type, bool global) { struct netdev_hw_addr *ha; list_for_each_entry(ha, &list->list, list) { if (!memcmp(ha->addr, addr, addr_len) && (ha->type == addr_type || !addr_type)) { + if (global) { + if (!ha->global_use) + break; + else + ha->global_use = false; + } if (--ha->refcount) return 0; list_del_rcu(&ha->list); @@ -79,10 +101,15 @@ static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, return -ENOENT; } -static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len, - unsigned char addr_type) +static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, + int addr_len, unsigned char addr_type) +{ + return __hw_addr_del_ex(list, addr, addr_len, addr_type, false); +} + +int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type) { int err; struct netdev_hw_addr *ha, *ha2; @@ -105,11 +132,11 @@ unroll: } return err; } +EXPORT_SYMBOL(__hw_addr_add_multiple); -static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len, - unsigned char addr_type) +void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len, unsigned char addr_type) { struct netdev_hw_addr *ha; unsigned char type; @@ -119,10 +146,11 @@ static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, __hw_addr_del(to_list, ha->addr, addr_len, addr_type); } } +EXPORT_SYMBOL(__hw_addr_del_multiple); -static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len) +int __hw_addr_sync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len) { int err = 0; struct netdev_hw_addr *ha, *tmp; @@ -142,10 +170,11 @@ static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, } return err; } +EXPORT_SYMBOL(__hw_addr_sync); -static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int 
addr_len) +void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, + struct netdev_hw_addr_list *from_list, + int addr_len) { struct netdev_hw_addr *ha, *tmp; @@ -159,8 +188,9 @@ static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, } } } +EXPORT_SYMBOL(__hw_addr_unsync); -static void __hw_addr_flush(struct netdev_hw_addr_list *list) +void __hw_addr_flush(struct netdev_hw_addr_list *list) { struct netdev_hw_addr *ha, *tmp; @@ -170,12 +200,14 @@ static void __hw_addr_flush(struct netdev_hw_addr_list *list) } list->count = 0; } +EXPORT_SYMBOL(__hw_addr_flush); -static void __hw_addr_init(struct netdev_hw_addr_list *list) +void __hw_addr_init(struct netdev_hw_addr_list *list) { INIT_LIST_HEAD(&list->list); list->count = 0; } +EXPORT_SYMBOL(__hw_addr_init); /* * Device addresses handling functions @@ -475,4 +507,235 @@ EXPORT_SYMBOL(dev_uc_init); * Multicast list handling functions */ -/* To be filled here */ +static int __dev_mc_add(struct net_device *dev, unsigned char *addr, + bool global) +{ + int err; + + netif_addr_lock_bh(dev); + err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_MULTICAST, global); + if (!err) + __dev_set_rx_mode(dev); + netif_addr_unlock_bh(dev); + return err; +} +/** + * dev_mc_add - Add a multicast address + * @dev: device + * @addr: address to add + * + * Add a multicast address to the device or increase + * the reference count if it already exists. + */ +int dev_mc_add(struct net_device *dev, unsigned char *addr) +{ + return __dev_mc_add(dev, addr, false); +} +EXPORT_SYMBOL(dev_mc_add); + +/** + * dev_mc_add_global - Add a global multicast address + * @dev: device + * @addr: address to add + * + * Add a global multicast address to the device. + */ +int dev_mc_add_global(struct net_device *dev, unsigned char *addr) +{ + return __dev_mc_add(dev, addr, true); +} +EXPORT_SYMBOL(dev_mc_add_global); + +static int __dev_mc_del(struct net_device *dev, unsigned char *addr, + bool global) +{ + int err; + + netif_addr_lock_bh(dev); + err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len, + NETDEV_HW_ADDR_T_MULTICAST, global); + if (!err) + __dev_set_rx_mode(dev); + netif_addr_unlock_bh(dev); + return err; +} + +/** + * dev_mc_del - Delete a multicast address. + * @dev: device + * @addr: address to delete + * + * Release reference to a multicast address and remove it + * from the device if the reference count drops to zero. + */ +int dev_mc_del(struct net_device *dev, unsigned char *addr) +{ + return __dev_mc_del(dev, addr, false); +} +EXPORT_SYMBOL(dev_mc_del); + +/** + * dev_mc_del_global - Delete a global multicast address. + * @dev: device + * @addr: address to delete + * + * Release reference to a multicast address and remove it + * from the device if the reference count drops to zero. + */ +int dev_mc_del_global(struct net_device *dev, unsigned char *addr) +{ + return __dev_mc_del(dev, addr, true); +} +EXPORT_SYMBOL(dev_mc_del_global); + +/** + * dev_mc_sync - Synchronize device's multicast list to another device + * @to: destination device + * @from: source device + * + * Add newly added addresses to the destination device and release + * addresses that have no users left. The source device must be + * locked by netif_tx_lock_bh. + * + * This function is intended to be called from the dev->set_multicast_list + * or dev->set_rx_mode function of layered software devices. 
+ */ +int dev_mc_sync(struct net_device *to, struct net_device *from) +{ + int err = 0; + + if (to->addr_len != from->addr_len) + return -EINVAL; + + netif_addr_lock_bh(to); + err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len); + if (!err) + __dev_set_rx_mode(to); + netif_addr_unlock_bh(to); + return err; +} +EXPORT_SYMBOL(dev_mc_sync); + +/** + * dev_mc_unsync - Remove synchronized addresses from the destination device + * @to: destination device + * @from: source device + * + * Remove all addresses that were added to the destination device by + * dev_mc_sync(). This function is intended to be called from the + * dev->stop function of layered software devices. + */ +void dev_mc_unsync(struct net_device *to, struct net_device *from) +{ + if (to->addr_len != from->addr_len) + return; + + netif_addr_lock_bh(from); + netif_addr_lock(to); + __hw_addr_unsync(&to->mc, &from->mc, to->addr_len); + __dev_set_rx_mode(to); + netif_addr_unlock(to); + netif_addr_unlock_bh(from); +} +EXPORT_SYMBOL(dev_mc_unsync); + +/** + * dev_mc_flush - Flush multicast addresses + * @dev: device + * + * Flush multicast addresses. + */ +void dev_mc_flush(struct net_device *dev) +{ + netif_addr_lock_bh(dev); + __hw_addr_flush(&dev->mc); + netif_addr_unlock_bh(dev); +} +EXPORT_SYMBOL(dev_mc_flush); + +/** + * dev_mc_init - Init multicast address list + * @dev: device + * + * Init multicast address list. + */ +void dev_mc_init(struct net_device *dev) +{ + __hw_addr_init(&dev->mc); +} +EXPORT_SYMBOL(dev_mc_init); + +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> +#include <net/net_namespace.h> + +static int dev_mc_seq_show(struct seq_file *seq, void *v) +{ + struct netdev_hw_addr *ha; + struct net_device *dev = v; + + if (v == SEQ_START_TOKEN) + return 0; + + netif_addr_lock_bh(dev); + netdev_for_each_mc_addr(ha, dev) { + int i; + + seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, + dev->name, ha->refcount, ha->global_use); + + for (i = 0; i < dev->addr_len; i++) + seq_printf(seq, "%02x", ha->addr[i]); + + seq_putc(seq, '\n'); + } + netif_addr_unlock_bh(dev); + return 0; +} + +static const struct seq_operations dev_mc_seq_ops = { + .start = dev_seq_start, + .next = dev_seq_next, + .stop = dev_seq_stop, + .show = dev_mc_seq_show, +}; + +static int dev_mc_seq_open(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &dev_mc_seq_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations dev_mc_seq_fops = { + .owner = THIS_MODULE, + .open = dev_mc_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net, +}; + +#endif + +static int __net_init dev_mc_net_init(struct net *net) +{ + if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) + return -ENOMEM; + return 0; +} + +static void __net_exit dev_mc_net_exit(struct net *net) +{ + proc_net_remove(net, "dev_mcast"); +} + +static struct pernet_operations __net_initdata dev_mc_net_ops = { + .init = dev_mc_net_init, + .exit = dev_mc_net_exit, +}; + +void __init dev_mcast_init(void) +{ + register_pernet_subsys(&dev_mc_net_ops); +} + diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c deleted file mode 100644 index 3dc295beb48..00000000000 --- a/net/core/dev_mcast.c +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Linux NET3: Multicast List maintenance. - * - * Authors: - * Tim Kordas - * Richard Underwood - * - * Stir fried together from the IP multicast and CAP patches above - * Alan Cox - * - * Fixes: - * Alan Cox : Update the device on a real delete - * rather than any time but... - * Alan Cox : IFF_ALLMULTI support. 
- * Alan Cox : New format set_multicast_list() calls. - * Gleb Natapov : Remove dev_mc_lock. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -/* - * Device multicast list maintenance. - * - * This is used both by IP and by the user level maintenance functions. - * Unlike BSD we maintain a usage count on a given multicast address so - * that a casual user application can add/delete multicasts used by - * protocols without doing damage to the protocols when it deletes the - * entries. It also helps IP as it tracks overlapping maps. - * - * Device mc lists are changed by bh at least if IPv6 is enabled, - * so that it must be bh protected. - * - * We block accesses to device mc filters with netif_tx_lock. - */ - -/* - * Delete a device level multicast - */ - -int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl) -{ - int err; - - netif_addr_lock_bh(dev); - err = __dev_addr_delete(&dev->mc_list, &dev->mc_count, - addr, alen, glbl); - if (!err) { - /* - * We have altered the list, so the card - * loaded filter is now wrong. Fix it - */ - - __dev_set_rx_mode(dev); - } - netif_addr_unlock_bh(dev); - return err; -} - -/* - * Add a device level multicast - */ - -int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl) -{ - int err; - - netif_addr_lock_bh(dev); - if (alen != dev->addr_len) - err = -EINVAL; - else - err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl); - if (!err) - __dev_set_rx_mode(dev); - netif_addr_unlock_bh(dev); - return err; -} - -/** - * dev_mc_sync - Synchronize device's multicast list to another device - * @to: destination device - * @from: source device - * - * Add newly added addresses to the destination device and release - * addresses that have no users left. The source device must be - * locked by netif_tx_lock_bh. - * - * This function is intended to be called from the dev->set_multicast_list - * or dev->set_rx_mode function of layered software devices. - */ -int dev_mc_sync(struct net_device *to, struct net_device *from) -{ - int err = 0; - - netif_addr_lock_bh(to); - err = __dev_addr_sync(&to->mc_list, &to->mc_count, - &from->mc_list, &from->mc_count); - if (!err) - __dev_set_rx_mode(to); - netif_addr_unlock_bh(to); - - return err; -} -EXPORT_SYMBOL(dev_mc_sync); - - -/** - * dev_mc_unsync - Remove synchronized addresses from the destination - * device - * @to: destination device - * @from: source device - * - * Remove all addresses that were added to the destination device by - * dev_mc_sync(). This function is intended to be called from the - * dev->stop function of layered software devices. 
- */ -void dev_mc_unsync(struct net_device *to, struct net_device *from) -{ - netif_addr_lock_bh(from); - netif_addr_lock(to); - - __dev_addr_unsync(&to->mc_list, &to->mc_count, - &from->mc_list, &from->mc_count); - __dev_set_rx_mode(to); - - netif_addr_unlock(to); - netif_addr_unlock_bh(from); -} -EXPORT_SYMBOL(dev_mc_unsync); - -#ifdef CONFIG_PROC_FS -static int dev_mc_seq_show(struct seq_file *seq, void *v) -{ - struct dev_addr_list *m; - struct net_device *dev = v; - - if (v == SEQ_START_TOKEN) - return 0; - - netif_addr_lock_bh(dev); - for (m = dev->mc_list; m; m = m->next) { - int i; - - seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex, - dev->name, m->dmi_users, m->dmi_gusers); - - for (i = 0; i < m->dmi_addrlen; i++) - seq_printf(seq, "%02x", m->dmi_addr[i]); - - seq_putc(seq, '\n'); - } - netif_addr_unlock_bh(dev); - return 0; -} - -static const struct seq_operations dev_mc_seq_ops = { - .start = dev_seq_start, - .next = dev_seq_next, - .stop = dev_seq_stop, - .show = dev_mc_seq_show, -}; - -static int dev_mc_seq_open(struct inode *inode, struct file *file) -{ - return seq_open_net(inode, file, &dev_mc_seq_ops, - sizeof(struct seq_net_private)); -} - -static const struct file_operations dev_mc_seq_fops = { - .owner = THIS_MODULE, - .open = dev_mc_seq_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_net, -}; - -#endif - -static int __net_init dev_mc_net_init(struct net *net) -{ - if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops)) - return -ENOMEM; - return 0; -} - -static void __net_exit dev_mc_net_exit(struct net *net) -{ - proc_net_remove(net, "dev_mcast"); -} - -static struct pernet_operations __net_initdata dev_mc_net_ops = { - .init = dev_mc_net_init, - .exit = dev_mc_net_exit, -}; - -void __init dev_mcast_init(void) -{ - register_pernet_subsys(&dev_mc_net_ops); -} - -EXPORT_SYMBOL(dev_mc_add); -EXPORT_SYMBOL(dev_mc_delete); diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 238af093495..f3e4734d207 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -349,7 +349,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de if (dn_db->dev->type == ARPHRD_ETHER) { if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa1->ifa_local); - dev_mc_delete(dev, mac_addr, ETH_ALEN, 0); + dev_mc_del(dev, mac_addr); } } @@ -380,7 +380,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa) if (dev->type == ARPHRD_ETHER) { if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { dn_dn2eth(mac_addr, ifa->ifa_local); - dev_mc_add(dev, mac_addr, ETH_ALEN, 0); + dev_mc_add(dev, mac_addr); } } @@ -1000,9 +1000,9 @@ static int dn_eth_up(struct net_device *dev) struct dn_dev *dn_db = dev->dn_ptr; if (dn_db->parms.forwarding == 0) - dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); + dev_mc_add(dev, dn_rt_all_end_mcast); else - dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); + dev_mc_add(dev, dn_rt_all_rt_mcast); dn_db->use_long = 1; @@ -1014,9 +1014,9 @@ static void dn_eth_down(struct net_device *dev) struct dn_dev *dn_db = dev->dn_ptr; if (dn_db->parms.forwarding == 0) - dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); + dev_mc_del(dev, dn_rt_all_end_mcast); else - dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); + dev_mc_del(dev, dn_rt_all_rt_mcast); } static void dn_dev_set_timer(struct net_device *dev); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 63bf298ca10..51824c42b77 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -997,7 +997,7 @@ static 
void ip_mc_filter_add(struct in_device *in_dev, __be32 addr) --ANK */ if (arp_mc_map(addr, buf, dev, 0) == 0) - dev_mc_add(dev, buf, dev->addr_len, 0); + dev_mc_add(dev, buf); } /* @@ -1010,7 +1010,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr) struct net_device *dev = in_dev->dev; if (arp_mc_map(addr, buf, dev, 0) == 0) - dev_mc_delete(dev, buf, dev->addr_len, 0); + dev_mc_del(dev, buf); } #ifdef CONFIG_IP_MULTICAST diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 0886f96c736..a2208b7b313 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -87,7 +87,7 @@ clusterip_config_entry_put(struct clusterip_config *c) list_del(&c->list); write_unlock_bh(&clusterip_lock); - dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); + dev_mc_del(c->dev, c->clustermac); dev_put(c->dev); /* In case anyone still accesses the file, the open/close @@ -396,7 +396,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par) dev_put(dev); return false; } - dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); + dev_mc_add(config->dev, config->clustermac); } } cipinfo->config = config; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index bcd97191596..37d1868c006 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -714,7 +714,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc) if (!(mc->mca_flags&MAF_LOADED)) { mc->mca_flags |= MAF_LOADED; if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) - dev_mc_add(dev, buf, dev->addr_len, 0); + dev_mc_add(dev, buf); } spin_unlock_bh(&mc->mca_lock); @@ -740,7 +740,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc) if (mc->mca_flags&MAF_LOADED) { mc->mca_flags &= ~MAF_LOADED; if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) - dev_mc_delete(dev, buf, dev->addr_len, 0); + dev_mc_del(dev, buf); } if (mc->mca_flags & MAF_NOREPORT) diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c3d844093a2..9179196da26 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, } static inline u64 drv_prepare_multicast(struct ieee80211_local *local, - int mc_count, - struct dev_addr_list *mc_list) + struct netdev_hw_addr_list *mc_list) { u64 ret = 0; if (local->ops->prepare_multicast) - ret = local->ops->prepare_multicast(&local->hw, mc_count, - mc_list); + ret = local->ops->prepare_multicast(&local->hw, mc_list); - trace_drv_prepare_multicast(local, mc_count, ret); + trace_drv_prepare_multicast(local, mc_list->count, ret); return ret; } diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ab369e2a528..7fdacf9408b 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -646,8 +646,7 @@ struct ieee80211_local { struct work_struct recalc_smps; /* aggregated multicast list */ - struct dev_addr_list *mc_list; - int mc_count; + struct netdev_hw_addr_list mc_list; bool tim_in_locked_section; /* see ieee80211_beacon_get() */ diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b4ec59a8dc0..00f3a93c6b0 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -412,8 +412,7 @@ static int ieee80211_stop(struct net_device *dev) netif_addr_lock_bh(dev); spin_lock_bh(&local->filter_lock); - __dev_addr_unsync(&local->mc_list, &local->mc_count, - &dev->mc_list, &dev->mc_count); + __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len); spin_unlock_bh(&local->filter_lock); 
netif_addr_unlock_bh(dev); @@ -596,8 +595,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev) sdata->flags ^= IEEE80211_SDATA_PROMISC; } spin_lock_bh(&local->filter_lock); - __dev_addr_sync(&local->mc_list, &local->mc_count, - &dev->mc_list, &dev->mc_count); + __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len); spin_unlock_bh(&local->filter_lock); ieee80211_queue_work(&local->hw, &local->reconfig_filter); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 06c33b68d8e..84ad249a4e2 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local) spin_lock_bh(&local->filter_lock); changed_flags = local->filter_flags ^ new_flags; - mc = drv_prepare_multicast(local, local->mc_count, local->mc_list); + mc = drv_prepare_multicast(local, &local->mc_list); spin_unlock_bh(&local->filter_lock); /* be a bit nasty */ diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 48c1e0ae565..b0f037cc899 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1691,9 +1691,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i, if (i->alen != dev->addr_len) return -EINVAL; if (what > 0) - return dev_mc_add(dev, i->addr, i->alen, 0); + return dev_mc_add(dev, i->addr); else - return dev_mc_delete(dev, i->addr, i->alen, 0); + return dev_mc_del(dev, i->addr); break; case PACKET_MR_PROMISC: return dev_set_promiscuity(dev, what); -- cgit v1.2.3-70-g09d2 From bc1db9af731a74c7eca04df5936214c800774113 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Fri, 9 Apr 2010 17:13:50 -0700 Subject: IB: Explicitly rule out llseek to avoid BKL in default_llseek() Several RDMA user-access drivers have file_operations structures with no .llseek method set. None of the drivers actually do anything with f_pos, so this means llseek is essentially a NOP, instead of returning an error as leaving other file_operations methods unimplemented would do. This is mostly harmless, except that a NULL .llseek means that default_llseek() is used, and this function grabs the BKL, which we would like to avoid. Since llseek does nothing useful on these files, we would like it to return an error to userspace instead of silently grabbing the BKL and succeeding. For nearly all of the file types, we take the belt-and-suspenders approach of setting the .llseek method to no_llseek and also calling nonseekable_open(); the exception is the uverbs_event files, which are created with anon_inode_getfile(), which already sets f_mode the same way as nonseekable_open() would. This work is motivated by Arnd Bergmann's bkl-removal tree. 
Signed-off-by: Roland Dreier --- drivers/infiniband/core/ucm.c | 3 ++- drivers/infiniband/core/ucma.c | 4 +++- drivers/infiniband/core/user_mad.c | 12 ++++++++---- drivers/infiniband/core/uverbs_main.c | 11 +++++++---- 4 files changed, 20 insertions(+), 10 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 512b1c43460..46474842cfe 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1181,7 +1181,7 @@ static int ib_ucm_open(struct inode *inode, struct file *filp) file->filp = filp; file->device = container_of(inode->i_cdev, struct ib_ucm_device, cdev); - return 0; + return nonseekable_open(inode, filp); } static int ib_ucm_close(struct inode *inode, struct file *filp) @@ -1229,6 +1229,7 @@ static const struct file_operations ucm_fops = { .release = ib_ucm_close, .write = ib_ucm_write, .poll = ib_ucm_poll, + .llseek = no_llseek, }; static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 46185084121..ac7edc24165 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1220,7 +1220,8 @@ static int ucma_open(struct inode *inode, struct file *filp) filp->private_data = file; file->filp = filp; - return 0; + + return nonseekable_open(inode, filp); } static int ucma_close(struct inode *inode, struct file *filp) @@ -1250,6 +1251,7 @@ static const struct file_operations ucma_fops = { .release = ucma_close, .write = ucma_write, .poll = ucma_poll, + .llseek = no_llseek, }; static struct miscdevice ucma_misc = { diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index e7db054fb1c..6babb72b39f 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -781,7 +781,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp) { struct ib_umad_port *port; struct ib_umad_file *file; - int ret = 0; + int ret; port = container_of(inode->i_cdev, struct ib_umad_port, cdev); if (port) @@ -814,6 +814,8 @@ static int ib_umad_open(struct inode *inode, struct file *filp) list_add_tail(&file->port_list, &port->file_list); + ret = nonseekable_open(inode, filp); + out: mutex_unlock(&port->file_mutex); return ret; @@ -866,7 +868,8 @@ static const struct file_operations umad_fops = { .compat_ioctl = ib_umad_compat_ioctl, #endif .open = ib_umad_open, - .release = ib_umad_close + .release = ib_umad_close, + .llseek = no_llseek, }; static int ib_umad_sm_open(struct inode *inode, struct file *filp) @@ -903,7 +906,7 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp) filp->private_data = port; - return 0; + return nonseekable_open(inode, filp); fail: kref_put(&port->umad_dev->ref, ib_umad_release_dev); @@ -933,7 +936,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp) static const struct file_operations umad_sm_fops = { .owner = THIS_MODULE, .open = ib_umad_sm_open, - .release = ib_umad_sm_close + .release = ib_umad_sm_close, + .llseek = no_llseek, }; static struct ib_client umad_client = { diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index fb352625442..ec83e9fe387 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -369,7 +369,8 @@ static const struct file_operations uverbs_event_fops = { .read = ib_uverbs_event_read, .poll = ib_uverbs_event_poll, .release = ib_uverbs_event_close, - 
.fasync = ib_uverbs_event_fasync + .fasync = ib_uverbs_event_fasync, + .llseek = no_llseek, }; void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) @@ -623,7 +624,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) filp->private_data = file; - return 0; + return nonseekable_open(inode, filp); err_module: module_put(dev->ib_dev->owner); @@ -651,7 +652,8 @@ static const struct file_operations uverbs_fops = { .owner = THIS_MODULE, .write = ib_uverbs_write, .open = ib_uverbs_open, - .release = ib_uverbs_close + .release = ib_uverbs_close, + .llseek = no_llseek, }; static const struct file_operations uverbs_mmap_fops = { @@ -659,7 +661,8 @@ static const struct file_operations uverbs_mmap_fops = { .write = ib_uverbs_write, .mmap = ib_uverbs_mmap, .open = ib_uverbs_open, - .release = ib_uverbs_close + .release = ib_uverbs_close, + .llseek = no_llseek, }; static struct ib_client uverbs_client = { -- cgit v1.2.3-70-g09d2 From 7960d6b9de7716e9080b47f6dc4d415d967e032d Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 2 Apr 2010 04:29:38 +0000 Subject: RDMA/cxgb3: Use the dma state API instead of pci equivalents The DMA API is preferred; no functional change. Signed-off-by: FUJITA Tomonori Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb3/cxio_hal.c | 12 ++++++------ drivers/infiniband/hw/cxgb3/cxio_hal.h | 2 +- drivers/infiniband/hw/cxgb3/cxio_wr.h | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 35f286f1ad1..005b7b52bc1 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -174,7 +174,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) kfree(cq->sw_queue); return -ENOMEM; } - pci_unmap_addr_set(cq, mapping, cq->dma_addr); + dma_unmap_addr_set(cq, mapping, cq->dma_addr); memset(cq->queue, 0, size); setup.id = cq->cqid; setup.base_addr = (u64) (cq->dma_addr); @@ -297,7 +297,7 @@ int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, goto err4; memset(wq->queue, 0, depth * sizeof(union t3_wr)); - pci_unmap_addr_set(wq, mapping, wq->dma_addr); + dma_unmap_addr_set(wq, mapping, wq->dma_addr); wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; if (!kernel_domain) wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + @@ -325,7 +325,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), (1UL << (cq->size_log2)) * sizeof(struct t3_cqe), cq->queue, - pci_unmap_addr(cq, mapping)); + dma_unmap_addr(cq, mapping)); cxio_hal_put_cqid(rdev_p->rscp, cq->cqid); return err; } @@ -336,7 +336,7 @@ int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), (1UL << (wq->size_log2)) * sizeof(union t3_wr), wq->queue, - pci_unmap_addr(wq, mapping)); + dma_unmap_addr(wq, mapping)); kfree(wq->sq); cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2)); kfree(wq->rq); @@ -537,7 +537,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p) err = -ENOMEM; goto err; } - pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping, + dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping, rdev_p->ctrl_qp.dma_addr); rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; memset(rdev_p->ctrl_qp.workq, 0, @@ -583,7 +583,7 @@ static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p) 
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev), (1UL << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr), rdev_p->ctrl_qp.workq, - pci_unmap_addr(&rdev_p->ctrl_qp, mapping)); + dma_unmap_addr(&rdev_p->ctrl_qp, mapping)); return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID); } diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 073373c2c56..8f0caf7d448 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -71,7 +71,7 @@ struct cxio_hal_ctrl_qp { wait_queue_head_t waitq;/* wait for RspQ/CQE msg */ union t3_wr *workq; /* the work request queue */ dma_addr_t dma_addr; /* pci bus address of the workq */ - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); void __iomem *doorbell; }; diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index 15073b2da1c..e5ddb63e7d2 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -691,7 +691,7 @@ struct t3_swrq { struct t3_wq { union t3_wr *queue; /* DMA accessable memory */ dma_addr_t dma_addr; /* DMA address for HW */ - DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */ + DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */ u32 error; /* 1 once we go to ERROR */ u32 qpid; u32 wptr; /* idx to next available WR slot */ @@ -718,7 +718,7 @@ struct t3_cq { u32 wptr; u32 size_log2; dma_addr_t dma_addr; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); struct t3_cqe *queue; struct t3_cqe *sw_queue; u32 sw_rptr; -- cgit v1.2.3-70-g09d2 From 73a203d2014f50d874b9e40083ad481ca70408e8 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Mon, 5 Apr 2010 19:59:57 +0000 Subject: RDMA/cxgb3: Don't free skbs on NET_XMIT_* indications from LLD The low level cxgb3 driver can return NET_XMIT_CN and friends. The iw_cxgb3 driver should _not_ treat these as errors. Signed-off-by: Steve Wise Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb3/iwch_cm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 4fef0329627..cfd6db019f1 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -151,7 +151,7 @@ int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2 return -EIO; } error = l2t_send(tdev, skb, l2e); - if (error) + if (error < 0) kfree_skb(skb); return error; } @@ -167,7 +167,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) return -EIO; } error = cxgb3_ofld_send(tdev, skb); - if (error) + if (error < 0) kfree_skb(skb); return error; } -- cgit v1.2.3-70-g09d2 From e7494440573d9d7b5cb0e7ed9ae7d742226583de Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Wed, 21 Apr 2010 15:23:10 -0700 Subject: RDMA/amso1100: Use the dma state API instead of pci equivalents The DMA API is preferred; no functional change. 
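The conversion is mechanical; as a sketch (example_ring and example_save_mapping are made-up names, not structures or functions in this driver), each pci_* unmap-state helper maps one-for-one onto its dma_* equivalent:

/*
 * Illustrative sketch only:
 *   DECLARE_PCI_UNMAP_ADDR(mapping)     ->  DEFINE_DMA_UNMAP_ADDR(mapping);
 *   pci_unmap_addr_set(p, mapping, a)   ->  dma_unmap_addr_set(p, mapping, a)
 *   pci_unmap_addr(p, mapping)          ->  dma_unmap_addr(p, mapping)
 */
struct example_ring {
	void		*buf;
	dma_addr_t	dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);	/* stashes the address for unmap time */
};

static void example_save_mapping(struct example_ring *r, dma_addr_t dma)
{
	r->dma_addr = dma;
	dma_unmap_addr_set(r, mapping, dma);	/* read back later via dma_unmap_addr() */
}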
Signed-off-by: FUJITA Tomonori Signed-off-by: Roland Dreier --- drivers/infiniband/hw/amso1100/c2.h | 2 +- drivers/infiniband/hw/amso1100/c2_alloc.c | 4 ++-- drivers/infiniband/hw/amso1100/c2_cq.c | 4 ++-- drivers/infiniband/hw/amso1100/c2_mq.h | 2 +- drivers/infiniband/hw/amso1100/c2_provider.h | 2 +- drivers/infiniband/hw/amso1100/c2_rnic.c | 12 ++++++------ 6 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h index f7ff66f9836..6ae698e6877 100644 --- a/drivers/infiniband/hw/amso1100/c2.h +++ b/drivers/infiniband/hw/amso1100/c2.h @@ -250,7 +250,7 @@ struct c2_array { struct sp_chunk { struct sp_chunk *next; dma_addr_t dma_addr; - DECLARE_PCI_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_ADDR(mapping); u16 head; u16 shared_ptr[0]; }; diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c index d4f5f5d42e9..78d247ec696 100644 --- a/drivers/infiniband/hw/amso1100/c2_alloc.c +++ b/drivers/infiniband/hw/amso1100/c2_alloc.c @@ -49,7 +49,7 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask, return -ENOMEM; new_head->dma_addr = dma_addr; - pci_unmap_addr_set(new_head, mapping, new_head->dma_addr); + dma_unmap_addr_set(new_head, mapping, new_head->dma_addr); new_head->next = NULL; new_head->head = 0; @@ -81,7 +81,7 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root) while (root) { next = root->next; dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root, - pci_unmap_addr(root, mapping)); + dma_unmap_addr(root, mapping)); root = next; } } diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c index f7b0fc23f41..49e0e8533f7 100644 --- a/drivers/infiniband/hw/amso1100/c2_cq.c +++ b/drivers/infiniband/hw/amso1100/c2_cq.c @@ -257,7 +257,7 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) { dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size, - mq->msg_pool.host, pci_unmap_addr(mq, mapping)); + mq->msg_pool.host, dma_unmap_addr(mq, mapping)); } static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, @@ -278,7 +278,7 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, NULL, /* peer (currently unknown) */ C2_MQ_HOST_TARGET); - pci_unmap_addr_set(mq, mapping, mq->host_dma); + dma_unmap_addr_set(mq, mapping, mq->host_dma); return 0; } diff --git a/drivers/infiniband/hw/amso1100/c2_mq.h b/drivers/infiniband/hw/amso1100/c2_mq.h index acede007b94..fc1b9a7cec4 100644 --- a/drivers/infiniband/hw/amso1100/c2_mq.h +++ b/drivers/infiniband/hw/amso1100/c2_mq.h @@ -71,7 +71,7 @@ struct c2_mq { u8 __iomem *adapter; } msg_pool; dma_addr_t host_dma; - DECLARE_PCI_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_ADDR(mapping); u16 hint_count; u16 priv; struct c2_mq_shared __iomem *peer; diff --git a/drivers/infiniband/hw/amso1100/c2_provider.h b/drivers/infiniband/hw/amso1100/c2_provider.h index 1076df2ee96..bf189987711 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.h +++ b/drivers/infiniband/hw/amso1100/c2_provider.h @@ -50,7 +50,7 @@ struct c2_buf_list { void *buf; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c index 78c4bcc6ef6..85cfae4cad7 100644 --- a/drivers/infiniband/hw/amso1100/c2_rnic.c +++ 
b/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -524,7 +524,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev) err = -ENOMEM; goto bail1; } - pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); + dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages, (unsigned long long) c2dev->rep_vq.host_dma); c2_mq_rep_init(&c2dev->rep_vq, @@ -545,7 +545,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev) err = -ENOMEM; goto bail2; } - pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); + dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages, (unsigned long long) c2dev->aeq.host_dma); c2_mq_rep_init(&c2dev->aeq, @@ -596,11 +596,11 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev) bail3: dma_free_coherent(&c2dev->pcidev->dev, c2dev->aeq.q_size * c2dev->aeq.msg_size, - q2_pages, pci_unmap_addr(&c2dev->aeq, mapping)); + q2_pages, dma_unmap_addr(&c2dev->aeq, mapping)); bail2: dma_free_coherent(&c2dev->pcidev->dev, c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, - q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping)); + q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping)); bail1: c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); bail0: @@ -637,13 +637,13 @@ void __devexit c2_rnic_term(struct c2_dev *c2dev) dma_free_coherent(&c2dev->pcidev->dev, c2dev->aeq.q_size * c2dev->aeq.msg_size, c2dev->aeq.msg_pool.host, - pci_unmap_addr(&c2dev->aeq, mapping)); + dma_unmap_addr(&c2dev->aeq, mapping)); /* Free the verbs reply queue */ dma_free_coherent(&c2dev->pcidev->dev, c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, c2dev->rep_vq.msg_pool.host, - pci_unmap_addr(&c2dev->rep_vq, mapping)); + dma_unmap_addr(&c2dev->rep_vq, mapping)); /* Free the MQ shared pointer pool */ c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); -- cgit v1.2.3-70-g09d2 From 3a2baff783497321e8322ce29f3a33a21c0d88f5 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Fri, 2 Apr 2010 04:29:39 +0000 Subject: IB/mthca: Use the dma state API instead of pci equivalents The DMA API is preferred; no functional change. 
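The dma_unmap_addr() helpers also let this unmap bookkeeping compile away on platforms that do not need it; paraphrasing their definitions in <linux/dma-mapping.h>, which are guarded by CONFIG_NEED_DMA_MAP_STATE:

/* Paraphrased from <linux/dma-mapping.h>; not part of this patch. */
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#endif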
Signed-off-by: FUJITA Tomonori Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mthca/mthca_allocator.c | 8 ++++---- drivers/infiniband/hw/mthca/mthca_eq.c | 6 +++--- drivers/infiniband/hw/mthca/mthca_provider.h | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index c5ccc2daab6..b4e0cf4e95c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -211,7 +211,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, if (!buf->direct.buf) return -ENOMEM; - pci_unmap_addr_set(&buf->direct, mapping, t); + dma_unmap_addr_set(&buf->direct, mapping, t); memset(buf->direct.buf, 0, size); @@ -251,7 +251,7 @@ int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, goto err_free; dma_list[i] = t; - pci_unmap_addr_set(&buf->page_list[i], mapping, t); + dma_unmap_addr_set(&buf->page_list[i], mapping, t); clear_page(buf->page_list[i].buf); } @@ -289,12 +289,12 @@ void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, if (is_direct) dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, - pci_unmap_addr(&buf->direct, mapping)); + dma_unmap_addr(&buf->direct, mapping)); else { for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->page_list[i].buf, - pci_unmap_addr(&buf->page_list[i], + dma_unmap_addr(&buf->page_list[i], mapping)); kfree(buf->page_list); } diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c index 9388164b605..8e8c728aff8 100644 --- a/drivers/infiniband/hw/mthca/mthca_eq.c +++ b/drivers/infiniband/hw/mthca/mthca_eq.c @@ -504,7 +504,7 @@ static int mthca_create_eq(struct mthca_dev *dev, goto err_out_free_pages; dma_list[i] = t; - pci_unmap_addr_set(&eq->page_list[i], mapping, t); + dma_unmap_addr_set(&eq->page_list[i], mapping, t); clear_page(eq->page_list[i].buf); } @@ -579,7 +579,7 @@ static int mthca_create_eq(struct mthca_dev *dev, if (eq->page_list[i].buf) dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, eq->page_list[i].buf, - pci_unmap_addr(&eq->page_list[i], + dma_unmap_addr(&eq->page_list[i], mapping)); mthca_free_mailbox(dev, mailbox); @@ -629,7 +629,7 @@ static void mthca_free_eq(struct mthca_dev *dev, for (i = 0; i < npages; ++i) pci_free_consistent(dev->pdev, PAGE_SIZE, eq->page_list[i].buf, - pci_unmap_addr(&eq->page_list[i], mapping)); + dma_unmap_addr(&eq->page_list[i], mapping)); kfree(eq->page_list); mthca_free_mailbox(dev, mailbox); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h index 90f4c4d2e98..596acc45569 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/drivers/infiniband/hw/mthca/mthca_provider.h @@ -46,7 +46,7 @@ struct mthca_buf_list { void *buf; - DECLARE_PCI_UNMAP_ADDR(mapping) + DEFINE_DMA_UNMAP_ADDR(mapping); }; union mthca_buf { -- cgit v1.2.3-70-g09d2 From cfdda9d764362ab77b11a410bb928400e6520d57 Mon Sep 17 00:00:00 2001 From: Steve Wise Date: Wed, 21 Apr 2010 15:30:06 -0700 Subject: RDMA/cxgb4: Add driver for Chelsio T4 RNIC Add an RDMA/iWARP driver for Chelsio T4 Ethernet adapters. 
Signed-off-by: Steve Wise Signed-off-by: Roland Dreier --- drivers/infiniband/Kconfig | 1 + drivers/infiniband/Makefile | 1 + drivers/infiniband/hw/cxgb4/Kconfig | 18 + drivers/infiniband/hw/cxgb4/Makefile | 5 + drivers/infiniband/hw/cxgb4/cm.c | 2330 +++++++++++++++++++++++++++++ drivers/infiniband/hw/cxgb4/cq.c | 882 +++++++++++ drivers/infiniband/hw/cxgb4/device.c | 520 +++++++ drivers/infiniband/hw/cxgb4/ev.c | 193 +++ drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 743 +++++++++ drivers/infiniband/hw/cxgb4/mem.c | 811 ++++++++++ drivers/infiniband/hw/cxgb4/provider.c | 518 +++++++ drivers/infiniband/hw/cxgb4/qp.c | 1577 +++++++++++++++++++ drivers/infiniband/hw/cxgb4/resource.c | 417 ++++++ drivers/infiniband/hw/cxgb4/t4.h | 536 +++++++ drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 829 ++++++++++ drivers/infiniband/hw/cxgb4/user.h | 66 + 16 files changed, 9447 insertions(+) create mode 100644 drivers/infiniband/hw/cxgb4/Kconfig create mode 100644 drivers/infiniband/hw/cxgb4/Makefile create mode 100644 drivers/infiniband/hw/cxgb4/cm.c create mode 100644 drivers/infiniband/hw/cxgb4/cq.c create mode 100644 drivers/infiniband/hw/cxgb4/device.c create mode 100644 drivers/infiniband/hw/cxgb4/ev.c create mode 100644 drivers/infiniband/hw/cxgb4/iw_cxgb4.h create mode 100644 drivers/infiniband/hw/cxgb4/mem.c create mode 100644 drivers/infiniband/hw/cxgb4/provider.c create mode 100644 drivers/infiniband/hw/cxgb4/qp.c create mode 100644 drivers/infiniband/hw/cxgb4/resource.c create mode 100644 drivers/infiniband/hw/cxgb4/t4.h create mode 100644 drivers/infiniband/hw/cxgb4/t4fw_ri_api.h create mode 100644 drivers/infiniband/hw/cxgb4/user.h (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 975adce5f40..330d2a42336 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -46,6 +46,7 @@ source "drivers/infiniband/hw/ipath/Kconfig" source "drivers/infiniband/hw/ehca/Kconfig" source "drivers/infiniband/hw/amso1100/Kconfig" source "drivers/infiniband/hw/cxgb3/Kconfig" +source "drivers/infiniband/hw/cxgb4/Kconfig" source "drivers/infiniband/hw/mlx4/Kconfig" source "drivers/infiniband/hw/nes/Kconfig" diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile index ed35e449624..0c4e589d746 100644 --- a/drivers/infiniband/Makefile +++ b/drivers/infiniband/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/ obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/ obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/ +obj-$(CONFIG_INFINIBAND_CXGB4) += hw/cxgb4/ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/ obj-$(CONFIG_INFINIBAND_NES) += hw/nes/ obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/ diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig new file mode 100644 index 00000000000..ccb85eaaad7 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/Kconfig @@ -0,0 +1,18 @@ +config INFINIBAND_CXGB4 + tristate "Chelsio T4 RDMA Driver" + depends on CHELSIO_T4 && INET + select GENERIC_ALLOCATOR + ---help--- + This is an iWARP/RDMA driver for the Chelsio T4 1GbE and + 10GbE adapters. + + For general information about Chelsio and our products, visit + our website at <http://www.chelsio.com>. + + For customer support, please visit our customer support page at + <http://service.chelsio.com>. + + Please send feedback to <linux-bugs@chelsio.com>. + + To compile this driver as a module, choose M here: the module + will be called iw_cxgb4.
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile new file mode 100644 index 00000000000..e31a499f017 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/Makefile @@ -0,0 +1,5 @@ +EXTRA_CFLAGS += -Idrivers/net/cxgb4 + +obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o + +iw_cxgb4-y := device.o cm.o provider.o mem.o cq.o qp.o resource.o ev.o diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c new file mode 100644 index 00000000000..07b068be0cf --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -0,0 +1,2330 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include <linux/module.h> +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/skbuff.h> +#include <linux/timer.h> +#include <linux/notifier.h> +#include <linux/inetdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> + +#include <net/neighbour.h> +#include <net/netevent.h> +#include <net/route.h> + +#include "iw_cxgb4.h" + +static char *states[] = { + "idle", + "listen", + "connecting", + "mpa_wait_req", + "mpa_req_sent", + "mpa_req_rcvd", + "mpa_rep_sent", + "fpdu_mode", + "aborting", + "closing", + "moribund", + "dead", + NULL, +}; + +static int enable_tcp_timestamps; +module_param(enable_tcp_timestamps, int, 0644); +MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); + +static int enable_tcp_sack; +module_param(enable_tcp_sack, int, 0644); +MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); + +static int enable_tcp_window_scaling = 1; +module_param(enable_tcp_window_scaling, int, 0644); +MODULE_PARM_DESC(enable_tcp_window_scaling, + "Enable tcp window scaling (default=1)"); + +int c4iw_debug; +module_param(c4iw_debug, int, 0644); +MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); + +static int peer2peer; +module_param(peer2peer, int, 0644); +MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); + +static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; +module_param(p2p_type, int, 0644); +MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " + "1=RDMA_READ 0=RDMA_WRITE (default 1)"); + +static int ep_timeout_secs = 60; +module_param(ep_timeout_secs, int, 0644); +MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " + "in seconds (default=60)"); + +static int mpa_rev = 1; +module_param(mpa_rev, int, 0644); +MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " + "1 is spec compliant. (default=1)"); + +static int markers_enabled; +module_param(markers_enabled, int, 0644); +MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); + +static int crc_enabled = 1; +module_param(crc_enabled, int, 0644); +MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); + +static int rcv_win = 256 * 1024; +module_param(rcv_win, int, 0644); +MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); + +static int snd_win = 32 * 1024; +module_param(snd_win, int, 0644); +MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)"); + +static void process_work(struct work_struct *work); +static struct workqueue_struct *workq; +static DECLARE_WORK(skb_work, process_work); + +static struct sk_buff_head rxq; +static c4iw_handler_func work_handlers[NUM_CPL_CMDS]; +c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; + +static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); +static void ep_timeout(unsigned long arg); +static void connect_reply_upcall(struct c4iw_ep *ep, int status); + +static void start_ep_timer(struct c4iw_ep *ep) +{ + PDBG("%s ep %p\n", __func__, ep); + if (timer_pending(&ep->timer)) { + PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); + del_timer_sync(&ep->timer); + } else + c4iw_get_ep(&ep->com); + ep->timer.expires = jiffies + ep_timeout_secs * HZ; + ep->timer.data = (unsigned long)ep; + ep->timer.function = ep_timeout; + add_timer(&ep->timer); +} + +static void stop_ep_timer(struct c4iw_ep *ep) +{ + PDBG("%s ep %p\n", __func__, ep); + if (!timer_pending(&ep->timer)) { + printk(KERN_ERR "%s timer stopped when it's not running!
" + "ep %p state %u\n", __func__, ep, ep->com.state); + WARN_ON(1); + return; + } + del_timer_sync(&ep->timer); + c4iw_put_ep(&ep->com); +} + +static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, + struct l2t_entry *l2e) +{ + int error = 0; + + if (c4iw_fatal_error(rdev)) { + kfree_skb(skb); + PDBG("%s - device in error state - dropping\n", __func__); + return -EIO; + } + error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); + if (error < 0) + kfree_skb(skb); + return error; +} + +int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) +{ + int error = 0; + + if (c4iw_fatal_error(rdev)) { + kfree_skb(skb); + PDBG("%s - device in error state - dropping\n", __func__); + return -EIO; + } + error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); + if (error < 0) + kfree_skb(skb); + return error; +} + +static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) +{ + struct cpl_tid_release *req; + + skb = get_skb(skb, sizeof *req, GFP_KERNEL); + if (!skb) + return; + req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, hwtid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); + set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); + c4iw_ofld_send(rdev, skb); + return; +} + +static void set_emss(struct c4iw_ep *ep, u16 opt) +{ + ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; + ep->mss = ep->emss; + if (GET_TCPOPT_TSTAMP(opt)) + ep->emss -= 12; + if (ep->emss < 128) + ep->emss = 128; + PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), + ep->mss, ep->emss); +} + +static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) +{ + unsigned long flags; + enum c4iw_ep_state state; + + spin_lock_irqsave(&epc->lock, flags); + state = epc->state; + spin_unlock_irqrestore(&epc->lock, flags); + return state; +} + +static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) +{ + epc->state = new; +} + +static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) +{ + unsigned long flags; + + spin_lock_irqsave(&epc->lock, flags); + PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); + __state_set(epc, new); + spin_unlock_irqrestore(&epc->lock, flags); + return; +} + +static void *alloc_ep(int size, gfp_t gfp) +{ + struct c4iw_ep_common *epc; + + epc = kzalloc(size, gfp); + if (epc) { + kref_init(&epc->kref); + spin_lock_init(&epc->lock); + init_waitqueue_head(&epc->waitq); + } + PDBG("%s alloc ep %p\n", __func__, epc); + return epc; +} + +void _c4iw_free_ep(struct kref *kref) +{ + struct c4iw_ep *ep; + + ep = container_of(kref, struct c4iw_ep, com.kref); + PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); + if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); + dst_release(ep->dst); + cxgb4_l2t_release(ep->l2t); + } + kfree(ep); +} + +static void release_ep_resources(struct c4iw_ep *ep) +{ + set_bit(RELEASE_RESOURCES, &ep->com.flags); + c4iw_put_ep(&ep->com); +} + +static void process_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct c4iw_dev *dev; + struct cpl_act_establish *rpl = cplhdr(skb); + unsigned int opcode; + int ret; + + while ((skb = skb_dequeue(&rxq))) { + rpl = cplhdr(skb); + dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); + opcode = rpl->ot.opcode; + + BUG_ON(!work_handlers[opcode]); + ret = work_handlers[opcode](dev, skb); + if (!ret) + kfree_skb(skb); + } +} + +static int status2errno(int status) +{ + switch (status) 
{ + case CPL_ERR_NONE: + return 0; + case CPL_ERR_CONN_RESET: + return -ECONNRESET; + case CPL_ERR_ARP_MISS: + return -EHOSTUNREACH; + case CPL_ERR_CONN_TIMEDOUT: + return -ETIMEDOUT; + case CPL_ERR_TCAM_FULL: + return -ENOMEM; + case CPL_ERR_CONN_EXIST: + return -EADDRINUSE; + default: + return -EIO; + } +} + +/* + * Try and reuse skbs already allocated... + */ +static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) +{ + if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { + skb_trim(skb, 0); + skb_get(skb); + skb_reset_transport_header(skb); + } else { + skb = alloc_skb(len, gfp); + } + return skb; +} + +static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, + __be32 peer_ip, __be16 local_port, + __be16 peer_port, u8 tos) +{ + struct rtable *rt; + struct flowi fl = { + .oif = 0, + .nl_u = { + .ip4_u = { + .daddr = peer_ip, + .saddr = local_ip, + .tos = tos} + }, + .proto = IPPROTO_TCP, + .uli_u = { + .ports = { + .sport = local_port, + .dport = peer_port} + } + }; + + if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0)) + return NULL; + return rt; +} + +static void arp_failure_discard(void *handle, struct sk_buff *skb) +{ + PDBG("%s c4iw_dev %p\n", __func__, handle); + kfree_skb(skb); +} + +/* + * Handle an ARP failure for an active open. + */ +static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) +{ + printk(KERN_ERR MOD "ARP failure during connect\n"); + kfree_skb(skb); +} + +/* + * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant + * and send it along. + */ +static void abort_arp_failure(void *handle, struct sk_buff *skb) +{ + struct c4iw_rdev *rdev = handle; + struct cpl_abort_req *req = cplhdr(skb); + + PDBG("%s rdev %p\n", __func__, rdev); + req->cmd = CPL_ABORT_NO_RST; + c4iw_ofld_send(rdev, skb); +} + +static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) +{ + unsigned int flowclen = 80; + struct fw_flowc_wr *flowc; + int i; + + skb = get_skb(skb, flowclen, GFP_KERNEL); + flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); + + flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | + FW_FLOWC_WR_NPARAMS(8)); + flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, + 16)) | FW_WR_FLOWID(ep->hwtid)); + + flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; + flowc->mnemval[0].val = cpu_to_be32(0); + flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; + flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); + flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; + flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); + flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; + flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); + flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; + flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); + flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; + flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); + flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; + flowc->mnemval[6].val = cpu_to_be32(snd_win); + flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; + flowc->mnemval[7].val = cpu_to_be32(ep->emss); + /* Pad WR to 16 byte boundary */ + flowc->mnemval[8].mnemonic = 0; + flowc->mnemval[8].val = 0; + for (i = 0; i < 9; i++) { + flowc->mnemval[i].r4[0] = 0; + flowc->mnemval[i].r4[1] = 0; + flowc->mnemval[i].r4[2] = 0; + } + + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + c4iw_ofld_send(&ep->com.dev->rdev, skb); +} + +static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) +{ + struct cpl_close_con_req *req; + struct sk_buff *skb; + int wrlen = roundup(sizeof *req,
16); + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + skb = get_skb(NULL, wrlen, gfp); + if (!skb) { + printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + t4_set_arp_err_handler(skb, NULL, arp_failure_discard); + req = (struct cpl_close_con_req *) skb_put(skb, wrlen); + memset(req, 0, wrlen); + INIT_TP_WR(req, ep->hwtid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, + ep->hwtid)); + return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); +} + +static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) +{ + struct cpl_abort_req *req; + int wrlen = roundup(sizeof *req, 16); + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + skb = get_skb(skb, wrlen, gfp); + if (!skb) { + printk(KERN_ERR MOD "%s - failed to alloc skb.\n", + __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); + req = (struct cpl_abort_req *) skb_put(skb, wrlen); + memset(req, 0, wrlen); + INIT_TP_WR(req, ep->hwtid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); + req->cmd = CPL_ABORT_SEND_RST; + return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); +} + +static int send_connect(struct c4iw_ep *ep) +{ + struct cpl_act_open_req *req; + struct sk_buff *skb; + u64 opt0; + u32 opt2; + unsigned int mtu_idx; + int wscale; + int wrlen = roundup(sizeof *req, 16); + + PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); + + skb = get_skb(NULL, wrlen, GFP_KERNEL); + if (!skb) { + printk(KERN_ERR MOD "%s - failed to alloc skb.\n", + __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx); + + cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); + wscale = compute_wscale(rcv_win); + opt0 = KEEP_ALIVE(1) | + WND_SCALE(wscale) | + MSS_IDX(mtu_idx) | + L2T_IDX(ep->l2t->idx) | + TX_CHAN(ep->tx_chan) | + SMAC_SEL(ep->smac_idx) | + DSCP(ep->tos) | + RCV_BUFSIZ(rcv_win>>10); + opt2 = RX_CHANNEL(0) | + RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); + if (enable_tcp_timestamps) + opt2 |= TSTAMPS_EN(1); + if (enable_tcp_sack) + opt2 |= SACK_EN(1); + if (wscale && enable_tcp_window_scaling) + opt2 |= WND_SCALE_EN(1); + t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); + + req = (struct cpl_act_open_req *) skb_put(skb, wrlen); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32( + MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); + req->local_port = ep->com.local_addr.sin_port; + req->peer_port = ep->com.remote_addr.sin_port; + req->local_ip = ep->com.local_addr.sin_addr.s_addr; + req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; + req->opt0 = cpu_to_be64(opt0); + req->params = 0; + req->opt2 = cpu_to_be32(opt2); + return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); +} + +static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb) +{ + int mpalen, wrlen; + struct fw_ofld_tx_data_wr *req; + struct mpa_message *mpa; + + PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); + + BUG_ON(skb_cloned(skb)); + + mpalen = sizeof(*mpa) + ep->plen; + wrlen = roundup(mpalen + sizeof *req, 16); + skb = get_skb(skb, wrlen, GFP_KERNEL); + if (!skb) { + connect_reply_upcall(ep, -ENOMEM); + return; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + + req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); + memset(req, 0, wrlen); + req->op_to_immdlen = cpu_to_be32( + FW_WR_OP(FW_OFLD_TX_DATA_WR) | + 
FW_WR_COMPL(1) | + FW_WR_IMMDLEN(mpalen)); + req->flowid_len16 = cpu_to_be32( + FW_WR_FLOWID(ep->hwtid) | + FW_WR_LEN16(wrlen >> 4)); + req->plen = cpu_to_be32(mpalen); + req->tunnel_to_proxy = cpu_to_be32( + FW_OFLD_TX_DATA_WR_FLUSH(1) | + FW_OFLD_TX_DATA_WR_SHOVE(1)); + + mpa = (struct mpa_message *)(req + 1); + memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); + mpa->flags = (crc_enabled ? MPA_CRC : 0) | + (markers_enabled ? MPA_MARKERS : 0); + mpa->private_data_size = htons(ep->plen); + mpa->revision = mpa_rev; + + if (ep->plen) + memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen); + + /* + * Reference the mpa skb. This ensures the data area + * will remain in memory until the hw acks the tx. + * Function fw4_ack() will deref it. + */ + skb_get(skb); + t4_set_arp_err_handler(skb, NULL, arp_failure_discard); + BUG_ON(ep->mpa_skb); + ep->mpa_skb = skb; + c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); + start_ep_timer(ep); + state_set(&ep->com, MPA_REQ_SENT); + ep->mpa_attr.initiator = 1; + return; +} + +static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) +{ + int mpalen, wrlen; + struct fw_ofld_tx_data_wr *req; + struct mpa_message *mpa; + struct sk_buff *skb; + + PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); + + mpalen = sizeof(*mpa) + plen; + wrlen = roundup(mpalen + sizeof *req, 16); + + skb = get_skb(NULL, wrlen, GFP_KERNEL); + if (!skb) { + printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + + req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); + memset(req, 0, wrlen); + req->op_to_immdlen = cpu_to_be32( + FW_WR_OP(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL(1) | + FW_WR_IMMDLEN(mpalen)); + req->flowid_len16 = cpu_to_be32( + FW_WR_FLOWID(ep->hwtid) | + FW_WR_LEN16(wrlen >> 4)); + req->plen = cpu_to_be32(mpalen); + req->tunnel_to_proxy = cpu_to_be32( + FW_OFLD_TX_DATA_WR_FLUSH(1) | + FW_OFLD_TX_DATA_WR_SHOVE(1)); + + mpa = (struct mpa_message *)(req + 1); + memset(mpa, 0, sizeof(*mpa)); + memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); + mpa->flags = MPA_REJECT; + mpa->revision = mpa_rev; + mpa->private_data_size = htons(plen); + if (plen) + memcpy(mpa->private_data, pdata, plen); + + /* + * Reference the mpa skb again. This ensures the data area + * will remain in memory until the hw acks the tx. + * Function fw4_ack() will deref it. 
+ */ + skb_get(skb); + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + t4_set_arp_err_handler(skb, NULL, arp_failure_discard); + BUG_ON(ep->mpa_skb); + ep->mpa_skb = skb; + return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); +} + +static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) +{ + int mpalen, wrlen; + struct fw_ofld_tx_data_wr *req; + struct mpa_message *mpa; + struct sk_buff *skb; + + PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); + + mpalen = sizeof(*mpa) + plen; + wrlen = roundup(mpalen + sizeof *req, 16); + + skb = get_skb(NULL, wrlen, GFP_KERNEL); + if (!skb) { + printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + + req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); + memset(req, 0, wrlen); + req->op_to_immdlen = cpu_to_be32( + FW_WR_OP(FW_OFLD_TX_DATA_WR) | + FW_WR_COMPL(1) | + FW_WR_IMMDLEN(mpalen)); + req->flowid_len16 = cpu_to_be32( + FW_WR_FLOWID(ep->hwtid) | + FW_WR_LEN16(wrlen >> 4)); + req->plen = cpu_to_be32(mpalen); + req->tunnel_to_proxy = cpu_to_be32( + FW_OFLD_TX_DATA_WR_FLUSH(1) | + FW_OFLD_TX_DATA_WR_SHOVE(1)); + + mpa = (struct mpa_message *)(req + 1); + memset(mpa, 0, sizeof(*mpa)); + memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); + mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | + (markers_enabled ? MPA_MARKERS : 0); + mpa->revision = mpa_rev; + mpa->private_data_size = htons(plen); + if (plen) + memcpy(mpa->private_data, pdata, plen); + + /* + * Reference the mpa skb. This ensures the data area + * will remain in memory until the hw acks the tx. + * Function fw4_ack() will deref it. + */ + skb_get(skb); + t4_set_arp_err_handler(skb, NULL, arp_failure_discard); + ep->mpa_skb = skb; + state_set(&ep->com, MPA_REP_SENT); + return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); +} + +static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_act_establish *req = cplhdr(skb); + unsigned int tid = GET_TID(req); + unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); + struct tid_info *t = dev->rdev.lldi.tids; + + ep = lookup_atid(t, atid); + + PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, + be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); + + dst_confirm(ep->dst); + + /* setup the hwtid for this connection */ + ep->hwtid = tid; + cxgb4_insert_tid(t, ep, tid); + + ep->snd_seq = be32_to_cpu(req->snd_isn); + ep->rcv_seq = be32_to_cpu(req->rcv_isn); + + set_emss(ep, ntohs(req->tcp_opt)); + + /* dealloc the atid */ + cxgb4_free_atid(t, atid); + + /* start MPA negotiation */ + send_flowc(ep, NULL); + send_mpa_req(ep, skb); + + return 0; +} + +static void close_complete_upcall(struct c4iw_ep *ep) +{ + struct iw_cm_event event; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_CLOSE; + if (ep->com.cm_id) { + PDBG("close complete delivered ep %p cm_id %p tid %u\n", + ep, ep->com.cm_id, ep->hwtid); + ep->com.cm_id->event_handler(ep->com.cm_id, &event); + ep->com.cm_id->rem_ref(ep->com.cm_id); + ep->com.cm_id = NULL; + ep->com.qp = NULL; + } +} + +static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) +{ + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + close_complete_upcall(ep); + state_set(&ep->com, ABORTING); + return send_abort(ep, skb, gfp); +} + +static void peer_close_upcall(struct c4iw_ep *ep) +{ + struct iw_cm_event event; + + PDBG("%s ep %p tid 
%u\n", __func__, ep, ep->hwtid); + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_DISCONNECT; + if (ep->com.cm_id) { + PDBG("peer close delivered ep %p cm_id %p tid %u\n", + ep, ep->com.cm_id, ep->hwtid); + ep->com.cm_id->event_handler(ep->com.cm_id, &event); + } +} + +static void peer_abort_upcall(struct c4iw_ep *ep) +{ + struct iw_cm_event event; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_CLOSE; + event.status = -ECONNRESET; + if (ep->com.cm_id) { + PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, + ep->com.cm_id, ep->hwtid); + ep->com.cm_id->event_handler(ep->com.cm_id, &event); + ep->com.cm_id->rem_ref(ep->com.cm_id); + ep->com.cm_id = NULL; + ep->com.qp = NULL; + } +} + +static void connect_reply_upcall(struct c4iw_ep *ep, int status) +{ + struct iw_cm_event event; + + PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_CONNECT_REPLY; + event.status = status; + event.local_addr = ep->com.local_addr; + event.remote_addr = ep->com.remote_addr; + + if ((status == 0) || (status == -ECONNREFUSED)) { + event.private_data_len = ep->plen; + event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); + } + if (ep->com.cm_id) { + PDBG("%s ep %p tid %u status %d\n", __func__, ep, + ep->hwtid, status); + ep->com.cm_id->event_handler(ep->com.cm_id, &event); + } + if (status < 0) { + ep->com.cm_id->rem_ref(ep->com.cm_id); + ep->com.cm_id = NULL; + ep->com.qp = NULL; + } +} + +static void connect_request_upcall(struct c4iw_ep *ep) +{ + struct iw_cm_event event; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_CONNECT_REQUEST; + event.local_addr = ep->com.local_addr; + event.remote_addr = ep->com.remote_addr; + event.private_data_len = ep->plen; + event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); + event.provider_data = ep; + if (state_read(&ep->parent_ep->com) != DEAD) { + c4iw_get_ep(&ep->com); + ep->parent_ep->com.cm_id->event_handler( + ep->parent_ep->com.cm_id, + &event); + } + c4iw_put_ep(&ep->parent_ep->com); + ep->parent_ep = NULL; +} + +static void established_upcall(struct c4iw_ep *ep) +{ + struct iw_cm_event event; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + memset(&event, 0, sizeof(event)); + event.event = IW_CM_EVENT_ESTABLISHED; + if (ep->com.cm_id) { + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + ep->com.cm_id->event_handler(ep->com.cm_id, &event); + } +} + +static int update_rx_credits(struct c4iw_ep *ep, u32 credits) +{ + struct cpl_rx_data_ack *req; + struct sk_buff *skb; + int wrlen = roundup(sizeof *req, 16); + + PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); + skb = get_skb(NULL, wrlen, GFP_KERNEL); + if (!skb) { + printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); + return 0; + } + + req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); + memset(req, 0, wrlen); + INIT_TP_WR(req, ep->hwtid); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, + ep->hwtid)); + req->credit_dack = cpu_to_be32(credits); + set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx); + c4iw_ofld_send(&ep->com.dev->rdev, skb); + return credits; +} + +static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) +{ + struct mpa_message *mpa; + u16 plen; + struct c4iw_qp_attributes attrs; + enum c4iw_qp_attr_mask mask; + int err; + + PDBG("%s ep %p tid %u\n", __func__, ep, 
ep->hwtid); + + /* + * Stop mpa timer. If it expired, then the state has + * changed and we bail since ep_timeout already aborted + * the connection. + */ + stop_ep_timer(ep); + if (state_read(&ep->com) != MPA_REQ_SENT) + return; + + /* + * If we get more than the supported amount of private data + * then we must fail this connection. + */ + if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { + err = -EINVAL; + goto err; + } + + /* + * copy the new data into our accumulation buffer. + */ + skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), + skb->len); + ep->mpa_pkt_len += skb->len; + + /* + * if we don't even have the mpa message, then bail. + */ + if (ep->mpa_pkt_len < sizeof(*mpa)) + return; + mpa = (struct mpa_message *) ep->mpa_pkt; + + /* Validate MPA header. */ + if (mpa->revision != mpa_rev) { + err = -EPROTO; + goto err; + } + if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { + err = -EPROTO; + goto err; + } + + plen = ntohs(mpa->private_data_size); + + /* + * Fail if there's too much private data. + */ + if (plen > MPA_MAX_PRIVATE_DATA) { + err = -EPROTO; + goto err; + } + + /* + * If plen does not account for pkt size + */ + if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { + err = -EPROTO; + goto err; + } + + ep->plen = (u8) plen; + + /* + * If we don't have all the pdata yet, then bail. + * We'll continue process when more data arrives. + */ + if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) + return; + + if (mpa->flags & MPA_REJECT) { + err = -ECONNREFUSED; + goto err; + } + + /* + * If we get here we have accumulated the entire mpa + * start reply message including private data. And + * the MPA header is valid. + */ + state_set(&ep->com, FPDU_MODE); + ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; + ep->mpa_attr.recv_marker_enabled = markers_enabled; + ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; + ep->mpa_attr.version = mpa_rev; + ep->mpa_attr.p2p_type = peer2peer ? p2p_type : + FW_RI_INIT_P2PTYPE_DISABLED; + PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " + "xmit_marker_enabled=%d, version=%d\n", __func__, + ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, + ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); + + attrs.mpa_attr = ep->mpa_attr; + attrs.max_ird = ep->ird; + attrs.max_ord = ep->ord; + attrs.llp_stream_handle = ep; + attrs.next_state = C4IW_QP_STATE_RTS; + + mask = C4IW_QP_ATTR_NEXT_STATE | + C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | + C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; + + /* bind QP and TID with INIT_WR */ + err = c4iw_modify_qp(ep->com.qp->rhp, + ep->com.qp, mask, &attrs, 1); + if (err) + goto err; + goto out; +err: + abort_connection(ep, skb, GFP_KERNEL); +out: + connect_reply_upcall(ep, err); + return; +} + +static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) +{ + struct mpa_message *mpa; + u16 plen; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + + if (state_read(&ep->com) != MPA_REQ_WAIT) + return; + + /* + * If we get more than the supported amount of private data + * then we must fail this connection. + */ + if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { + stop_ep_timer(ep); + abort_connection(ep, skb, GFP_KERNEL); + return; + } + + PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); + + /* + * Copy the new data into our accumulation buffer. 
+ */ + skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), + skb->len); + ep->mpa_pkt_len += skb->len; + + /* + * If we don't even have the mpa message, then bail. + * We'll continue process when more data arrives. + */ + if (ep->mpa_pkt_len < sizeof(*mpa)) + return; + + PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); + stop_ep_timer(ep); + mpa = (struct mpa_message *) ep->mpa_pkt; + + /* + * Validate MPA Header. + */ + if (mpa->revision != mpa_rev) { + abort_connection(ep, skb, GFP_KERNEL); + return; + } + + if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { + abort_connection(ep, skb, GFP_KERNEL); + return; + } + + plen = ntohs(mpa->private_data_size); + + /* + * Fail if there's too much private data. + */ + if (plen > MPA_MAX_PRIVATE_DATA) { + abort_connection(ep, skb, GFP_KERNEL); + return; + } + + /* + * If plen does not account for pkt size + */ + if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { + abort_connection(ep, skb, GFP_KERNEL); + return; + } + ep->plen = (u8) plen; + + /* + * If we don't have all the pdata yet, then bail. + */ + if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) + return; + + /* + * If we get here we have accumulated the entire mpa + * start reply message including private data. + */ + ep->mpa_attr.initiator = 0; + ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; + ep->mpa_attr.recv_marker_enabled = markers_enabled; + ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; + ep->mpa_attr.version = mpa_rev; + ep->mpa_attr.p2p_type = peer2peer ? p2p_type : + FW_RI_INIT_P2PTYPE_DISABLED; + PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " + "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, + ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, + ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, + ep->mpa_attr.p2p_type); + + state_set(&ep->com, MPA_REQ_RCVD); + + /* drive upcall */ + connect_request_upcall(ep); + return; +} + +static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_rx_data *hdr = cplhdr(skb); + unsigned int dlen = ntohs(hdr->len); + unsigned int tid = GET_TID(hdr); + struct tid_info *t = dev->rdev.lldi.tids; + + ep = lookup_tid(t, tid); + PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); + skb_pull(skb, sizeof(*hdr)); + skb_trim(skb, dlen); + + ep->rcv_seq += dlen; + BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); + + /* update RX credits */ + update_rx_credits(ep, dlen); + + switch (state_read(&ep->com)) { + case MPA_REQ_SENT: + process_mpa_reply(ep, skb); + break; + case MPA_REQ_WAIT: + process_mpa_request(ep, skb); + break; + case MPA_REP_SENT: + break; + default: + printk(KERN_ERR MOD "%s Unexpected streaming data." + " ep %p state %d tid %u\n", + __func__, ep, state_read(&ep->com), ep->hwtid); + + /* + * The ep will timeout and inform the ULP of the failure. + * See ep_timeout(). 
+ */ + break; + } + return 0; +} + +static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_abort_rpl_rss *rpl = cplhdr(skb); + unsigned long flags; + int release = 0; + unsigned int tid = GET_TID(rpl); + struct tid_info *t = dev->rdev.lldi.tids; + + ep = lookup_tid(t, tid); + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + BUG_ON(!ep); + spin_lock_irqsave(&ep->com.lock, flags); + switch (ep->com.state) { + case ABORTING: + __state_set(&ep->com, DEAD); + release = 1; + break; + default: + printk(KERN_ERR "%s ep %p state %d\n", + __func__, ep, ep->com.state); + break; + } + spin_unlock_irqrestore(&ep->com.lock, flags); + + if (release) + release_ep_resources(ep); + return 0; +} + +/* + * Return whether a failed active open has allocated a TID + */ +static inline int act_open_has_tid(int status) +{ + return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && + status != CPL_ERR_ARP_MISS; +} + +static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_act_open_rpl *rpl = cplhdr(skb); + unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( + ntohl(rpl->atid_status))); + struct tid_info *t = dev->rdev.lldi.tids; + int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); + + ep = lookup_atid(t, atid); + + PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, + status, status2errno(status)); + + if (status == CPL_ERR_RTX_NEG_ADVICE) { + printk(KERN_WARNING MOD "Connection problems for atid %u\n", + atid); + return 0; + } + + connect_reply_upcall(ep, status2errno(status)); + state_set(&ep->com, DEAD); + + if (status && act_open_has_tid(status)) + cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); + + cxgb4_free_atid(t, atid); + dst_release(ep->dst); + cxgb4_l2t_release(ep->l2t); + c4iw_put_ep(&ep->com); + + return 0; +} + +static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_pass_open_rpl *rpl = cplhdr(skb); + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int stid = GET_TID(rpl); + struct c4iw_listen_ep *ep = lookup_stid(t, stid); + + if (!ep) { + printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); + return 0; + } + PDBG("%s ep %p status %d error %d\n", __func__, ep, + rpl->status, status2errno(rpl->status)); + ep->com.rpl_err = status2errno(rpl->status); + ep->com.rpl_done = 1; + wake_up(&ep->com.waitq); + + return 0; +} + +static int listen_stop(struct c4iw_listen_ep *ep) +{ + struct sk_buff *skb; + struct cpl_close_listsvr_req *req; + + PDBG("%s ep %p\n", __func__, ep); + skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); + if (!skb) { + printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); + return -ENOMEM; + } + req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); + INIT_TP_WR(req, 0); + OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, + ep->stid)); + req->reply_ctrl = cpu_to_be16( + QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); + set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); + return c4iw_ofld_send(&ep->com.dev->rdev, skb); +} + +static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int stid = GET_TID(rpl); + struct c4iw_listen_ep *ep = lookup_stid(t, stid); + + PDBG("%s ep %p\n", __func__, ep); + ep->com.rpl_err = status2errno(rpl->status); + ep->com.rpl_done = 1; + wake_up(&ep->com.waitq); + return 0; +} + +static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct 
sk_buff *skb, + struct cpl_pass_accept_req *req) +{ + struct cpl_pass_accept_rpl *rpl; + unsigned int mtu_idx; + u64 opt0; + u32 opt2; + int wscale; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + BUG_ON(skb_cloned(skb)); + skb_trim(skb, sizeof(*rpl)); + skb_get(skb); + cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); + wscale = compute_wscale(rcv_win); + opt0 = KEEP_ALIVE(1) | + WND_SCALE(wscale) | + MSS_IDX(mtu_idx) | + L2T_IDX(ep->l2t->idx) | + TX_CHAN(ep->tx_chan) | + SMAC_SEL(ep->smac_idx) | + DSCP(ep->tos) | + RCV_BUFSIZ(rcv_win>>10); + opt2 = RX_CHANNEL(0) | + RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); + + if (enable_tcp_timestamps && req->tcpopt.tstamp) + opt2 |= TSTAMPS_EN(1); + if (enable_tcp_sack && req->tcpopt.sack) + opt2 |= SACK_EN(1); + if (wscale && enable_tcp_window_scaling) + opt2 |= WND_SCALE_EN(1); + + rpl = cplhdr(skb); + INIT_TP_WR(rpl, ep->hwtid); + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, + ep->hwtid)); + rpl->opt0 = cpu_to_be64(opt0); + rpl->opt2 = cpu_to_be32(opt2); + set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx); + c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); + + return; +} + +static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, + struct sk_buff *skb) +{ + PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, + peer_ip); + BUG_ON(skb_cloned(skb)); + skb_trim(skb, sizeof(struct cpl_tid_release)); + skb_get(skb); + release_tid(&dev->rdev, hwtid, skb); + return; +} + +static void get_4tuple(struct cpl_pass_accept_req *req, + __be32 *local_ip, __be32 *peer_ip, + __be16 *local_port, __be16 *peer_port) +{ + int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); + int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); + struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); + struct tcphdr *tcp = (struct tcphdr *) + ((u8 *)(req + 1) + eth_len + ip_len); + + PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, + ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), + ntohs(tcp->dest)); + + *peer_ip = ip->saddr; + *local_ip = ip->daddr; + *peer_port = tcp->source; + *local_port = tcp->dest; + + return; +} + +static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *child_ep, *parent_ep; + struct cpl_pass_accept_req *req = cplhdr(skb); + unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int hwtid = GET_TID(req); + struct dst_entry *dst; + struct l2t_entry *l2t; + struct rtable *rt; + __be32 local_ip, peer_ip; + __be16 local_port, peer_port; + struct net_device *pdev; + u32 tx_chan, smac_idx; + u16 rss_qid; + u32 mtu; + int step; + int txq_idx; + + parent_ep = lookup_stid(t, stid); + PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); + + get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); + + if (state_read(&parent_ep->com) != LISTEN) { + printk(KERN_ERR "%s - listening ep not in LISTEN\n", + __func__); + goto reject; + } + + /* Find output route */ + rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, + GET_POPEN_TOS(ntohl(req->tos_stid))); + if (!rt) { + printk(KERN_ERR MOD "%s - failed to find dst entry!\n", + __func__); + goto reject; + } + dst = &rt->u.dst; + if (dst->neighbour->dev->flags & IFF_LOOPBACK) { + pdev = ip_dev_find(&init_net, peer_ip); + BUG_ON(!pdev); + l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, + pdev, 0); + mtu = pdev->mtu; + tx_chan = cxgb4_port_chan(pdev); + smac_idx = tx_chan << 1; + step = 
dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; + txq_idx = cxgb4_port_idx(pdev) * step; + step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; + rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step]; + dev_put(pdev); + } else { + l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour, + dst->neighbour->dev, 0); + mtu = dst_mtu(dst); + tx_chan = cxgb4_port_chan(dst->neighbour->dev); + smac_idx = tx_chan << 1; + step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan; + txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step; + step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; + rss_qid = dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(dst->neighbour->dev) * step]; + } + if (!l2t) { + printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", + __func__); + dst_release(dst); + goto reject; + } + + child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); + if (!child_ep) { + printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", + __func__); + cxgb4_l2t_release(l2t); + dst_release(dst); + goto reject; + } + state_set(&child_ep->com, CONNECTING); + child_ep->com.dev = dev; + child_ep->com.cm_id = NULL; + child_ep->com.local_addr.sin_family = PF_INET; + child_ep->com.local_addr.sin_port = local_port; + child_ep->com.local_addr.sin_addr.s_addr = local_ip; + child_ep->com.remote_addr.sin_family = PF_INET; + child_ep->com.remote_addr.sin_port = peer_port; + child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; + c4iw_get_ep(&parent_ep->com); + child_ep->parent_ep = parent_ep; + child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); + child_ep->l2t = l2t; + child_ep->dst = dst; + child_ep->hwtid = hwtid; + child_ep->tx_chan = tx_chan; + child_ep->smac_idx = smac_idx; + child_ep->rss_qid = rss_qid; + child_ep->mtu = mtu; + child_ep->txq_idx = txq_idx; + + PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, + tx_chan, smac_idx, rss_qid); + + init_timer(&child_ep->timer); + cxgb4_insert_tid(t, child_ep, hwtid); + accept_cr(child_ep, peer_ip, skb, req); + goto out; +reject: + reject_cr(dev, hwtid, peer_ip, skb); +out: + return 0; +} + +static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_pass_establish *req = cplhdr(skb); + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int tid = GET_TID(req); + + ep = lookup_tid(t, tid); + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + ep->snd_seq = be32_to_cpu(req->snd_isn); + ep->rcv_seq = be32_to_cpu(req->rcv_isn); + + set_emss(ep, ntohs(req->tcp_opt)); + + dst_confirm(ep->dst); + state_set(&ep->com, MPA_REQ_WAIT); + start_ep_timer(ep); + send_flowc(ep, skb); + + return 0; +} + +static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_peer_close *hdr = cplhdr(skb); + struct c4iw_ep *ep; + struct c4iw_qp_attributes attrs; + unsigned long flags; + int disconnect = 1; + int release = 0; + int closing = 0; + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int tid = GET_TID(hdr); + int start_timer = 0; + int stop_timer = 0; + + ep = lookup_tid(t, tid); + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + dst_confirm(ep->dst); + + spin_lock_irqsave(&ep->com.lock, flags); + switch (ep->com.state) { + case MPA_REQ_WAIT: + __state_set(&ep->com, CLOSING); + break; + case MPA_REQ_SENT: + __state_set(&ep->com, CLOSING); + connect_reply_upcall(ep, -ECONNRESET); + break; + case MPA_REQ_RCVD: + + /* + * We're gonna mark this puppy DEAD, but keep + * the reference on it until the ULP accepts or + * rejects the CR. 
Also wake up anyone waiting + * in rdma connection migration (see c4iw_accept_cr()). + */ + __state_set(&ep->com, CLOSING); + ep->com.rpl_done = 1; + ep->com.rpl_err = -ECONNRESET; + PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); + wake_up(&ep->com.waitq); + break; + case MPA_REP_SENT: + __state_set(&ep->com, CLOSING); + ep->com.rpl_done = 1; + ep->com.rpl_err = -ECONNRESET; + PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); + wake_up(&ep->com.waitq); + break; + case FPDU_MODE: + start_timer = 1; + __state_set(&ep->com, CLOSING); + closing = 1; + peer_close_upcall(ep); + break; + case ABORTING: + disconnect = 0; + break; + case CLOSING: + __state_set(&ep->com, MORIBUND); + disconnect = 0; + break; + case MORIBUND: + stop_timer = 1; + if (ep->com.cm_id && ep->com.qp) { + attrs.next_state = C4IW_QP_STATE_IDLE; + c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); + } + close_complete_upcall(ep); + __state_set(&ep->com, DEAD); + release = 1; + disconnect = 0; + break; + case DEAD: + disconnect = 0; + break; + default: + BUG_ON(1); + } + spin_unlock_irqrestore(&ep->com.lock, flags); + if (closing) { + attrs.next_state = C4IW_QP_STATE_CLOSING; + c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); + } + if (start_timer) + start_ep_timer(ep); + if (stop_timer) + stop_ep_timer(ep); + if (disconnect) + c4iw_ep_disconnect(ep, 0, GFP_KERNEL); + if (release) + release_ep_resources(ep); + return 0; +} + +/* + * Returns whether an ABORT_REQ_RSS message is a negative advice. + */ +static int is_neg_adv_abort(unsigned int status) +{ + return status == CPL_ERR_RTX_NEG_ADVICE || + status == CPL_ERR_PERSIST_NEG_ADVICE; +} + +static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_abort_req_rss *req = cplhdr(skb); + struct c4iw_ep *ep; + struct cpl_abort_rpl *rpl; + struct sk_buff *rpl_skb; + struct c4iw_qp_attributes attrs; + int ret; + int release = 0; + unsigned long flags; + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int tid = GET_TID(req); + int stop_timer = 0; + + ep = lookup_tid(t, tid); + if (is_neg_adv_abort(req->status)) { + PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, + ep->hwtid); + return 0; + } + spin_lock_irqsave(&ep->com.lock, flags); + PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, + ep->com.state); + switch (ep->com.state) { + case CONNECTING: + break; + case MPA_REQ_WAIT: + stop_timer = 1; + break; + case MPA_REQ_SENT: + stop_timer = 1; + connect_reply_upcall(ep, -ECONNRESET); + break; + case MPA_REP_SENT: + ep->com.rpl_done = 1; + ep->com.rpl_err = -ECONNRESET; + PDBG("waking up ep %p\n", ep); + wake_up(&ep->com.waitq); + break; + case MPA_REQ_RCVD: + + /* + * We're gonna mark this puppy DEAD, but keep + * the reference on it until the ULP accepts or + * rejects the CR. Also wake up anyone waiting + * in rdma connection migration (see c4iw_accept_cr()). 
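+		 * rpl_err carries -ECONNRESET to whoever is sleeping on com.waitq.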
+ */ + ep->com.rpl_done = 1; + ep->com.rpl_err = -ECONNRESET; + PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); + wake_up(&ep->com.waitq); + break; + case MORIBUND: + case CLOSING: + stop_timer = 1; + /*FALLTHROUGH*/ + case FPDU_MODE: + if (ep->com.cm_id && ep->com.qp) { + attrs.next_state = C4IW_QP_STATE_ERROR; + ret = c4iw_modify_qp(ep->com.qp->rhp, + ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, + &attrs, 1); + if (ret) + printk(KERN_ERR MOD + "%s - qp <- error failed!\n", + __func__); + } + peer_abort_upcall(ep); + break; + case ABORTING: + break; + case DEAD: + PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); + spin_unlock_irqrestore(&ep->com.lock, flags); + return 0; + default: + BUG_ON(1); + break; + } + dst_confirm(ep->dst); + if (ep->com.state != ABORTING) { + __state_set(&ep->com, DEAD); + release = 1; + } + spin_unlock_irqrestore(&ep->com.lock, flags); + + rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); + if (!rpl_skb) { + printk(KERN_ERR MOD "%s - cannot allocate skb!\n", + __func__); + release = 1; + goto out; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); + rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); + INIT_TP_WR(rpl, ep->hwtid); + OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); + rpl->cmd = CPL_ABORT_NO_RST; + c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); +out: + if (stop_timer) + stop_ep_timer(ep); + if (release) + release_ep_resources(ep); + return 0; +} + +static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct c4iw_qp_attributes attrs; + struct cpl_close_con_rpl *rpl = cplhdr(skb); + unsigned long flags; + int release = 0; + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int tid = GET_TID(rpl); + int stop_timer = 0; + + ep = lookup_tid(t, tid); + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + BUG_ON(!ep); + + /* The cm_id may be null if we failed to connect */ + spin_lock_irqsave(&ep->com.lock, flags); + switch (ep->com.state) { + case CLOSING: + __state_set(&ep->com, MORIBUND); + break; + case MORIBUND: + stop_timer = 1; + if ((ep->com.cm_id) && (ep->com.qp)) { + attrs.next_state = C4IW_QP_STATE_IDLE; + c4iw_modify_qp(ep->com.qp->rhp, + ep->com.qp, + C4IW_QP_ATTR_NEXT_STATE, + &attrs, 1); + } + close_complete_upcall(ep); + __state_set(&ep->com, DEAD); + release = 1; + break; + case ABORTING: + case DEAD: + break; + default: + BUG_ON(1); + break; + } + spin_unlock_irqrestore(&ep->com.lock, flags); + if (stop_timer) + stop_ep_timer(ep); + if (release) + release_ep_resources(ep); + return 0; +} + +static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_rdma_terminate *term = cplhdr(skb); + struct tid_info *t = dev->rdev.lldi.tids; + unsigned int tid = GET_TID(term); + + ep = lookup_tid(t, tid); + + if (state_read(&ep->com) != FPDU_MODE) + return 0; + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + skb_pull(skb, sizeof *term); + PDBG("%s saving %d bytes of term msg\n", __func__, skb->len); + skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, + skb->len); + ep->com.qp->attr.terminate_msg_len = skb->len; + ep->com.qp->attr.is_terminate_local = 0; + return 0; +} + +/* + * Upcall from the adapter indicating data has been transmitted. + * For us its just the single MPA request or reply. We can now free + * the skb holding the mpa message. 
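+ * A zero-credit ack is unexpected and is only logged below.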
+ */ +static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct c4iw_ep *ep; + struct cpl_fw4_ack *hdr = cplhdr(skb); + u8 credits = hdr->credits; + unsigned int tid = GET_TID(hdr); + struct tid_info *t = dev->rdev.lldi.tids; + + + ep = lookup_tid(t, tid); + PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); + if (credits == 0) { + PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n", + __func__, ep, ep->hwtid, state_read(&ep->com)); + return 0; + } + + dst_confirm(ep->dst); + if (ep->mpa_skb) { + PDBG("%s last streaming msg ack ep %p tid %u state %u " + "initiator %u freeing skb\n", __func__, ep, ep->hwtid, + state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); + kfree_skb(ep->mpa_skb); + ep->mpa_skb = NULL; + } + return 0; +} + +static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_fw6_msg *rpl = cplhdr(skb); + struct c4iw_wr_wait *wr_waitp; + int ret; + + PDBG("%s type %u\n", __func__, rpl->type); + + switch (rpl->type) { + case 1: + ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); + wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1]; + PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); + if (wr_waitp) { + wr_waitp->ret = ret; + wr_waitp->done = 1; + wake_up(&wr_waitp->wait); + } + break; + case 2: + c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); + break; + default: + printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, + rpl->type); + break; + } + return 0; +} + +static void ep_timeout(unsigned long arg) +{ + struct c4iw_ep *ep = (struct c4iw_ep *)arg; + struct c4iw_qp_attributes attrs; + unsigned long flags; + int abort = 1; + + spin_lock_irqsave(&ep->com.lock, flags); + PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, + ep->com.state); + switch (ep->com.state) { + case MPA_REQ_SENT: + __state_set(&ep->com, ABORTING); + connect_reply_upcall(ep, -ETIMEDOUT); + break; + case MPA_REQ_WAIT: + __state_set(&ep->com, ABORTING); + break; + case CLOSING: + case MORIBUND: + if (ep->com.cm_id && ep->com.qp) { + attrs.next_state = C4IW_QP_STATE_ERROR; + c4iw_modify_qp(ep->com.qp->rhp, + ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, + &attrs, 1); + } + __state_set(&ep->com, ABORTING); + break; + default: + printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", + __func__, ep, ep->hwtid, ep->com.state); + WARN_ON(1); + abort = 0; + } + spin_unlock_irqrestore(&ep->com.lock, flags); + if (abort) + abort_connection(ep, NULL, GFP_ATOMIC); + c4iw_put_ep(&ep->com); +} + +int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) +{ + int err; + struct c4iw_ep *ep = to_ep(cm_id); + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + + if (state_read(&ep->com) == DEAD) { + c4iw_put_ep(&ep->com); + return -ECONNRESET; + } + BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); + if (mpa_rev == 0) + abort_connection(ep, NULL, GFP_KERNEL); + else { + err = send_mpa_reject(ep, pdata, pdata_len); + err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); + } + c4iw_put_ep(&ep->com); + return 0; +} + +int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) +{ + int err; + struct c4iw_qp_attributes attrs; + enum c4iw_qp_attr_mask mask; + struct c4iw_ep *ep = to_ep(cm_id); + struct c4iw_dev *h = to_c4iw_dev(cm_id->device); + struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); + + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); + if (state_read(&ep->com) == DEAD) { + err = -ECONNRESET; + goto err; + } + + BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); + BUG_ON(!qp); + + if 
((conn_param->ord > T4_MAX_READ_DEPTH) || + (conn_param->ird > T4_MAX_READ_DEPTH)) { + abort_connection(ep, NULL, GFP_KERNEL); + err = -EINVAL; + goto err; + } + + cm_id->add_ref(cm_id); + ep->com.cm_id = cm_id; + ep->com.qp = qp; + + ep->ird = conn_param->ird; + ep->ord = conn_param->ord; + + if (peer2peer && ep->ird == 0) + ep->ird = 1; + + PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); + + /* bind QP to EP and move to RTS */ + attrs.mpa_attr = ep->mpa_attr; + attrs.max_ird = ep->ird; + attrs.max_ord = ep->ord; + attrs.llp_stream_handle = ep; + attrs.next_state = C4IW_QP_STATE_RTS; + + /* bind QP and TID with INIT_WR */ + mask = C4IW_QP_ATTR_NEXT_STATE | + C4IW_QP_ATTR_LLP_STREAM_HANDLE | + C4IW_QP_ATTR_MPA_ATTR | + C4IW_QP_ATTR_MAX_IRD | + C4IW_QP_ATTR_MAX_ORD; + + err = c4iw_modify_qp(ep->com.qp->rhp, + ep->com.qp, mask, &attrs, 1); + if (err) + goto err1; + err = send_mpa_reply(ep, conn_param->private_data, + conn_param->private_data_len); + if (err) + goto err1; + + state_set(&ep->com, FPDU_MODE); + established_upcall(ep); + c4iw_put_ep(&ep->com); + return 0; +err1: + ep->com.cm_id = NULL; + ep->com.qp = NULL; + cm_id->rem_ref(cm_id); +err: + c4iw_put_ep(&ep->com); + return err; +} + +int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) +{ + int err = 0; + struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); + struct c4iw_ep *ep; + struct rtable *rt; + struct net_device *pdev; + int step; + + ep = alloc_ep(sizeof(*ep), GFP_KERNEL); + if (!ep) { + printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); + err = -ENOMEM; + goto out; + } + init_timer(&ep->timer); + ep->plen = conn_param->private_data_len; + if (ep->plen) + memcpy(ep->mpa_pkt + sizeof(struct mpa_message), + conn_param->private_data, ep->plen); + ep->ird = conn_param->ird; + ep->ord = conn_param->ord; + + if (peer2peer && ep->ord == 0) + ep->ord = 1; + + cm_id->add_ref(cm_id); + ep->com.dev = dev; + ep->com.cm_id = cm_id; + ep->com.qp = get_qhp(dev, conn_param->qpn); + BUG_ON(!ep->com.qp); + PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, + ep->com.qp, cm_id); + + /* + * Allocate an active TID to initiate a TCP connection. 
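+	 * The ep is stored against the atid so act_open_rpl() can look it up.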
+ */ + ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); + if (ep->atid == -1) { + printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); + err = -ENOMEM; + goto fail2; + } + + PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, + ntohl(cm_id->local_addr.sin_addr.s_addr), + ntohs(cm_id->local_addr.sin_port), + ntohl(cm_id->remote_addr.sin_addr.s_addr), + ntohs(cm_id->remote_addr.sin_port)); + + /* find a route */ + rt = find_route(dev, + cm_id->local_addr.sin_addr.s_addr, + cm_id->remote_addr.sin_addr.s_addr, + cm_id->local_addr.sin_port, + cm_id->remote_addr.sin_port, 0); + if (!rt) { + printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); + err = -EHOSTUNREACH; + goto fail3; + } + ep->dst = &rt->u.dst; + + /* get a l2t entry */ + if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) { + PDBG("%s LOOPBACK\n", __func__); + pdev = ip_dev_find(&init_net, + cm_id->remote_addr.sin_addr.s_addr); + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, + ep->dst->neighbour, + pdev, 0); + ep->mtu = pdev->mtu; + ep->tx_chan = cxgb4_port_chan(pdev); + ep->smac_idx = ep->tx_chan << 1; + step = ep->com.dev->rdev.lldi.ntxq / + ep->com.dev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(pdev) * step; + step = ep->com.dev->rdev.lldi.nrxq / + ep->com.dev->rdev.lldi.nchan; + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(pdev) * step]; + dev_put(pdev); + } else { + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, + ep->dst->neighbour, + ep->dst->neighbour->dev, 0); + ep->mtu = dst_mtu(ep->dst); + ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev); + ep->smac_idx = ep->tx_chan << 1; + step = ep->com.dev->rdev.lldi.ntxq / + ep->com.dev->rdev.lldi.nchan; + ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step; + step = ep->com.dev->rdev.lldi.nrxq / + ep->com.dev->rdev.lldi.nchan; + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[ + cxgb4_port_idx(ep->dst->neighbour->dev) * step]; + } + if (!ep->l2t) { + printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); + err = -ENOMEM; + goto fail4; + } + + PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", + __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, + ep->l2t->idx); + + state_set(&ep->com, CONNECTING); + ep->tos = 0; + ep->com.local_addr = cm_id->local_addr; + ep->com.remote_addr = cm_id->remote_addr; + + /* send connect request to rnic */ + err = send_connect(ep); + if (!err) + goto out; + + cxgb4_l2t_release(ep->l2t); +fail4: + dst_release(ep->dst); +fail3: + cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); +fail2: + cm_id->rem_ref(cm_id); + c4iw_put_ep(&ep->com); +out: + return err; +} + +int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) +{ + int err = 0; + struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); + struct c4iw_listen_ep *ep; + + + might_sleep(); + + ep = alloc_ep(sizeof(*ep), GFP_KERNEL); + if (!ep) { + printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); + err = -ENOMEM; + goto fail1; + } + PDBG("%s ep %p\n", __func__, ep); + cm_id->add_ref(cm_id); + ep->com.cm_id = cm_id; + ep->com.dev = dev; + ep->backlog = backlog; + ep->com.local_addr = cm_id->local_addr; + + /* + * Allocate a server TID. 
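+	 * The stid steers incoming PASS_OPEN_RPL and PASS_ACCEPT_REQ messages back to this listening ep.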
+ */ + ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); + if (ep->stid == -1) { + printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); + err = -ENOMEM; + goto fail2; + } + + state_set(&ep->com, LISTEN); + err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, + ep->com.local_addr.sin_addr.s_addr, + ep->com.local_addr.sin_port, + ep->com.dev->rdev.lldi.rxq_ids[0]); + if (err) + goto fail3; + + /* wait for pass_open_rpl */ + wait_event(ep->com.waitq, ep->com.rpl_done); + err = ep->com.rpl_err; + if (!err) { + cm_id->provider_data = ep; + goto out; + } +fail3: + cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); +fail2: + cm_id->rem_ref(cm_id); + c4iw_put_ep(&ep->com); +fail1: +out: + return err; +} + +int c4iw_destroy_listen(struct iw_cm_id *cm_id) +{ + int err; + struct c4iw_listen_ep *ep = to_listen_ep(cm_id); + + PDBG("%s ep %p\n", __func__, ep); + + might_sleep(); + state_set(&ep->com, DEAD); + ep->com.rpl_done = 0; + ep->com.rpl_err = 0; + err = listen_stop(ep); + if (err) + goto done; + wait_event(ep->com.waitq, ep->com.rpl_done); + cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); +done: + err = ep->com.rpl_err; + cm_id->rem_ref(cm_id); + c4iw_put_ep(&ep->com); + return err; +} + +int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) +{ + int ret = 0; + unsigned long flags; + int close = 0; + int fatal = 0; + struct c4iw_rdev *rdev; + int start_timer = 0; + int stop_timer = 0; + + spin_lock_irqsave(&ep->com.lock, flags); + + PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, + states[ep->com.state], abrupt); + + rdev = &ep->com.dev->rdev; + if (c4iw_fatal_error(rdev)) { + fatal = 1; + close_complete_upcall(ep); + ep->com.state = DEAD; + } + switch (ep->com.state) { + case MPA_REQ_WAIT: + case MPA_REQ_SENT: + case MPA_REQ_RCVD: + case MPA_REP_SENT: + case FPDU_MODE: + close = 1; + if (abrupt) + ep->com.state = ABORTING; + else { + ep->com.state = CLOSING; + start_timer = 1; + } + set_bit(CLOSE_SENT, &ep->com.flags); + break; + case CLOSING: + if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { + close = 1; + if (abrupt) { + stop_timer = 1; + ep->com.state = ABORTING; + } else + ep->com.state = MORIBUND; + } + break; + case MORIBUND: + case ABORTING: + case DEAD: + PDBG("%s ignoring disconnect ep %p state %u\n", + __func__, ep, ep->com.state); + break; + default: + BUG(); + break; + } + + spin_unlock_irqrestore(&ep->com.lock, flags); + if (start_timer) + start_ep_timer(ep); + if (stop_timer) + stop_ep_timer(ep); + if (close) { + if (abrupt) + ret = abort_connection(ep, NULL, gfp); + else + ret = send_halfclose(ep, gfp); + if (ret) + fatal = 1; + } + if (fatal) + release_ep_resources(ep); + return ret; +} + +/* + * All the CM events are handled on a work queue to have a safe context. + */ +static int sched(struct c4iw_dev *dev, struct sk_buff *skb) +{ + + /* + * Save dev in the skb->cb area. + */ + *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; + + /* + * Queue the skb and schedule the worker thread. 
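+	 * The worker runs in process context and invokes the matching work_handlers[] entry.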
+ */ + skb_queue_tail(&rxq, skb); + queue_work(workq, &skb_work); + return 0; +} + +static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_set_tcb_rpl *rpl = cplhdr(skb); + + if (rpl->status != CPL_ERR_NONE) { + printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " + "for tid %u\n", rpl->status, GET_TID(rpl)); + } + return 0; +} + +int __init c4iw_cm_init(void) +{ + skb_queue_head_init(&rxq); + + workq = create_singlethread_workqueue("iw_cxgb4"); + if (!workq) + return -ENOMEM; + + /* + * Most upcalls from the T4 Core go to sched() to + * schedule the processing on a work queue. + */ + c4iw_handlers[CPL_ACT_ESTABLISH] = sched; + c4iw_handlers[CPL_ACT_OPEN_RPL] = sched; + c4iw_handlers[CPL_RX_DATA] = sched; + c4iw_handlers[CPL_ABORT_RPL_RSS] = sched; + c4iw_handlers[CPL_ABORT_RPL] = sched; + c4iw_handlers[CPL_PASS_OPEN_RPL] = sched; + c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched; + c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched; + c4iw_handlers[CPL_PASS_ESTABLISH] = sched; + c4iw_handlers[CPL_PEER_CLOSE] = sched; + c4iw_handlers[CPL_CLOSE_CON_RPL] = sched; + c4iw_handlers[CPL_ABORT_REQ_RSS] = sched; + c4iw_handlers[CPL_RDMA_TERMINATE] = sched; + c4iw_handlers[CPL_FW4_ACK] = sched; + c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl; + c4iw_handlers[CPL_FW6_MSG] = fw6_msg; + + /* + * These are the real handlers that are called from a + * work queue. + */ + work_handlers[CPL_ACT_ESTABLISH] = act_establish; + work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl; + work_handlers[CPL_RX_DATA] = rx_data; + work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl; + work_handlers[CPL_ABORT_RPL] = abort_rpl; + work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl; + work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl; + work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req; + work_handlers[CPL_PASS_ESTABLISH] = pass_establish; + work_handlers[CPL_PEER_CLOSE] = peer_close; + work_handlers[CPL_ABORT_REQ_RSS] = peer_abort; + work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl; + work_handlers[CPL_RDMA_TERMINATE] = terminate; + work_handlers[CPL_FW4_ACK] = fw4_ack; + return 0; +} + +void __exit c4iw_cm_term(void) +{ + flush_workqueue(workq); + destroy_workqueue(workq); +} diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c new file mode 100644 index 00000000000..fb1aafcc294 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -0,0 +1,882 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "iw_cxgb4.h" + +static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, + struct c4iw_dev_ucontext *uctx) +{ + struct fw_ri_res_wr *res_wr; + struct fw_ri_res *res; + int wr_len; + struct c4iw_wr_wait wr_wait; + struct sk_buff *skb; + int ret; + + wr_len = sizeof *res_wr + sizeof *res; + skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) + return -ENOMEM; + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + + res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); + memset(res_wr, 0, wr_len); + res_wr->op_nres = cpu_to_be32( + FW_WR_OP(FW_RI_RES_WR) | + V_FW_RI_RES_WR_NRES(1) | + FW_WR_COMPL(1)); + res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); + res_wr->cookie = (u64)&wr_wait; + res = res_wr->res; + res->u.cq.restype = FW_RI_RES_TYPE_CQ; + res->u.cq.op = FW_RI_RES_OP_RESET; + res->u.cq.iqid = cpu_to_be32(cq->cqid); + + c4iw_init_wr_wait(&wr_wait); + ret = c4iw_ofld_send(rdev, skb); + if (!ret) { + wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); + if (!wr_wait.done) { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(rdev->lldi.pdev)); + rdev->flags = T4_FATAL_ERROR; + ret = -EIO; + } else + ret = wr_wait.ret; + } + + kfree(cq->sw_queue); + dma_free_coherent(&(rdev->lldi.pdev->dev), + cq->memsize, cq->queue, + pci_unmap_addr(cq, mapping)); + c4iw_put_cqid(rdev, cq->cqid, uctx); + return ret; +} + +static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, + struct c4iw_dev_ucontext *uctx) +{ + struct fw_ri_res_wr *res_wr; + struct fw_ri_res *res; + int wr_len; + int user = (uctx != &rdev->uctx); + struct c4iw_wr_wait wr_wait; + int ret; + struct sk_buff *skb; + + cq->cqid = c4iw_get_cqid(rdev, uctx); + if (!cq->cqid) { + ret = -ENOMEM; + goto err1; + } + + if (!user) { + cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL); + if (!cq->sw_queue) { + ret = -ENOMEM; + goto err2; + } + } + cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize, + &cq->dma_addr, GFP_KERNEL); + if (!cq->queue) { + ret = -ENOMEM; + goto err3; + } + pci_unmap_addr_set(cq, mapping, cq->dma_addr); + memset(cq->queue, 0, cq->memsize); + + /* build fw_ri_res_wr */ + wr_len = sizeof *res_wr + sizeof *res; + + skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) { + ret = -ENOMEM; + goto err4; + } + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + + res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); + memset(res_wr, 0, wr_len); + res_wr->op_nres = cpu_to_be32( + FW_WR_OP(FW_RI_RES_WR) | + V_FW_RI_RES_WR_NRES(1) | + FW_WR_COMPL(1)); + res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); + res_wr->cookie = (u64)&wr_wait; + res = res_wr->res; + res->u.cq.restype = FW_RI_RES_TYPE_CQ; + res->u.cq.op = FW_RI_RES_OP_WRITE; + res->u.cq.iqid = cpu_to_be32(cq->cqid); + res->u.cq.iqandst_to_iqandstindex = cpu_to_be32( + V_FW_RI_RES_WR_IQANUS(0) | + V_FW_RI_RES_WR_IQANUD(1) | + F_FW_RI_RES_WR_IQANDST | + V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids)); + res->u.cq.iqdroprss_to_iqesize = cpu_to_be16( + F_FW_RI_RES_WR_IQDROPRSS | + V_FW_RI_RES_WR_IQPCIECH(2) | + V_FW_RI_RES_WR_IQINTCNTTHRESH(0) | + F_FW_RI_RES_WR_IQO | + V_FW_RI_RES_WR_IQESIZE(1)); + res->u.cq.iqsize = cpu_to_be16(cq->size); + res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr); + + 
c4iw_init_wr_wait(&wr_wait); + + ret = c4iw_ofld_send(rdev, skb); + if (ret) + goto err4; + PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait); + wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); + if (!wr_wait.done) { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(rdev->lldi.pdev)); + rdev->flags = T4_FATAL_ERROR; + ret = -EIO; + } else + ret = wr_wait.ret; + if (ret) + goto err4; + + cq->gen = 1; + cq->gts = rdev->lldi.gts_reg; + cq->rdev = rdev; + if (user) { + cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) + + (cq->cqid << rdev->cqshift); + cq->ugts &= PAGE_MASK; + } + return 0; +err4: + dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue, + pci_unmap_addr(cq, mapping)); +err3: + kfree(cq->sw_queue); +err2: + c4iw_put_cqid(rdev, cq->cqid, uctx); +err1: + return ret; +} + +static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) +{ + struct t4_cqe cqe; + + PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, + wq, cq, cq->sw_cidx, cq->sw_pidx); + memset(&cqe, 0, sizeof(cqe)); + cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | + V_CQE_OPCODE(FW_RI_SEND) | + V_CQE_TYPE(0) | + V_CQE_SWCQE(1) | + V_CQE_QPID(wq->rq.qid)); + cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); + cq->sw_queue[cq->sw_pidx] = cqe; + t4_swcq_produce(cq); +} + +int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) +{ + int flushed = 0; + int in_use = wq->rq.in_use - count; + + BUG_ON(in_use < 0); + PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__, + wq, cq, wq->rq.in_use, count); + while (in_use--) { + insert_recv_cqe(wq, cq); + flushed++; + } + return flushed; +} + +static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, + struct t4_swsqe *swcqe) +{ + struct t4_cqe cqe; + + PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, + wq, cq, cq->sw_cidx, cq->sw_pidx); + memset(&cqe, 0, sizeof(cqe)); + cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) | + V_CQE_OPCODE(swcqe->opcode) | + V_CQE_TYPE(1) | + V_CQE_SWCQE(1) | + V_CQE_QPID(wq->sq.qid)); + CQE_WRID_SQ_IDX(&cqe) = swcqe->idx; + cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen)); + cq->sw_queue[cq->sw_pidx] = cqe; + t4_swcq_produce(cq); +} + +int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count) +{ + int flushed = 0; + struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count]; + int in_use = wq->sq.in_use - count; + + BUG_ON(in_use < 0); + while (in_use--) { + swsqe->signaled = 0; + insert_sq_cqe(wq, cq, swsqe); + swsqe++; + if (swsqe == (wq->sq.sw_sq + wq->sq.size)) + swsqe = wq->sq.sw_sq; + flushed++; + } + return flushed; +} + +/* + * Move all CQEs from the HWCQ into the SWCQ. 
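+ * Each copied entry is flagged SWCQE so it is later consumed from the SW queue.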
+ */
+void c4iw_flush_hw_cq(struct t4_cq *cq)
+{
+	struct t4_cqe *cqe = NULL, *swcqe;
+	int ret;
+
+	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
+	ret = t4_next_hw_cqe(cq, &cqe);
+	while (!ret) {
+		PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
+		     __func__, cq->cidx, cq->sw_pidx);
+		swcqe = &cq->sw_queue[cq->sw_pidx];
+		*swcqe = *cqe;
+		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+		t4_swcq_produce(cq);
+		t4_hwcq_consume(cq);
+		ret = t4_next_hw_cqe(cq, &cqe);
+	}
+}
+
+static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
+{
+	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
+		return 0;
+
+	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
+		return 0;
+
+	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
+		return 0;
+
+	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
+		return 0;
+	return 1;
+}
+
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+	struct t4_cqe *cqe;
+	u32 ptr;
+
+	*count = 0;
+	ptr = cq->sw_cidx;
+	while (ptr != cq->sw_pidx) {
+		cqe = &cq->sw_queue[ptr];
+		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
+				      wq->sq.oldest_read)) &&
+		    (CQE_QPID(cqe) == wq->sq.qid))
+			(*count)++;
+		if (++ptr == cq->size)
+			ptr = 0;
+	}
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+	struct t4_cqe *cqe;
+	u32 ptr;
+
+	*count = 0;
+	PDBG("%s count zero %d\n", __func__, *count);
+	ptr = cq->sw_cidx;
+	while (ptr != cq->sw_pidx) {
+		cqe = &cq->sw_queue[ptr];
+		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
+		    (CQE_QPID(cqe) == wq->rq.qid) && cqe_completes_wr(cqe, wq))
+			(*count)++;
+		if (++ptr == cq->size)
+			ptr = 0;
+	}
+	PDBG("%s cq %p count %d\n", __func__, cq, *count);
+}
+
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+	struct t4_swsqe *swsqe;
+	u16 ptr = wq->sq.cidx;
+	int count = wq->sq.in_use;
+	int unsignaled = 0;
+
+	swsqe = &wq->sq.sw_sq[ptr];
+	while (count--)
+		if (!swsqe->signaled) {
+			if (++ptr == wq->sq.size)
+				ptr = 0;
+			swsqe = &wq->sq.sw_sq[ptr];
+			unsignaled++;
+		} else if (swsqe->complete) {
+
+			/*
+			 * Insert this completed cqe into the swcq.
+			 */
+			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
+			     __func__, ptr, cq->sw_pidx);
+			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+			t4_swcq_produce(cq);
+			swsqe->signaled = 0;
+			wq->sq.in_use -= unsignaled;
+			break;
+		} else
+			break;
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+				struct t4_cqe *read_cqe)
+{
+	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
+	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+				 V_CQE_OPCODE(FW_RI_READ_REQ) |
+				 V_CQE_TYPE(1));
+}
+
+/*
+ * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set it
+ * to NULL if there is none.
+ */
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+	if (rptr == wq->sq.size)
+		rptr = 0;
+	while (rptr != wq->sq.pidx) {
+		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+			return;
+		if (++rptr == wq->sq.size)
+			rptr = 0;
+	}
+	wq->sq.oldest_read = NULL;
+}
+
+/*
+ * poll_cq
+ *
+ * Caller must:
+ *     check the validity of the first CQE,
+ *     supply the wq associated with the qpid.
+ *
+ * credit: cq credit to return to sge.
+ * cqe_flushed: 1 iff the CQE is flushed.
+ * cqe: copy of the polled CQE.
+ *
+ * return value:
+ *	0		CQE returned ok.
+ *	-EAGAIN		CQE skipped, try again.
+ *	-EOVERFLOW	CQ overflow detected.
+ */
+static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
+{
+	int ret = 0;
+	struct t4_cqe *hw_cqe, read_cqe;
+
+	*cqe_flushed = 0;
+	*credit = 0;
+	ret = t4_next_cqe(cq, &hw_cqe);
+	if (ret)
+		return ret;
+
+	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
+	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
+	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+	     CQE_WRID_LOW(hw_cqe));
+
+	/*
+	 * skip CQEs not affiliated with a QP.
+	 */
+	if (wq == NULL) {
+		ret = -EAGAIN;
+		goto skip_cqe;
+	}
+
+	/*
+	 * Gotta tweak READ completions:
+	 *	1) the cqe doesn't contain the sq_wptr from the wr.
+	 *	2) opcode not reflected from the wr.
+	 *	3) read_len not reflected from the wr.
+	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
+	 */
+	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
+
+		/*
+		 * If this is an unsolicited read response, then the read
+		 * was generated by the kernel driver as part of peer-2-peer
+		 * connection setup.  So ignore the completion.
+		 */
+		if (!wq->sq.oldest_read) {
+			if (CQE_STATUS(hw_cqe))
+				t4_set_wq_in_error(wq);
+			ret = -EAGAIN;
+			goto skip_cqe;
+		}
+
+		/*
+		 * Don't write to the HWCQ, so create a new read req CQE
+		 * in local memory.
+		 */
+		create_read_req_cqe(wq, hw_cqe, &read_cqe);
+		hw_cqe = &read_cqe;
+		advance_oldest_read(wq);
+	}
+
+	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
+		*cqe_flushed = t4_wq_in_error(wq);
+		t4_set_wq_in_error(wq);
+		goto proc_cqe;
+	}
+
+	/*
+	 * RECV completion.
+	 */
+	if (RQ_TYPE(hw_cqe)) {
+
+		/*
+		 * HW only validates 4 bits of MSN.  So we must validate that
+		 * the MSN in the SEND is the next expected MSN.  If it's not,
+		 * then we complete this with T4_ERR_MSN and mark the wq in
+		 * error.
+		 */
+
+		if (t4_rq_empty(wq)) {
+			t4_set_wq_in_error(wq);
+			ret = -EAGAIN;
+			goto skip_cqe;
+		}
+		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+			t4_set_wq_in_error(wq);
+			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+			goto proc_cqe;
+		}
+		goto proc_cqe;
+	}
+
+	/*
+	 * If we get here it's a send completion.
+	 *
+	 * Handle out of order completion. These get stuffed
+	 * in the SW SQ. Then the SW SQ is walked to move any
+	 * now in-order completions into the SW CQ. This handles
+	 * 2 cases:
+	 *	1) reaping unsignaled WRs when the first subsequent
+	 *	   signaled WR is completed.
+	 *	2) out of order read completions.
+	 */
+	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
+		struct t4_swsqe *swsqe;
+
+		PDBG("%s out of order completion going in sw_sq at idx %u\n",
+		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
+		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+		swsqe->cqe = *hw_cqe;
+		swsqe->complete = 1;
+		ret = -EAGAIN;
+		goto flush_wq;
+	}
+
+proc_cqe:
+	*cqe = *hw_cqe;
+
+	/*
+	 * Reap the associated WR(s) that are freed up with this
+	 * completion.
+	 */
+	if (SQ_TYPE(hw_cqe)) {
+		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+		t4_sq_consume(wq);
+	} else {
+		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+		BUG_ON(t4_rq_empty(wq));
+		t4_rq_consume(wq);
+	}
+
+flush_wq:
+	/*
+	 * Flush any completed cqes that are now in-order.
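+	 * flush_completed_wrs() walks the SW SQ and moves now-in-order completions into the SW CQ.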
+ */ + flush_completed_wrs(wq, cq); + +skip_cqe: + if (SW_CQE(hw_cqe)) { + PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n", + __func__, cq, cq->cqid, cq->sw_cidx); + t4_swcq_consume(cq); + } else { + PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n", + __func__, cq, cq->cqid, cq->cidx); + t4_hwcq_consume(cq); + } + return ret; +} + +/* + * Get one cq entry from c4iw and map it to openib. + * + * Returns: + * 0 cqe returned + * -ENODATA EMPTY; + * -EAGAIN caller must try again + * any other -errno fatal error + */ +static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) +{ + struct c4iw_qp *qhp = NULL; + struct t4_cqe cqe = {0, 0}, *rd_cqe; + struct t4_wq *wq; + u32 credit = 0; + u8 cqe_flushed; + u64 cookie = 0; + int ret; + + ret = t4_next_cqe(&chp->cq, &rd_cqe); + + if (ret) + return ret; + + qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe)); + if (!qhp) + wq = NULL; + else { + spin_lock(&qhp->lock); + wq = &(qhp->wq); + } + ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit); + if (ret) + goto out; + + wc->wr_id = cookie; + wc->qp = &qhp->ibqp; + wc->vendor_err = CQE_STATUS(&cqe); + wc->wc_flags = 0; + + PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x " + "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe), + CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe), + CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie); + + if (CQE_TYPE(&cqe) == 0) { + if (!CQE_STATUS(&cqe)) + wc->byte_len = CQE_LEN(&cqe); + else + wc->byte_len = 0; + wc->opcode = IB_WC_RECV; + if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV || + CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { + wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + } + } else { + switch (CQE_OPCODE(&cqe)) { + case FW_RI_RDMA_WRITE: + wc->opcode = IB_WC_RDMA_WRITE; + break; + case FW_RI_READ_REQ: + wc->opcode = IB_WC_RDMA_READ; + wc->byte_len = CQE_LEN(&cqe); + break; + case FW_RI_SEND_WITH_INV: + case FW_RI_SEND_WITH_SE_INV: + wc->opcode = IB_WC_SEND; + wc->wc_flags |= IB_WC_WITH_INVALIDATE; + break; + case FW_RI_SEND: + case FW_RI_SEND_WITH_SE: + wc->opcode = IB_WC_SEND; + break; + case FW_RI_BIND_MW: + wc->opcode = IB_WC_BIND_MW; + break; + + case FW_RI_LOCAL_INV: + wc->opcode = IB_WC_LOCAL_INV; + break; + case FW_RI_FAST_REGISTER: + wc->opcode = IB_WC_FAST_REG_MR; + break; + default: + printk(KERN_ERR MOD "Unexpected opcode %d " + "in the CQE received for QPID=0x%0x\n", + CQE_OPCODE(&cqe), CQE_QPID(&cqe)); + ret = -EINVAL; + goto out; + } + } + + if (cqe_flushed) + wc->status = IB_WC_WR_FLUSH_ERR; + else { + + switch (CQE_STATUS(&cqe)) { + case T4_ERR_SUCCESS: + wc->status = IB_WC_SUCCESS; + break; + case T4_ERR_STAG: + wc->status = IB_WC_LOC_ACCESS_ERR; + break; + case T4_ERR_PDID: + wc->status = IB_WC_LOC_PROT_ERR; + break; + case T4_ERR_QPID: + case T4_ERR_ACCESS: + wc->status = IB_WC_LOC_ACCESS_ERR; + break; + case T4_ERR_WRAP: + wc->status = IB_WC_GENERAL_ERR; + break; + case T4_ERR_BOUND: + wc->status = IB_WC_LOC_LEN_ERR; + break; + case T4_ERR_INVALIDATE_SHARED_MR: + case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: + wc->status = IB_WC_MW_BIND_ERR; + break; + case T4_ERR_CRC: + case T4_ERR_MARKER: + case T4_ERR_PDU_LEN_ERR: + case T4_ERR_OUT_OF_RQE: + case T4_ERR_DDP_VERSION: + case T4_ERR_RDMA_VERSION: + case T4_ERR_DDP_QUEUE_NUM: + case T4_ERR_MSN: + case T4_ERR_TBIT: + case T4_ERR_MO: + case T4_ERR_MSN_RANGE: + case T4_ERR_IRD_OVERFLOW: + case T4_ERR_OPCODE: + wc->status = IB_WC_FATAL_ERR; + break; + case T4_ERR_SWFLUSH: + wc->status = 
IB_WC_WR_FLUSH_ERR; + break; + default: + printk(KERN_ERR MOD + "Unexpected cqe_status 0x%x for QPID=0x%0x\n", + CQE_STATUS(&cqe), CQE_QPID(&cqe)); + ret = -EINVAL; + } + } +out: + if (wq) + spin_unlock(&qhp->lock); + return ret; +} + +int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) +{ + struct c4iw_cq *chp; + unsigned long flags; + int npolled; + int err = 0; + + chp = to_c4iw_cq(ibcq); + + spin_lock_irqsave(&chp->lock, flags); + for (npolled = 0; npolled < num_entries; ++npolled) { + do { + err = c4iw_poll_cq_one(chp, wc + npolled); + } while (err == -EAGAIN); + if (err) + break; + } + spin_unlock_irqrestore(&chp->lock, flags); + return !err || err == -ENODATA ? npolled : err; +} + +int c4iw_destroy_cq(struct ib_cq *ib_cq) +{ + struct c4iw_cq *chp; + struct c4iw_ucontext *ucontext; + + PDBG("%s ib_cq %p\n", __func__, ib_cq); + chp = to_c4iw_cq(ib_cq); + + remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); + atomic_dec(&chp->refcnt); + wait_event(chp->wait, !atomic_read(&chp->refcnt)); + + ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context) + : NULL; + destroy_cq(&chp->rhp->rdev, &chp->cq, + ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx); + kfree(chp); + return 0; +} + +struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, + int vector, struct ib_ucontext *ib_context, + struct ib_udata *udata) +{ + struct c4iw_dev *rhp; + struct c4iw_cq *chp; + struct c4iw_create_cq_resp uresp; + struct c4iw_ucontext *ucontext = NULL; + int ret; + size_t memsize; + struct c4iw_mm_entry *mm, *mm2; + + PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); + + rhp = to_c4iw_dev(ibdev); + + chp = kzalloc(sizeof(*chp), GFP_KERNEL); + if (!chp) + return ERR_PTR(-ENOMEM); + + if (ib_context) + ucontext = to_c4iw_ucontext(ib_context); + + /* account for the status page. */ + entries++; + + /* + * entries must be multiple of 16 for HW. + */ + entries = roundup(entries, 16); + memsize = entries * sizeof *chp->cq.queue; + + /* + * memsize must be a multiple of the page size if its a user cq. + */ + if (ucontext) + memsize = roundup(memsize, PAGE_SIZE); + chp->cq.size = entries; + chp->cq.memsize = memsize; + + ret = create_cq(&rhp->rdev, &chp->cq, + ucontext ? 
&ucontext->uctx : &rhp->rdev.uctx); + if (ret) + goto err1; + + chp->rhp = rhp; + chp->cq.size--; /* status page */ + chp->ibcq.cqe = chp->cq.size; + spin_lock_init(&chp->lock); + atomic_set(&chp->refcnt, 1); + init_waitqueue_head(&chp->wait); + ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); + if (ret) + goto err2; + + if (ucontext) { + mm = kmalloc(sizeof *mm, GFP_KERNEL); + if (!mm) + goto err3; + mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); + if (!mm2) + goto err4; + + uresp.qid_mask = rhp->rdev.cqmask; + uresp.cqid = chp->cq.cqid; + uresp.size = chp->cq.size; + uresp.memsize = chp->cq.memsize; + spin_lock(&ucontext->mmap_lock); + uresp.key = ucontext->key; + ucontext->key += PAGE_SIZE; + uresp.gts_key = ucontext->key; + ucontext->key += PAGE_SIZE; + spin_unlock(&ucontext->mmap_lock); + ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); + if (ret) + goto err5; + + mm->key = uresp.key; + mm->addr = virt_to_phys(chp->cq.queue); + mm->len = chp->cq.memsize; + insert_mmap(ucontext, mm); + + mm2->key = uresp.gts_key; + mm2->addr = chp->cq.ugts; + mm2->len = PAGE_SIZE; + insert_mmap(ucontext, mm2); + } + PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", + __func__, chp->cq.cqid, chp, chp->cq.size, + chp->cq.memsize, + (unsigned long long) chp->cq.dma_addr); + return &chp->ibcq; +err5: + kfree(mm2); +err4: + kfree(mm); +err3: + remove_handle(rhp, &rhp->cqidr, chp->cq.cqid); +err2: + destroy_cq(&chp->rhp->rdev, &chp->cq, + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); +err1: + kfree(chp); + return ERR_PTR(ret); +} + +int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) +{ + return -ENOSYS; +} + +int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) +{ + struct c4iw_cq *chp; + int ret; + unsigned long flag; + + chp = to_c4iw_cq(ibcq); + spin_lock_irqsave(&chp->lock, flag); + ret = t4_arm_cq(&chp->cq, + (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED); + spin_unlock_irqrestore(&chp->lock, flag); + if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS)) + ret = 0; + return ret; +} diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c new file mode 100644 index 00000000000..be23b5eab13 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/debugfs.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "iw_cxgb4.h"
+
+#define DRV_VERSION "0.1"
+
+MODULE_AUTHOR("Steve Wise");
+MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static struct dentry *c4iw_debugfs_root;
+
+struct debugfs_qp_data {
+	struct c4iw_dev *devp;
+	char *buf;
+	int bufsize;
+	int pos;
+};
+
+static int count_qps(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+	int *countp = data;
+
+	if (id != qp->wq.sq.qid)
+		return 0;
+
+	*countp = *countp + 1;
+	return 0;
+}
+
+static int dump_qps(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+	struct debugfs_qp_data *qpd = data;
+	int space;
+	int cc;
+
+	if (id != qp->wq.sq.qid)
+		return 0;
+
+	space = qpd->bufsize - qpd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	if (qp->ep)
+		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
+			      qp->wq.sq.qid, (int)qp->attr.state,
+			      qp->ep->hwtid, (int)qp->ep->com.state,
+			      &qp->ep->com.local_addr.sin_addr.s_addr,
+			      ntohs(qp->ep->com.local_addr.sin_port),
+			      &qp->ep->com.remote_addr.sin_addr.s_addr,
+			      ntohs(qp->ep->com.remote_addr.sin_port));
+	else
+		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
+			      qp->wq.sq.qid, (int)qp->attr.state);
+	if (cc < space)
+		qpd->pos += cc;
+	return 0;
+}
+
+static int qp_release(struct inode *inode, struct file *file)
+{
+	struct debugfs_qp_data *qpd = file->private_data;
+	if (!qpd) {
+		printk(KERN_INFO "%s null qpd?\n", __func__);
+		return 0;
+	}
+	kfree(qpd->buf);
+	kfree(qpd);
+	return 0;
+}
+
+static int qp_open(struct inode *inode, struct file *file)
+{
+	struct debugfs_qp_data *qpd;
+	int ret = 0;
+	int count = 1;
+
+	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
+	if (!qpd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	qpd->devp = inode->i_private;
+	qpd->pos = 0;
+
+	spin_lock_irq(&qpd->devp->lock);
+	idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+	spin_unlock_irq(&qpd->devp->lock);
+
+	qpd->bufsize = count * 128;
+	qpd->buf = kmalloc(qpd->bufsize, GFP_KERNEL);
+	if (!qpd->buf) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	spin_lock_irq(&qpd->devp->lock);
+	idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+	spin_unlock_irq(&qpd->devp->lock);
+
+	qpd->buf[qpd->pos++] = 0;
+	file->private_data = qpd;
+	goto out;
+err1:
+	kfree(qpd);
+out:
+	return ret;
+}
+
+static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
+		       loff_t *ppos)
+{
+	struct debugfs_qp_data *qpd = file->private_data;
+	loff_t pos = *ppos;
+	loff_t avail = qpd->pos;
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= avail)
+		return 0;
+	if (count > avail - pos)
+		count = avail - pos;
+
+	while (count) {
+		size_t len = 0;
+
+		len = min((int)count, (int)qpd->pos - (int)pos);
+		if (copy_to_user(buf, qpd->buf + pos, len))
+			return -EFAULT;
+		if (len == 0)
+			return -EINVAL;
+
+		buf += len;
+		pos += len;
+		count -= len;
+	}
+	count = pos - *ppos;
+	*ppos = pos;
+	return count;
+}
+
+static const struct file_operations qp_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = qp_open,
+	.release = qp_release,
+	.read    = qp_read,
+};
+
+static int setup_debugfs(struct c4iw_dev *devp)
+{
+	struct dentry *de;
+
+	
if (!devp->debugfs_root) + return -1; + + de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root, + (void *)devp, &qp_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = 4096; + return 0; +} + +void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, + struct c4iw_dev_ucontext *uctx) +{ + struct list_head *pos, *nxt; + struct c4iw_qid_list *entry; + + mutex_lock(&uctx->lock); + list_for_each_safe(pos, nxt, &uctx->qpids) { + entry = list_entry(pos, struct c4iw_qid_list, entry); + list_del_init(&entry->entry); + if (!(entry->qid & rdev->qpmask)) + c4iw_put_resource(&rdev->resource.qid_fifo, entry->qid, + &rdev->resource.qid_fifo_lock); + kfree(entry); + } + + list_for_each_safe(pos, nxt, &uctx->qpids) { + entry = list_entry(pos, struct c4iw_qid_list, entry); + list_del_init(&entry->entry); + kfree(entry); + } + mutex_unlock(&uctx->lock); +} + +void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, + struct c4iw_dev_ucontext *uctx) +{ + INIT_LIST_HEAD(&uctx->qpids); + INIT_LIST_HEAD(&uctx->cqids); + mutex_init(&uctx->lock); +} + +/* Caller takes care of locking if needed */ +static int c4iw_rdev_open(struct c4iw_rdev *rdev) +{ + int err; + + c4iw_init_dev_ucontext(rdev, &rdev->uctx); + + /* + * qpshift is the number of bits to shift the qpid left in order + * to get the correct address of the doorbell for that qp. + */ + rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density); + rdev->qpmask = rdev->lldi.udb_density - 1; + rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density); + rdev->cqmask = rdev->lldi.ucq_density - 1; + PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d " + "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x\n", + __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, + rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), + rdev->lldi.vr->pbl.start, + rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, + rdev->lldi.vr->rq.size); + PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " + "qpmask 0x%x cqshift %lu cqmask 0x%x\n", + (unsigned)pci_resource_len(rdev->lldi.pdev, 2), + (void *)pci_resource_start(rdev->lldi.pdev, 2), + rdev->lldi.db_reg, + rdev->lldi.gts_reg, + rdev->qpshift, rdev->qpmask, + rdev->cqshift, rdev->cqmask); + + if (c4iw_num_stags(rdev) == 0) { + err = -EINVAL; + goto err1; + } + + err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD); + if (err) { + printk(KERN_ERR MOD "error %d initializing resources\n", err); + goto err1; + } + err = c4iw_pblpool_create(rdev); + if (err) { + printk(KERN_ERR MOD "error %d initializing pbl pool\n", err); + goto err2; + } + err = c4iw_rqtpool_create(rdev); + if (err) { + printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); + goto err3; + } + return 0; +err3: + c4iw_pblpool_destroy(rdev); +err2: + c4iw_destroy_resource(&rdev->resource); +err1: + return err; +} + +static void c4iw_rdev_close(struct c4iw_rdev *rdev) +{ + c4iw_pblpool_destroy(rdev); + c4iw_rqtpool_destroy(rdev); + c4iw_destroy_resource(&rdev->resource); +} + +static void c4iw_remove(struct c4iw_dev *dev) +{ + PDBG("%s c4iw_dev %p\n", __func__, dev); + cancel_delayed_work_sync(&dev->db_drop_task); + list_del(&dev->entry); + c4iw_unregister_device(dev); + c4iw_rdev_close(&dev->rdev); + idr_destroy(&dev->cqidr); + idr_destroy(&dev->qpidr); + idr_destroy(&dev->mmidr); + ib_dealloc_device(&dev->ibdev); +} + +static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) +{ + struct c4iw_dev *devp; + int ret; + + devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); + if (!devp) { + 
printk(KERN_ERR MOD "Cannot allocate ib device\n"); + return NULL; + } + devp->rdev.lldi = *infop; + + mutex_lock(&dev_mutex); + + ret = c4iw_rdev_open(&devp->rdev); + if (ret) { + mutex_unlock(&dev_mutex); + printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); + ib_dealloc_device(&devp->ibdev); + return NULL; + } + + idr_init(&devp->cqidr); + idr_init(&devp->qpidr); + idr_init(&devp->mmidr); + spin_lock_init(&devp->lock); + list_add_tail(&devp->entry, &dev_list); + mutex_unlock(&dev_mutex); + + if (c4iw_register_device(devp)) { + printk(KERN_ERR MOD "Unable to register device\n"); + mutex_lock(&dev_mutex); + c4iw_remove(devp); + mutex_unlock(&dev_mutex); + } + if (c4iw_debugfs_root) { + devp->debugfs_root = debugfs_create_dir( + pci_name(devp->rdev.lldi.pdev), + c4iw_debugfs_root); + setup_debugfs(devp); + } + return devp; +} + +static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) +{ + struct c4iw_dev *dev; + static int vers_printed; + int i; + + if (!vers_printed++) + printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", + DRV_VERSION); + + dev = c4iw_alloc(infop); + if (!dev) + goto out; + + PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", + __func__, pci_name(dev->rdev.lldi.pdev), + dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, + dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); + + for (i = 0; i < dev->rdev.lldi.nrxq; i++) + PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); + + printk(KERN_INFO MOD "Initialized device %s\n", + pci_name(dev->rdev.lldi.pdev)); +out: + return dev; +} + +static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, + unsigned int pull_len) +{ + struct sk_buff *skb; + struct skb_shared_info *ssi; + + if (gl->tot_len <= 512) { + skb = alloc_skb(gl->tot_len, GFP_ATOMIC); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = alloc_skb(skb_len, GFP_ATOMIC); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + ssi = skb_shinfo(skb); + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; + ssi->frags[0].size = gl->frags[0].size - pull_len; + if (gl->nfrags > 1) + memcpy(&ssi->frags[1], &gl->frags[1], + (gl->nfrags - 1) * sizeof(skb_frag_t)); + ssi->nr_frags = gl->nfrags; + + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + + /* Get a reference for the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); + } +out: + return skb; +} + +static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct c4iw_dev *dev = handle; + struct sk_buff *skb; + const struct cpl_act_establish *rpl; + unsigned int opcode; + + if (gl == NULL) { + /* omit RSS and rsp_ctrl at end of descriptor */ + unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8; + + skb = alloc_skb(256, GFP_ATOMIC); + if (!skb) + goto nomem; + __skb_put(skb, len); + skb_copy_to_linear_data(skb, &rsp[1], len); + } else if (gl == CXGB4_MSG_AN) { + const struct rsp_ctrl *rc = (void *)rsp; + + u32 qid = be32_to_cpu(rc->pldbuflen_qid); + c4iw_ev_handler(dev, qid); + return 0; + } else { + skb = t4_pktgl_to_skb(gl, 128, 128); + if (unlikely(!skb)) + goto nomem; + } + + rpl = cplhdr(skb); + opcode = rpl->ot.opcode; + + if (c4iw_handlers[opcode]) + c4iw_handlers[opcode](dev, skb); + else + printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__, + 
opcode); + + return 0; +nomem: + return -1; +} + +static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) +{ + PDBG("%s new_state %u\n", __func__, new_state); + return 0; +} + +static struct cxgb4_uld_info c4iw_uld_info = { + .name = DRV_NAME, + .add = c4iw_uld_add, + .rx_handler = c4iw_uld_rx_handler, + .state_change = c4iw_uld_state_change, +}; + +static int __init c4iw_init_module(void) +{ + int err; + + err = c4iw_cm_init(); + if (err) + return err; + + c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL); + if (!c4iw_debugfs_root) + printk(KERN_WARNING MOD + "could not create debugfs entry, continuing\n"); + + cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); + + return 0; +} + +static void __exit c4iw_exit_module(void) +{ + struct c4iw_dev *dev, *tmp; + + cxgb4_unregister_uld(CXGB4_ULD_RDMA); + + mutex_lock(&dev_mutex); + list_for_each_entry_safe(dev, tmp, &dev_list, entry) { + c4iw_remove(dev); + } + mutex_unlock(&dev_mutex); + + c4iw_cm_term(); + debugfs_remove_recursive(c4iw_debugfs_root); +} + +module_init(c4iw_init_module); +module_exit(c4iw_exit_module); diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c new file mode 100644 index 00000000000..1bd6a3e531a --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include +#include + +#include "iw_cxgb4.h" + +static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, + struct c4iw_qp *qhp, + struct t4_cqe *err_cqe, + enum ib_event_type ib_event) +{ + struct ib_event event; + struct c4iw_qp_attributes attrs; + + if ((qhp->attr.state == C4IW_QP_STATE_ERROR) || + (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { + PDBG("%s AE received after RTS - " + "qp state %d qpid 0x%x status 0x%x\n", __func__, + qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe)); + return; + } + + printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x " + "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__, + CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), + CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), + CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); + + if (qhp->attr.state == C4IW_QP_STATE_RTS) { + attrs.next_state = C4IW_QP_STATE_TERMINATE; + c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, + &attrs, 0); + } + + event.event = ib_event; + event.device = chp->ibcq.device; + if (ib_event == IB_EVENT_CQ_ERR) + event.element.cq = &chp->ibcq; + else + event.element.qp = &qhp->ibqp; + if (qhp->ibqp.event_handler) + (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); + + (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); +} + +void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) +{ + struct c4iw_cq *chp; + struct c4iw_qp *qhp; + u32 cqid; + + spin_lock(&dev->lock); + qhp = get_qhp(dev, CQE_QPID(err_cqe)); + if (!qhp) { + printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d " + "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n", + CQE_QPID(err_cqe), + CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), + CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), + CQE_WRID_LOW(err_cqe)); + spin_unlock(&dev->lock); + goto out; + } + + if (SQ_TYPE(err_cqe)) + cqid = qhp->attr.scq; + else + cqid = qhp->attr.rcq; + chp = get_chp(dev, cqid); + if (!chp) { + printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d " + "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n", + cqid, CQE_QPID(err_cqe), + CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), + CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), + CQE_WRID_LOW(err_cqe)); + spin_unlock(&dev->lock); + goto out; + } + + c4iw_qp_add_ref(&qhp->ibqp); + atomic_inc(&chp->refcnt); + spin_unlock(&dev->lock); + + /* Bad incoming write */ + if (RQ_TYPE(err_cqe) && + (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) { + post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR); + goto done; + } + + switch (CQE_STATUS(err_cqe)) { + + /* Completion Events */ + case T4_ERR_SUCCESS: + printk(KERN_ERR MOD "AE with status 0!\n"); + break; + + case T4_ERR_STAG: + case T4_ERR_PDID: + case T4_ERR_QPID: + case T4_ERR_ACCESS: + case T4_ERR_WRAP: + case T4_ERR_BOUND: + case T4_ERR_INVALIDATE_SHARED_MR: + case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: + post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR); + break; + + /* Device Fatal Errors */ + case T4_ERR_ECC: + case T4_ERR_ECC_PSTAG: + case T4_ERR_INTERNAL_ERR: + post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL); + break; + + /* QP Fatal Errors */ + case T4_ERR_OUT_OF_RQE: + case T4_ERR_PBL_ADDR_BOUND: + case T4_ERR_CRC: + case T4_ERR_MARKER: + case T4_ERR_PDU_LEN_ERR: + case T4_ERR_DDP_VERSION: + case T4_ERR_RDMA_VERSION: + case T4_ERR_OPCODE: + case T4_ERR_DDP_QUEUE_NUM: + case T4_ERR_MSN: + case T4_ERR_TBIT: + case T4_ERR_MO: + case T4_ERR_MSN_GAP: + case T4_ERR_MSN_RANGE: + case T4_ERR_RQE_ADDR_BOUND: + case T4_ERR_IRD_OVERFLOW: + post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); + break; + + 
default: + printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n", + CQE_STATUS(err_cqe), qhp->wq.sq.qid); + post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); + break; + } +done: + if (atomic_dec_and_test(&chp->refcnt)) + wake_up(&chp->wait); + c4iw_qp_rem_ref(&qhp->ibqp); +out: + return; +} + +int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) +{ + struct c4iw_cq *chp; + + chp = get_chp(dev, qid); + if (chp) + (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); + else + PDBG("%s unknown cqid 0x%x\n", __func__, qid); + return 0; +} diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h new file mode 100644 index 00000000000..ccce6fe7570 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -0,0 +1,743 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __IW_CXGB4_H__ +#define __IW_CXGB4_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include + +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "l2t.h" +#include "user.h" + +#define DRV_NAME "iw_cxgb4" +#define MOD DRV_NAME ":" + +extern int c4iw_debug; +#define PDBG(fmt, args...) 
\ +do { \ + if (c4iw_debug) \ + printk(MOD fmt, ## args); \ +} while (0) + +#include "t4.h" + +#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start) +#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start) + +static inline void *cplhdr(struct sk_buff *skb) +{ + return skb->data; +} + +#define C4IW_WR_TO (10*HZ) + +struct c4iw_wr_wait { + wait_queue_head_t wait; + int done; + int ret; +}; + +static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) +{ + wr_waitp->ret = 0; + wr_waitp->done = 0; + init_waitqueue_head(&wr_waitp->wait); +} + +struct c4iw_resource { + struct kfifo tpt_fifo; + spinlock_t tpt_fifo_lock; + struct kfifo qid_fifo; + spinlock_t qid_fifo_lock; + struct kfifo pdid_fifo; + spinlock_t pdid_fifo_lock; +}; + +struct c4iw_qid_list { + struct list_head entry; + u32 qid; +}; + +struct c4iw_dev_ucontext { + struct list_head qpids; + struct list_head cqids; + struct mutex lock; +}; + +enum c4iw_rdev_flags { + T4_FATAL_ERROR = (1<<0), +}; + +struct c4iw_rdev { + struct c4iw_resource resource; + unsigned long qpshift; + u32 qpmask; + unsigned long cqshift; + u32 cqmask; + struct c4iw_dev_ucontext uctx; + struct gen_pool *pbl_pool; + struct gen_pool *rqt_pool; + u32 flags; + struct cxgb4_lld_info lldi; +}; + +static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) +{ + return rdev->flags & T4_FATAL_ERROR; +} + +static inline int c4iw_num_stags(struct c4iw_rdev *rdev) +{ + return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5)); +} + +struct c4iw_dev { + struct ib_device ibdev; + struct c4iw_rdev rdev; + u32 device_cap_flags; + struct idr cqidr; + struct idr qpidr; + struct idr mmidr; + spinlock_t lock; + struct list_head entry; + struct delayed_work db_drop_task; + struct dentry *debugfs_root; +}; + +static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct c4iw_dev, ibdev); +} + +static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev) +{ + return container_of(rdev, struct c4iw_dev, rdev); +} + +static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid) +{ + return idr_find(&rhp->cqidr, cqid); +} + +static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid) +{ + return idr_find(&rhp->qpidr, qpid); +} + +static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid) +{ + return idr_find(&rhp->mmidr, mmid); +} + +static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr, + void *handle, u32 id) +{ + int ret; + int newid; + + do { + if (!idr_pre_get(idr, GFP_KERNEL)) + return -ENOMEM; + spin_lock_irq(&rhp->lock); + ret = idr_get_new_above(idr, handle, id, &newid); + BUG_ON(newid != id); + spin_unlock_irq(&rhp->lock); + } while (ret == -EAGAIN); + + return ret; +} + +static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id) +{ + spin_lock_irq(&rhp->lock); + idr_remove(idr, id); + spin_unlock_irq(&rhp->lock); +} + +struct c4iw_pd { + struct ib_pd ibpd; + u32 pdid; + struct c4iw_dev *rhp; +}; + +static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct c4iw_pd, ibpd); +} + +struct tpt_attributes { + u64 len; + u64 va_fbo; + enum fw_ri_mem_perms perms; + u32 stag; + u32 pdid; + u32 qpid; + u32 pbl_addr; + u32 pbl_size; + u32 state:1; + u32 type:2; + u32 rsvd:1; + u32 remote_invaliate_disable:1; + u32 zbva:1; + u32 mw_bind_enable:1; + u32 page_size:5; +}; + +struct c4iw_mr { + struct ib_mr ibmr; + struct ib_umem *umem; + struct c4iw_dev *rhp; + u64 kva; + struct tpt_attributes 
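
insert_handle() above uses the two-phase idr API of this era: idr_pre_get() preallocates memory outside the lock, idr_get_new_above() inserts under the spinlock and returns -EAGAIN if the preallocation was consumed by a racing caller, so the loop retries. A toy userspace model of that retry shape (the table is a plain array, not the kernel idr):

#include <stdio.h>
#include <errno.h>

#define TABLE_SIZE 64

static void *table[TABLE_SIZE];
static int preloaded;

static int fake_pre_get(void)        /* models idr_pre_get(): reserve memory */
{
        preloaded = 1;
        return 1;                    /* nonzero = success, like idr_pre_get() */
}

static int fake_get_new_above(void *handle, int id)
{
        if (!preloaded)
                return -EAGAIN;      /* preload raced away: caller retries */
        preloaded = 0;
        if (id < 0 || id >= TABLE_SIZE || table[id])
                return -ENOSPC;
        table[id] = handle;
        return 0;
}

static int insert_handle_model(void *handle, int id)
{
        int ret;

        do {
                if (!fake_pre_get())
                        return -ENOMEM;
                /* the spinlock would be taken here... */
                ret = fake_get_new_above(handle, id);
                /* ...and dropped here */
        } while (ret == -EAGAIN);

        return ret;
}

int main(void)
{
        int v = 42;

        printf("insert id 7 -> %d\n", insert_handle_model(&v, 7));
        printf("insert id 7 again -> %d (busy)\n", insert_handle_model(&v, 7));
        return 0;
}
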
attr; +}; + +static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct c4iw_mr, ibmr); +} + +struct c4iw_mw { + struct ib_mw ibmw; + struct c4iw_dev *rhp; + u64 kva; + struct tpt_attributes attr; +}; + +static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct c4iw_mw, ibmw); +} + +struct c4iw_fr_page_list { + struct ib_fast_reg_page_list ibpl; + DECLARE_PCI_UNMAP_ADDR(mapping); + dma_addr_t dma_addr; + struct c4iw_dev *dev; + int size; +}; + +static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( + struct ib_fast_reg_page_list *ibpl) +{ + return container_of(ibpl, struct c4iw_fr_page_list, ibpl); +} + +struct c4iw_cq { + struct ib_cq ibcq; + struct c4iw_dev *rhp; + struct t4_cq cq; + spinlock_t lock; + atomic_t refcnt; + wait_queue_head_t wait; +}; + +static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct c4iw_cq, ibcq); +} + +struct c4iw_mpa_attributes { + u8 initiator; + u8 recv_marker_enabled; + u8 xmit_marker_enabled; + u8 crc_enabled; + u8 version; + u8 p2p_type; +}; + +struct c4iw_qp_attributes { + u32 scq; + u32 rcq; + u32 sq_num_entries; + u32 rq_num_entries; + u32 sq_max_sges; + u32 sq_max_sges_rdma_write; + u32 rq_max_sges; + u32 state; + u8 enable_rdma_read; + u8 enable_rdma_write; + u8 enable_bind; + u8 enable_mmid0_fastreg; + u32 max_ord; + u32 max_ird; + u32 pd; + u32 next_state; + char terminate_buffer[52]; + u32 terminate_msg_len; + u8 is_terminate_local; + struct c4iw_mpa_attributes mpa_attr; + struct c4iw_ep *llp_stream_handle; +}; + +struct c4iw_qp { + struct ib_qp ibqp; + struct c4iw_dev *rhp; + struct c4iw_ep *ep; + struct c4iw_qp_attributes attr; + struct t4_wq wq; + spinlock_t lock; + atomic_t refcnt; + wait_queue_head_t wait; + struct timer_list timer; +}; + +static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct c4iw_qp, ibqp); +} + +struct c4iw_ucontext { + struct ib_ucontext ibucontext; + struct c4iw_dev_ucontext uctx; + u32 key; + spinlock_t mmap_lock; + struct list_head mmaps; +}; + +static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) +{ + return container_of(c, struct c4iw_ucontext, ibucontext); +} + +struct c4iw_mm_entry { + struct list_head entry; + u64 addr; + u32 key; + unsigned len; +}; + +static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext, + u32 key, unsigned len) +{ + struct list_head *pos, *nxt; + struct c4iw_mm_entry *mm; + + spin_lock(&ucontext->mmap_lock); + list_for_each_safe(pos, nxt, &ucontext->mmaps) { + + mm = list_entry(pos, struct c4iw_mm_entry, entry); + if (mm->key == key && mm->len == len) { + list_del_init(&mm->entry); + spin_unlock(&ucontext->mmap_lock); + PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__, + key, (unsigned long long) mm->addr, mm->len); + return mm; + } + } + spin_unlock(&ucontext->mmap_lock); + return NULL; +} + +static inline void insert_mmap(struct c4iw_ucontext *ucontext, + struct c4iw_mm_entry *mm) +{ + spin_lock(&ucontext->mmap_lock); + PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__, + mm->key, (unsigned long long) mm->addr, mm->len); + list_add_tail(&mm->entry, &ucontext->mmaps); + spin_unlock(&ucontext->mmap_lock); +} + +enum c4iw_qp_attr_mask { + C4IW_QP_ATTR_NEXT_STATE = 1 << 0, + C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7, + C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8, + C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9, + C4IW_QP_ATTR_MAX_ORD = 1 << 11, + C4IW_QP_ATTR_MAX_IRD = 1 << 12, + 
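
remove_mmap()/insert_mmap() above form a small rendezvous table: object-creation paths queue an (addr, key, len) record, and a later mmap() consumes the one record whose key and length match its offset. A toy version using a singly linked list (names hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct mm_entry {
        struct mm_entry *next;
        uint64_t addr;
        uint32_t key;
        unsigned len;
};

static struct mm_entry *mmaps;

static void insert_mmap_model(uint64_t addr, uint32_t key, unsigned len)
{
        struct mm_entry *mm = malloc(sizeof(*mm));

        if (!mm)
                return;
        mm->addr = addr;
        mm->key = key;
        mm->len = len;
        mm->next = mmaps;       /* the kernel code appends; order is irrelevant */
        mmaps = mm;
}

/* Find and unlink the record matching (key, len), as c4iw_mmap() does. */
static struct mm_entry *remove_mmap_model(uint32_t key, unsigned len)
{
        struct mm_entry **pp, *mm;

        for (pp = &mmaps; (mm = *pp) != NULL; pp = &mm->next) {
                if (mm->key == key && mm->len == len) {
                        *pp = mm->next;
                        return mm;
                }
        }
        return NULL;
}

int main(void)
{
        struct mm_entry *mm;

        insert_mmap_model(0xfe000000ull, 0x1000, 4096);
        mm = remove_mmap_model(0x1000, 4096);
        printf("lookup: %s\n", mm ? "hit" : "miss");
        free(mm);
        printf("second lookup: %s (record consumed)\n",
               remove_mmap_model(0x1000, 4096) ? "hit" : "miss");
        return 0;
}
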
C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22, + C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23, + C4IW_QP_ATTR_MPA_ATTR = 1 << 24, + C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25, + C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ | + C4IW_QP_ATTR_ENABLE_RDMA_WRITE | + C4IW_QP_ATTR_MAX_ORD | + C4IW_QP_ATTR_MAX_IRD | + C4IW_QP_ATTR_LLP_STREAM_HANDLE | + C4IW_QP_ATTR_STREAM_MSG_BUFFER | + C4IW_QP_ATTR_MPA_ATTR | + C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE) +}; + +int c4iw_modify_qp(struct c4iw_dev *rhp, + struct c4iw_qp *qhp, + enum c4iw_qp_attr_mask mask, + struct c4iw_qp_attributes *attrs, + int internal); + +enum c4iw_qp_state { + C4IW_QP_STATE_IDLE, + C4IW_QP_STATE_RTS, + C4IW_QP_STATE_ERROR, + C4IW_QP_STATE_TERMINATE, + C4IW_QP_STATE_CLOSING, + C4IW_QP_STATE_TOT +}; + +static inline int c4iw_convert_state(enum ib_qp_state ib_state) +{ + switch (ib_state) { + case IB_QPS_RESET: + case IB_QPS_INIT: + return C4IW_QP_STATE_IDLE; + case IB_QPS_RTS: + return C4IW_QP_STATE_RTS; + case IB_QPS_SQD: + return C4IW_QP_STATE_CLOSING; + case IB_QPS_SQE: + return C4IW_QP_STATE_TERMINATE; + case IB_QPS_ERR: + return C4IW_QP_STATE_ERROR; + default: + return -1; + } +} + +static inline u32 c4iw_ib_to_tpt_access(int a) +{ + return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | + (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) | + (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) | + FW_RI_MEM_ACCESS_LOCAL_READ; +} + +static inline u32 c4iw_ib_to_tpt_bind_access(int acc) +{ + return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) | + (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0); +} + +enum c4iw_mmid_state { + C4IW_STAG_STATE_VALID, + C4IW_STAG_STATE_INVALID +}; + +#define C4IW_NODE_DESC "cxgb4 Chelsio Communications" + +#define MPA_KEY_REQ "MPA ID Req Frame" +#define MPA_KEY_REP "MPA ID Rep Frame" + +#define MPA_MAX_PRIVATE_DATA 256 +#define MPA_REJECT 0x20 +#define MPA_CRC 0x40 +#define MPA_MARKERS 0x80 +#define MPA_FLAGS_MASK 0xE0 + +#define c4iw_put_ep(ep) { \ + PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__, \ + ep, atomic_read(&((ep)->kref.refcount))); \ + WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \ + kref_put(&((ep)->kref), _c4iw_free_ep); \ +} + +#define c4iw_get_ep(ep) { \ + PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \ + ep, atomic_read(&((ep)->kref.refcount))); \ + kref_get(&((ep)->kref)); \ +} +void _c4iw_free_ep(struct kref *kref); + +struct mpa_message { + u8 key[16]; + u8 flags; + u8 revision; + __be16 private_data_size; + u8 private_data[0]; +}; + +struct terminate_message { + u8 layer_etype; + u8 ecode; + __be16 hdrct_rsvd; + u8 len_hdrs[0]; +}; + +#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28) + +enum c4iw_layers_types { + LAYER_RDMAP = 0x00, + LAYER_DDP = 0x10, + LAYER_MPA = 0x20, + RDMAP_LOCAL_CATA = 0x00, + RDMAP_REMOTE_PROT = 0x01, + RDMAP_REMOTE_OP = 0x02, + DDP_LOCAL_CATA = 0x00, + DDP_TAGGED_ERR = 0x01, + DDP_UNTAGGED_ERR = 0x02, + DDP_LLP = 0x03 +}; + +enum c4iw_rdma_ecodes { + RDMAP_INV_STAG = 0x00, + RDMAP_BASE_BOUNDS = 0x01, + RDMAP_ACC_VIOL = 0x02, + RDMAP_STAG_NOT_ASSOC = 0x03, + RDMAP_TO_WRAP = 0x04, + RDMAP_INV_VERS = 0x05, + RDMAP_INV_OPCODE = 0x06, + RDMAP_STREAM_CATA = 0x07, + RDMAP_GLOBAL_CATA = 0x08, + RDMAP_CANT_INV_STAG = 0x09, + RDMAP_UNSPECIFIED = 0xff +}; + +enum c4iw_ddp_ecodes { + DDPT_INV_STAG = 0x00, + DDPT_BASE_BOUNDS = 0x01, + DDPT_STAG_NOT_ASSOC = 0x02, + DDPT_TO_WRAP = 0x03, + DDPT_INV_VERS = 0x04, + DDPU_INV_QN = 0x01, + 
DDPU_INV_MSN_NOBUF = 0x02, + DDPU_INV_MSN_RANGE = 0x03, + DDPU_INV_MO = 0x04, + DDPU_MSG_TOOBIG = 0x05, + DDPU_INV_VERS = 0x06 +}; + +enum c4iw_mpa_ecodes { + MPA_CRC_ERR = 0x02, + MPA_MARKER_ERR = 0x03 +}; + +enum c4iw_ep_state { + IDLE = 0, + LISTEN, + CONNECTING, + MPA_REQ_WAIT, + MPA_REQ_SENT, + MPA_REQ_RCVD, + MPA_REP_SENT, + FPDU_MODE, + ABORTING, + CLOSING, + MORIBUND, + DEAD, +}; + +enum c4iw_ep_flags { + PEER_ABORT_IN_PROGRESS = 0, + ABORT_REQ_IN_PROGRESS = 1, + RELEASE_RESOURCES = 2, + CLOSE_SENT = 3, +}; + +struct c4iw_ep_common { + struct iw_cm_id *cm_id; + struct c4iw_qp *qp; + struct c4iw_dev *dev; + enum c4iw_ep_state state; + struct kref kref; + spinlock_t lock; + struct sockaddr_in local_addr; + struct sockaddr_in remote_addr; + wait_queue_head_t waitq; + int rpl_done; + int rpl_err; + unsigned long flags; +}; + +struct c4iw_listen_ep { + struct c4iw_ep_common com; + unsigned int stid; + int backlog; +}; + +struct c4iw_ep { + struct c4iw_ep_common com; + struct c4iw_ep *parent_ep; + struct timer_list timer; + unsigned int atid; + u32 hwtid; + u32 snd_seq; + u32 rcv_seq; + struct l2t_entry *l2t; + struct dst_entry *dst; + struct sk_buff *mpa_skb; + struct c4iw_mpa_attributes mpa_attr; + u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA]; + unsigned int mpa_pkt_len; + u32 ird; + u32 ord; + u32 smac_idx; + u32 tx_chan; + u32 mtu; + u16 mss; + u16 emss; + u16 plen; + u16 rss_qid; + u16 txq_idx; + u8 tos; +}; + +static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) +{ + return cm_id->provider_data; +} + +static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) +{ + return cm_id->provider_data; +} + +static inline int compute_wscale(int win) +{ + int wscale = 0; + + while (wscale < 14 && (65535< +#include + +#include "iw_cxgb4.h" + +#define T4_ULPTX_MIN_IO 32 +#define C4IW_MAX_INLINE_SIZE 96 + +static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, + void *data) +{ + struct sk_buff *skb; + struct ulp_mem_io *req; + struct ulptx_idata *sc; + u8 wr_len, *to_dp, *from_dp; + int copy_len, num_wqe, i, ret = 0; + struct c4iw_wr_wait wr_wait; + + addr &= 0x7FFFFFF; + PDBG("%s addr 0x%x len %u\n", __func__, addr, len); + num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE); + c4iw_init_wr_wait(&wr_wait); + for (i = 0; i < num_wqe; i++) { + + copy_len = len > C4IW_MAX_INLINE_SIZE ? 
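
The compute_wscale() helper at the end of iw_cxgb4.h above is truncated at "(65535<" in this copy of the patch, along with the header's closing lines and the start of mem.c; the missing loop body is almost certainly the standard TCP window-scale computation. A runnable reconstruction, under that assumption:

#include <stdio.h>

/* Reconstruction of compute_wscale(); the text above breaks off mid-expression. */
static int compute_wscale(int win)
{
        int wscale = 0;

        while (wscale < 14 && (65535 << wscale) < win)
                wscale++;
        return wscale;
}

int main(void)
{
        int wins[] = { 65535, 262144, 1048576 };
        int i;

        for (i = 0; i < 3; i++)
                printf("win %7d -> wscale %d\n", wins[i], compute_wscale(wins[i]));
        return 0;
}
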
C4IW_MAX_INLINE_SIZE : + len; + wr_len = roundup(sizeof *req + sizeof *sc + + roundup(copy_len, T4_ULPTX_MIN_IO), 16); + + skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) + return -ENOMEM; + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + + req = (struct ulp_mem_io *)__skb_put(skb, wr_len); + memset(req, 0, wr_len); + INIT_ULPTX_WR(req, wr_len, 0, 0); + + if (i == (num_wqe-1)) { + req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | + FW_WR_COMPL(1)); + req->wr.wr_lo = (__force __be64)&wr_wait; + } else + req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR)); + req->wr.wr_mid = cpu_to_be32( + FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); + + req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23)); + req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN( + DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); + req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), + 16)); + req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3)); + + sc = (struct ulptx_idata *)(req + 1); + sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM)); + sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO)); + + to_dp = (u8 *)(sc + 1); + from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE; + if (data) + memcpy(to_dp, from_dp, copy_len); + else + memset(to_dp, 0, copy_len); + if (copy_len % T4_ULPTX_MIN_IO) + memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO - + (copy_len % T4_ULPTX_MIN_IO)); + ret = c4iw_ofld_send(rdev, skb); + if (ret) + return ret; + len -= C4IW_MAX_INLINE_SIZE; + } + + wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); + if (!wr_wait.done) { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(rdev->lldi.pdev)); + rdev->flags = T4_FATAL_ERROR; + ret = -EIO; + } else + ret = wr_wait.ret; + return ret; +} + +/* + * Build and write a TPT entry. + * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size, + * pbl_size and pbl_addr + * OUT: stag index + */ +static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, + u32 *stag, u8 stag_state, u32 pdid, + enum fw_ri_stag_type type, enum fw_ri_mem_perms perm, + int bind_enabled, u32 zbva, u64 to, + u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr) +{ + int err; + struct fw_ri_tpte tpt; + u32 stag_idx; + static atomic_t key; + + if (c4iw_fatal_error(rdev)) + return -EIO; + + stag_state = stag_state > 0; + stag_idx = (*stag) >> 8; + + if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { + stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo, + &rdev->resource.tpt_fifo_lock); + if (!stag_idx) + return -ENOMEM; + *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); + } + PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", + __func__, stag_state, type, pdid, stag_idx); + + /* write TPT entry */ + if (reset_tpt_entry) + memset(&tpt, 0, sizeof(tpt)); + else { + tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID | + V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) | + V_FW_RI_TPTE_STAGSTATE(stag_state) | + V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid)); + tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) | + (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) | + V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO : + FW_RI_VA_BASED_TO))| + V_FW_RI_TPTE_PS(page_size)); + tpt.nosnoop_pbladdr = !pbl_size ? 
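
write_adapter_mem() above splits a transfer into 96-byte inline chunks, pads each chunk to the 32-byte ULP_TX minimum, and rounds every work request up to 16 bytes. The sizing arithmetic in isolation (the header size is a placeholder, not the real struct sizes):

#include <stdio.h>

#define T4_ULPTX_MIN_IO      32
#define C4IW_MAX_INLINE_SIZE 96

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define ROUNDUP(x, y)      (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
        int hdr = 24;   /* placeholder for sizeof(*req) + sizeof(*sc) */
        int len = 260;  /* total bytes to write to adapter memory */
        int num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
        int i;

        for (i = 0; i < num_wqe; i++) {
                int copy_len = len > C4IW_MAX_INLINE_SIZE ?
                               C4IW_MAX_INLINE_SIZE : len;
                int wr_len = ROUNDUP(hdr + ROUNDUP(copy_len, T4_ULPTX_MIN_IO), 16);

                printf("wqe %d: copy %d, pad to %d, wr_len %d\n", i, copy_len,
                       ROUNDUP(copy_len, T4_ULPTX_MIN_IO), wr_len);
                len -= C4IW_MAX_INLINE_SIZE;
        }
        return 0;
}
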
0 : cpu_to_be32( + V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3)); + tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); + tpt.va_hi = cpu_to_be32((u32)(to >> 32)); + tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); + tpt.dca_mwbcnt_pstag = cpu_to_be32(0); + tpt.len_hi = cpu_to_be32((u32)(len >> 32)); + } + err = write_adapter_mem(rdev, stag_idx + + (rdev->lldi.vr->stag.start >> 5), + sizeof(tpt), &tpt); + + if (reset_tpt_entry) + c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx, + &rdev->resource.tpt_fifo_lock); + return err; +} + +static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl, + u32 pbl_addr, u32 pbl_size) +{ + int err; + + PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", + __func__, pbl_addr, rdev->lldi.vr->pbl.start, + pbl_size); + + err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl); + return err; +} + +static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size, + u32 pbl_addr) +{ + return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, + pbl_size, pbl_addr); +} + +static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid) +{ + *stag = T4_STAG_UNSET; + return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0, + 0UL, 0, 0, 0, 0); +} + +static int deallocate_window(struct c4iw_rdev *rdev, u32 stag) +{ + return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0, + 0); +} + +static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid, + u32 pbl_size, u32 pbl_addr) +{ + *stag = T4_STAG_UNSET; + return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0, + 0UL, 0, 0, pbl_size, pbl_addr); +} + +static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) +{ + u32 mmid; + + mhp->attr.state = 1; + mhp->attr.stag = stag; + mmid = stag >> 8; + mhp->ibmr.rkey = mhp->ibmr.lkey = stag; + PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); + return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); +} + +static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, + struct c4iw_mr *mhp, int shift) +{ + u32 stag = T4_STAG_UNSET; + int ret; + + ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, + FW_RI_STAG_NSMR, mhp->attr.perms, + mhp->attr.mw_bind_enable, mhp->attr.zbva, + mhp->attr.va_fbo, mhp->attr.len, shift - 12, + mhp->attr.pbl_size, mhp->attr.pbl_addr); + if (ret) + return ret; + + ret = finish_mem_reg(mhp, stag); + if (ret) + dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, + mhp->attr.pbl_addr); + return ret; +} + +static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, + struct c4iw_mr *mhp, int shift, int npages) +{ + u32 stag; + int ret; + + if (npages > mhp->attr.pbl_size) + return -ENOMEM; + + stag = mhp->attr.stag; + ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, + FW_RI_STAG_NSMR, mhp->attr.perms, + mhp->attr.mw_bind_enable, mhp->attr.zbva, + mhp->attr.va_fbo, mhp->attr.len, shift - 12, + mhp->attr.pbl_size, mhp->attr.pbl_addr); + if (ret) + return ret; + + ret = finish_mem_reg(mhp, stag); + if (ret) + dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, + mhp->attr.pbl_addr); + + return ret; +} + +static int alloc_pbl(struct c4iw_mr *mhp, int npages) +{ + mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, + npages << 3); + + if (!mhp->attr.pbl_addr) + return -ENOMEM; + + mhp->attr.pbl_size = npages; + + return 0; +} + +static int build_phys_page_list(struct ib_phys_buf *buffer_list, + int num_phys_buf, u64 *iova_start, + u64 *total_size, int *npages, + int *shift, __be64 **page_list) +{ + u64 
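
The stags built above pack a fifo-allocated index into the high 24 bits and a rolling 8-bit key into the low byte: write_tpt_entry() composes (stag_idx << 8) | (key & 0xff), and finish_mem_reg() recovers the mmid with stag >> 8. A quick demonstration of that layout:

#include <stdio.h>
#include <stdint.h>

static uint32_t key_counter;

/* Build a stag the way write_tpt_entry() does, given an allocated index. */
static uint32_t make_stag(uint32_t stag_idx)
{
        return (stag_idx << 8) | (++key_counter & 0xff);
}

int main(void)
{
        uint32_t stag = make_stag(0x1234);

        printf("stag 0x%08x -> mmid 0x%x key 0x%02x\n",
               stag, stag >> 8, stag & 0xff);   /* mmid as in finish_mem_reg() */

        /* Reusing the same index bumps only the key byte. */
        stag = make_stag(0x1234);
        printf("stag 0x%08x -> mmid 0x%x key 0x%02x\n",
               stag, stag >> 8, stag & 0xff);
        return 0;
}
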
mask; + int i, j, n; + + mask = 0; + *total_size = 0; + for (i = 0; i < num_phys_buf; ++i) { + if (i != 0 && buffer_list[i].addr & ~PAGE_MASK) + return -EINVAL; + if (i != 0 && i != num_phys_buf - 1 && + (buffer_list[i].size & ~PAGE_MASK)) + return -EINVAL; + *total_size += buffer_list[i].size; + if (i > 0) + mask |= buffer_list[i].addr; + else + mask |= buffer_list[i].addr & PAGE_MASK; + if (i != num_phys_buf - 1) + mask |= buffer_list[i].addr + buffer_list[i].size; + else + mask |= (buffer_list[i].addr + buffer_list[i].size + + PAGE_SIZE - 1) & PAGE_MASK; + } + + if (*total_size > 0xFFFFFFFFULL) + return -ENOMEM; + + /* Find largest page shift we can use to cover buffers */ + for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) + if ((1ULL << *shift) & mask) + break; + + buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); + buffer_list[0].addr &= ~0ull << *shift; + + *npages = 0; + for (i = 0; i < num_phys_buf; ++i) + *npages += (buffer_list[i].size + + (1ULL << *shift) - 1) >> *shift; + + if (!*npages) + return -EINVAL; + + *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); + if (!*page_list) + return -ENOMEM; + + n = 0; + for (i = 0; i < num_phys_buf; ++i) + for (j = 0; + j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift; + ++j) + (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + + ((u64) j << *shift)); + + PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n", + __func__, (unsigned long long)*iova_start, + (unsigned long long)mask, *shift, (unsigned long long)*total_size, + *npages); + + return 0; + +} + +int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, + struct ib_pd *pd, struct ib_phys_buf *buffer_list, + int num_phys_buf, int acc, u64 *iova_start) +{ + + struct c4iw_mr mh, *mhp; + struct c4iw_pd *php; + struct c4iw_dev *rhp; + __be64 *page_list = NULL; + int shift = 0; + u64 total_size; + int npages; + int ret; + + PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); + + /* There can be no memory windows */ + if (atomic_read(&mr->usecnt)) + return -EINVAL; + + mhp = to_c4iw_mr(mr); + rhp = mhp->rhp; + php = to_c4iw_pd(mr->pd); + + /* make sure we are on the same adapter */ + if (rhp != php->rhp) + return -EINVAL; + + memcpy(&mh, mhp, sizeof *mhp); + + if (mr_rereg_mask & IB_MR_REREG_PD) + php = to_c4iw_pd(pd); + if (mr_rereg_mask & IB_MR_REREG_ACCESS) { + mh.attr.perms = c4iw_ib_to_tpt_access(acc); + mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == + IB_ACCESS_MW_BIND; + } + if (mr_rereg_mask & IB_MR_REREG_TRANS) { + ret = build_phys_page_list(buffer_list, num_phys_buf, + iova_start, + &total_size, &npages, + &shift, &page_list); + if (ret) + return ret; + } + + ret = reregister_mem(rhp, php, &mh, shift, npages); + kfree(page_list); + if (ret) + return ret; + if (mr_rereg_mask & IB_MR_REREG_PD) + mhp->attr.pdid = php->pdid; + if (mr_rereg_mask & IB_MR_REREG_ACCESS) + mhp->attr.perms = c4iw_ib_to_tpt_access(acc); + if (mr_rereg_mask & IB_MR_REREG_TRANS) { + mhp->attr.zbva = 0; + mhp->attr.va_fbo = *iova_start; + mhp->attr.page_size = shift - 12; + mhp->attr.len = (u32) total_size; + mhp->attr.pbl_size = npages; + } + + return 0; +} + +struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, int acc, u64 *iova_start) +{ + __be64 *page_list; + int shift; + u64 total_size; + int npages; + struct c4iw_dev *rhp; + struct c4iw_pd *php; + struct c4iw_mr *mhp; + int ret; + + PDBG("%s ib_pd %p\n", __func__, pd); + php = to_c4iw_pd(pd); + rhp = php->rhp; + + mhp = 
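
build_phys_page_list() above chooses the page shift by OR-ing every buffer start and interior boundary into a mask, then scanning up from PAGE_SHIFT for the first set bit: any set bit marks an alignment that a larger page could not cover. A simplified version of that selection, skipping the first/last partial-page fixups the real function performs:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static int pick_shift(const uint64_t *addrs, const uint64_t *sizes, int n)
{
        uint64_t mask = 0;
        int i, shift;

        for (i = 0; i < n; i++) {
                mask |= addrs[i];                    /* start alignment */
                if (i != n - 1)
                        mask |= addrs[i] + sizes[i]; /* interior boundary */
        }
        for (shift = PAGE_SHIFT; shift < 27; shift++)
                if ((1ULL << shift) & mask)
                        break;
        return shift;
}

int main(void)
{
        uint64_t addrs[] = { 0x100000, 0x110000 };  /* 64KB-aligned pair */
        uint64_t sizes[] = { 0x10000,  0x10000 };
        int shift = pick_shift(addrs, sizes, 2);

        printf("shift = %d (page size %u)\n", shift, 1u << shift);
        return 0;
}
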
kzalloc(sizeof(*mhp), GFP_KERNEL); + if (!mhp) + return ERR_PTR(-ENOMEM); + + mhp->rhp = rhp; + + /* First check that we have enough alignment */ + if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { + ret = -EINVAL; + goto err; + } + + if (num_phys_buf > 1 && + ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) { + ret = -EINVAL; + goto err; + } + + ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, + &total_size, &npages, &shift, + &page_list); + if (ret) + goto err; + + ret = alloc_pbl(mhp, npages); + if (ret) { + kfree(page_list); + goto err_pbl; + } + + ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr, + npages); + kfree(page_list); + if (ret) + goto err_pbl; + + mhp->attr.pdid = php->pdid; + mhp->attr.zbva = 0; + + mhp->attr.perms = c4iw_ib_to_tpt_access(acc); + mhp->attr.va_fbo = *iova_start; + mhp->attr.page_size = shift - 12; + + mhp->attr.len = (u32) total_size; + mhp->attr.pbl_size = npages; + ret = register_mem(rhp, php, mhp, shift); + if (ret) + goto err_pbl; + + return &mhp->ibmr; + +err_pbl: + c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, + mhp->attr.pbl_size << 3); + +err: + kfree(mhp); + return ERR_PTR(ret); + +} + +struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) +{ + struct c4iw_dev *rhp; + struct c4iw_pd *php; + struct c4iw_mr *mhp; + int ret; + u32 stag = T4_STAG_UNSET; + + PDBG("%s ib_pd %p\n", __func__, pd); + php = to_c4iw_pd(pd); + rhp = php->rhp; + + mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); + if (!mhp) + return ERR_PTR(-ENOMEM); + + mhp->rhp = rhp; + mhp->attr.pdid = php->pdid; + mhp->attr.perms = c4iw_ib_to_tpt_access(acc); + mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; + mhp->attr.zbva = 0; + mhp->attr.va_fbo = 0; + mhp->attr.page_size = 0; + mhp->attr.len = ~0UL; + mhp->attr.pbl_size = 0; + + ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, + FW_RI_STAG_NSMR, mhp->attr.perms, + mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); + if (ret) + goto err1; + + ret = finish_mem_reg(mhp, stag); + if (ret) + goto err2; + return &mhp->ibmr; +err2: + dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, + mhp->attr.pbl_addr); +err1: + kfree(mhp); + return ERR_PTR(ret); +} + +struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + u64 virt, int acc, struct ib_udata *udata) +{ + __be64 *pages; + int shift, n, len; + int i, j, k; + int err = 0; + struct ib_umem_chunk *chunk; + struct c4iw_dev *rhp; + struct c4iw_pd *php; + struct c4iw_mr *mhp; + + PDBG("%s ib_pd %p\n", __func__, pd); + + if (length == ~0ULL) + return ERR_PTR(-EINVAL); + + if ((length + start) < start) + return ERR_PTR(-EINVAL); + + php = to_c4iw_pd(pd); + rhp = php->rhp; + mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); + if (!mhp) + return ERR_PTR(-ENOMEM); + + mhp->rhp = rhp; + + mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); + if (IS_ERR(mhp->umem)) { + err = PTR_ERR(mhp->umem); + kfree(mhp); + return ERR_PTR(err); + } + + shift = ffs(mhp->umem->page_size) - 1; + + n = 0; + list_for_each_entry(chunk, &mhp->umem->chunk_list, list) + n += chunk->nents; + + err = alloc_pbl(mhp, n); + if (err) + goto err; + + pages = (__be64 *) __get_free_page(GFP_KERNEL); + if (!pages) { + err = -ENOMEM; + goto err_pbl; + } + + i = n = 0; + + list_for_each_entry(chunk, &mhp->umem->chunk_list, list) + for (j = 0; j < chunk->nmap; ++j) { + len = sg_dma_len(&chunk->page_list[j]) >> shift; + for (k = 0; k < len; ++k) { + pages[i++] = cpu_to_be64(sg_dma_address( + 
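
The registration loop above streams PBL entries through a single scratch page: it accumulates up to PAGE_SIZE/8 big-endian entries, flushes them at entry offset n, and continues until the user memory runs out. Just the batching logic, with a print standing in for write_pbl():

#include <stdio.h>
#include <stdint.h>

#define BATCH (4096 / 8)   /* one scratch page holds 512 u64 PBL entries */

/* stand-in for write_pbl(): report what would be flushed and where */
static void flush(int first, int count)
{
        printf("flush %d entries at byte offset %d\n", count, first * 8);
}

int main(void)
{
        uint64_t pages[BATCH];
        int total = 1200, i = 0, n = 0, k;

        for (k = 0; k < total; k++) {
                pages[i++] = 0x100000ull + ((uint64_t)k << 12); /* fake DMA addrs */
                if (i == BATCH) {       /* scratch page full: flush, restart */
                        flush(n, i);
                        n += i;
                        i = 0;
                }
        }
        if (i)                          /* final partial batch */
                flush(n, i);
        return 0;
}
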
&chunk->page_list[j]) + + mhp->umem->page_size * k); + if (i == PAGE_SIZE / sizeof *pages) { + err = write_pbl(&mhp->rhp->rdev, + pages, + mhp->attr.pbl_addr + (n << 3), i); + if (err) + goto pbl_done; + n += i; + i = 0; + } + } + } + + if (i) + err = write_pbl(&mhp->rhp->rdev, pages, + mhp->attr.pbl_addr + (n << 3), i); + +pbl_done: + free_page((unsigned long) pages); + if (err) + goto err_pbl; + + mhp->attr.pdid = php->pdid; + mhp->attr.zbva = 0; + mhp->attr.perms = c4iw_ib_to_tpt_access(acc); + mhp->attr.va_fbo = virt; + mhp->attr.page_size = shift - 12; + mhp->attr.len = (u32) length; + + err = register_mem(rhp, php, mhp, shift); + if (err) + goto err_pbl; + + return &mhp->ibmr; + +err_pbl: + c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, + mhp->attr.pbl_size << 3); + +err: + ib_umem_release(mhp->umem); + kfree(mhp); + return ERR_PTR(err); +} + +struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd) +{ + struct c4iw_dev *rhp; + struct c4iw_pd *php; + struct c4iw_mw *mhp; + u32 mmid; + u32 stag = 0; + int ret; + + php = to_c4iw_pd(pd); + rhp = php->rhp; + mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); + if (!mhp) + return ERR_PTR(-ENOMEM); + ret = allocate_window(&rhp->rdev, &stag, php->pdid); + if (ret) { + kfree(mhp); + return ERR_PTR(ret); + } + mhp->rhp = rhp; + mhp->attr.pdid = php->pdid; + mhp->attr.type = FW_RI_STAG_MW; + mhp->attr.stag = stag; + mmid = (stag) >> 8; + mhp->ibmw.rkey = stag; + if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { + deallocate_window(&rhp->rdev, mhp->attr.stag); + kfree(mhp); + return ERR_PTR(-ENOMEM); + } + PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); + return &(mhp->ibmw); +} + +int c4iw_dealloc_mw(struct ib_mw *mw) +{ + struct c4iw_dev *rhp; + struct c4iw_mw *mhp; + u32 mmid; + + mhp = to_c4iw_mw(mw); + rhp = mhp->rhp; + mmid = (mw->rkey) >> 8; + deallocate_window(&rhp->rdev, mhp->attr.stag); + remove_handle(rhp, &rhp->mmidr, mmid); + kfree(mhp); + PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp); + return 0; +} + +struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) +{ + struct c4iw_dev *rhp; + struct c4iw_pd *php; + struct c4iw_mr *mhp; + u32 mmid; + u32 stag = 0; + int ret = 0; + + php = to_c4iw_pd(pd); + rhp = php->rhp; + mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); + if (!mhp) + goto err; + + mhp->rhp = rhp; + ret = alloc_pbl(mhp, pbl_depth); + if (ret) + goto err1; + mhp->attr.pbl_size = pbl_depth; + ret = allocate_stag(&rhp->rdev, &stag, php->pdid, + mhp->attr.pbl_size, mhp->attr.pbl_addr); + if (ret) + goto err2; + mhp->attr.pdid = php->pdid; + mhp->attr.type = FW_RI_STAG_NSMR; + mhp->attr.stag = stag; + mhp->attr.state = 1; + mmid = (stag) >> 8; + mhp->ibmr.rkey = mhp->ibmr.lkey = stag; + if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) + goto err3; + + PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); + return &(mhp->ibmr); +err3: + dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, + mhp->attr.pbl_addr); +err2: + c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, + mhp->attr.pbl_size << 3); +err1: + kfree(mhp); +err: + return ERR_PTR(ret); +} + +struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device, + int page_list_len) +{ + struct c4iw_fr_page_list *c4pl; + struct c4iw_dev *dev = to_c4iw_dev(device); + dma_addr_t dma_addr; + int size = sizeof *c4pl + page_list_len * sizeof(u64); + + if (page_list_len > T4_MAX_FR_DEPTH) + return ERR_PTR(-EINVAL); + + c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size, + &dma_addr, GFP_KERNEL); + if (!c4pl) + 
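
c4iw_alloc_fastreg_pbl() above sizes one coherent allocation as header-plus-array and points page_list at (c4pl + 1), so a single free releases both pieces together. The same layout with plain malloc (the struct is a stand-in, not the driver's):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct pbl {                    /* stand-in for struct c4iw_fr_page_list */
        int size;
        int max_len;
        uint64_t *page_list;    /* points just past the struct itself */
};

static struct pbl *alloc_pbl_onechunk(int page_list_len)
{
        int size = sizeof(struct pbl) + page_list_len * sizeof(uint64_t);
        struct pbl *p = malloc(size);

        if (!p)
                return NULL;
        p->size = size;
        p->max_len = page_list_len;
        p->page_list = (uint64_t *)(p + 1);  /* array lives in the same chunk */
        return p;
}

int main(void)
{
        struct pbl *p = alloc_pbl_onechunk(8);

        if (!p)
                return 1;
        p->page_list[0] = 0x100000;
        printf("one chunk of %d bytes, array at offset %zu\n",
               p->size, (size_t)((char *)p->page_list - (char *)p));
        free(p);   /* single free releases header and array together */
        return 0;
}
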
return ERR_PTR(-ENOMEM); + + pci_unmap_addr_set(c4pl, mapping, dma_addr); + c4pl->dma_addr = dma_addr; + c4pl->dev = dev; + c4pl->size = size; + c4pl->ibpl.page_list = (u64 *)(c4pl + 1); + c4pl->ibpl.max_page_list_len = page_list_len; + + return &c4pl->ibpl; +} + +void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl) +{ + struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); + + dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size, + c4pl, pci_unmap_addr(c4pl, mapping)); +} + +int c4iw_dereg_mr(struct ib_mr *ib_mr) +{ + struct c4iw_dev *rhp; + struct c4iw_mr *mhp; + u32 mmid; + + PDBG("%s ib_mr %p\n", __func__, ib_mr); + /* There can be no memory windows */ + if (atomic_read(&ib_mr->usecnt)) + return -EINVAL; + + mhp = to_c4iw_mr(ib_mr); + rhp = mhp->rhp; + mmid = mhp->attr.stag >> 8; + dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, + mhp->attr.pbl_addr); + if (mhp->attr.pbl_size) + c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, + mhp->attr.pbl_size << 3); + remove_handle(rhp, &rhp->mmidr, mmid); + if (mhp->kva) + kfree((void *) (unsigned long) mhp->kva); + if (mhp->umem) + ib_umem_release(mhp->umem); + PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp); + kfree(mhp); + return 0; +} diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c new file mode 100644 index 00000000000..3cb50af3e52 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include + +#include "iw_cxgb4.h" + +static int fastreg_support; +module_param(fastreg_support, int, 0644); +MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)"); + +static int c4iw_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props) +{ + return -ENOSYS; +} + +static struct ib_ah *c4iw_ah_create(struct ib_pd *pd, + struct ib_ah_attr *ah_attr) +{ + return ERR_PTR(-ENOSYS); +} + +static int c4iw_ah_destroy(struct ib_ah *ah) +{ + return -ENOSYS; +} + +static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + return -ENOSYS; +} + +static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + return -ENOSYS; +} + +static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags, + u8 port_num, struct ib_wc *in_wc, + struct ib_grh *in_grh, struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + return -ENOSYS; +} + +static int c4iw_dealloc_ucontext(struct ib_ucontext *context) +{ + struct c4iw_dev *rhp = to_c4iw_dev(context->device); + struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); + struct c4iw_mm_entry *mm, *tmp; + + PDBG("%s context %p\n", __func__, context); + list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) + kfree(mm); + c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); + kfree(ucontext); + return 0; +} + +static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct c4iw_ucontext *context; + struct c4iw_dev *rhp = to_c4iw_dev(ibdev); + + PDBG("%s ibdev %p\n", __func__, ibdev); + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return ERR_PTR(-ENOMEM); + c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); + INIT_LIST_HEAD(&context->mmaps); + spin_lock_init(&context->mmap_lock); + return &context->ibucontext; +} + +static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + int len = vma->vm_end - vma->vm_start; + u32 key = vma->vm_pgoff << PAGE_SHIFT; + struct c4iw_rdev *rdev; + int ret = 0; + struct c4iw_mm_entry *mm; + struct c4iw_ucontext *ucontext; + u64 addr; + + PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff, + key, len); + + if (vma->vm_start & (PAGE_SIZE-1)) + return -EINVAL; + + rdev = &(to_c4iw_dev(context->device)->rdev); + ucontext = to_c4iw_ucontext(context); + + mm = remove_mmap(ucontext, key, len); + if (!mm) + return -EINVAL; + addr = mm->addr; + kfree(mm); + + if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) && + (addr < (pci_resource_start(rdev->lldi.pdev, 2) + + pci_resource_len(rdev->lldi.pdev, 2)))) { + + /* + * Map T4 DB register. + */ + if (vma->vm_flags & VM_READ) + return -EPERM; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; + vma->vm_flags &= ~VM_MAYREAD; + ret = io_remap_pfn_range(vma, vma->vm_start, + addr >> PAGE_SHIFT, + len, vma->vm_page_prot); + } else { + + /* + * Map WQ or CQ contig dma memory... 
+ */ + ret = remap_pfn_range(vma, vma->vm_start, + addr >> PAGE_SHIFT, + len, vma->vm_page_prot); + } + + return ret; +} + +static int c4iw_deallocate_pd(struct ib_pd *pd) +{ + struct c4iw_dev *rhp; + struct c4iw_pd *php; + + php = to_c4iw_pd(pd); + rhp = php->rhp; + PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid); + c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, php->pdid, + &rhp->rdev.resource.pdid_fifo_lock); + kfree(php); + return 0; +} + +static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct c4iw_pd *php; + u32 pdid; + struct c4iw_dev *rhp; + + PDBG("%s ibdev %p\n", __func__, ibdev); + rhp = (struct c4iw_dev *) ibdev; + pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_fifo, + &rhp->rdev.resource.pdid_fifo_lock); + if (!pdid) + return ERR_PTR(-EINVAL); + php = kzalloc(sizeof(*php), GFP_KERNEL); + if (!php) { + c4iw_put_resource(&rhp->rdev.resource.pdid_fifo, pdid, + &rhp->rdev.resource.pdid_fifo_lock); + return ERR_PTR(-ENOMEM); + } + php->pdid = pdid; + php->rhp = rhp; + if (context) { + if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) { + c4iw_deallocate_pd(&php->ibpd); + return ERR_PTR(-EFAULT); + } + } + PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php); + return &php->ibpd; +} + +static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, + u16 *pkey) +{ + PDBG("%s ibdev %p\n", __func__, ibdev); + *pkey = 0; + return 0; +} + +static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, + union ib_gid *gid) +{ + struct c4iw_dev *dev; + + PDBG("%s ibdev %p, port %d, index %d, gid %p\n", + __func__, ibdev, port, index, gid); + dev = to_c4iw_dev(ibdev); + BUG_ON(port == 0); + memset(&(gid->raw[0]), 0, sizeof(gid->raw)); + memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6); + return 0; +} + +static int c4iw_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + + struct c4iw_dev *dev; + PDBG("%s ibdev %p\n", __func__, ibdev); + + dev = to_c4iw_dev(ibdev); + memset(props, 0, sizeof *props); + memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + props->hw_ver = dev->rdev.lldi.adapter_type; + props->fw_ver = dev->rdev.lldi.fw_vers; + props->device_cap_flags = dev->device_cap_flags; + props->page_size_cap = T4_PAGESIZE_MASK; + props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor; + props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device; + props->max_mr_size = T4_MAX_MR_SIZE; + props->max_qp = T4_MAX_NUM_QP; + props->max_qp_wr = T4_MAX_QP_DEPTH; + props->max_sge = T4_MAX_RECV_SGE; + props->max_sge_rd = 1; + props->max_qp_rd_atom = T4_MAX_READ_DEPTH; + props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH; + props->max_cq = T4_MAX_NUM_CQ; + props->max_cqe = T4_MAX_CQ_DEPTH; + props->max_mr = c4iw_num_stags(&dev->rdev); + props->max_pd = T4_MAX_NUM_PD; + props->local_ca_ack_delay = 0; + props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH; + + return 0; +} + +static int c4iw_query_port(struct ib_device *ibdev, u8 port, + struct ib_port_attr *props) +{ + struct c4iw_dev *dev; + struct net_device *netdev; + struct in_device *inetdev; + + PDBG("%s ibdev %p\n", __func__, ibdev); + + dev = to_c4iw_dev(ibdev); + netdev = dev->rdev.lldi.ports[port-1]; + + memset(props, 0, sizeof(struct ib_port_attr)); + props->max_mtu = IB_MTU_4096; + if (netdev->mtu >= 4096) + props->active_mtu = IB_MTU_4096; + else if (netdev->mtu >= 2048) + props->active_mtu = IB_MTU_2048; + else if (netdev->mtu >= 1024) + props->active_mtu = IB_MTU_1024; + else if 
(netdev->mtu >= 512) + props->active_mtu = IB_MTU_512; + else + props->active_mtu = IB_MTU_256; + + if (!netif_carrier_ok(netdev)) + props->state = IB_PORT_DOWN; + else { + inetdev = in_dev_get(netdev); + if (inetdev) { + if (inetdev->ifa_list) + props->state = IB_PORT_ACTIVE; + else + props->state = IB_PORT_INIT; + in_dev_put(inetdev); + } else + props->state = IB_PORT_INIT; + } + + props->port_cap_flags = + IB_PORT_CM_SUP | + IB_PORT_SNMP_TUNNEL_SUP | + IB_PORT_REINIT_SUP | + IB_PORT_DEVICE_MGMT_SUP | + IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; + props->gid_tbl_len = 1; + props->pkey_tbl_len = 1; + props->active_width = 2; + props->active_speed = 2; + props->max_msg_sz = -1; + + return 0; +} + +static ssize_t show_rev(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, + ibdev.dev); + PDBG("%s dev 0x%p\n", __func__, dev); + return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type); +} + +static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, + ibdev.dev); + PDBG("%s dev 0x%p\n", __func__, dev); + + return sprintf(buf, "%u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers), + FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers), + FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers), + FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers)); +} + +static ssize_t show_hca(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, + ibdev.dev); + struct ethtool_drvinfo info; + struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0]; + + PDBG("%s dev 0x%p\n", __func__, dev); + lldev->ethtool_ops->get_drvinfo(lldev, &info); + return sprintf(buf, "%s\n", info.driver); +} + +static ssize_t show_board(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, + ibdev.dev); + PDBG("%s dev 0x%p\n", __func__, dev); + return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor, + c4iw_dev->rdev.lldi.pdev->device); +} + +static int c4iw_get_mib(struct ib_device *ibdev, + union rdma_protocol_stats *stats) +{ + return -ENOSYS; +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); +static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); + +static struct device_attribute *c4iw_class_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_fw_ver, + &dev_attr_hca_type, + &dev_attr_board_id, +}; + +int c4iw_register_device(struct c4iw_dev *dev) +{ + int ret; + int i; + + PDBG("%s c4iw_dev %p\n", __func__, dev); + BUG_ON(!dev->rdev.lldi.ports[0]); + strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX); + memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); + memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + dev->ibdev.owner = THIS_MODULE; + dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; + if (fastreg_support) + dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + dev->ibdev.local_dma_lkey = 0; + dev->ibdev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + 
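
c4iw_query_port() above folds the netdev MTU into the discrete IB enum by walking the thresholds downward; the enum values match rdma/ib_verbs.h. The ladder as a standalone function:

#include <stdio.h>

enum ib_mtu {
        IB_MTU_256 = 1, IB_MTU_512, IB_MTU_1024, IB_MTU_2048, IB_MTU_4096
};

static enum ib_mtu mtu_to_ib(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        if (mtu >= 2048)
                return IB_MTU_2048;
        if (mtu >= 1024)
                return IB_MTU_1024;
        if (mtu >= 512)
                return IB_MTU_512;
        return IB_MTU_256;
}

int main(void)
{
        int mtus[] = { 1500, 9000, 600 };
        int i;

        for (i = 0; i < 3; i++)
                printf("netdev mtu %4d -> IB_MTU enum %d\n",
                       mtus[i], mtu_to_ib(mtus[i]));
        return 0;
}
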
(1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_POLL_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_POST_SEND) | + (1ull << IB_USER_VERBS_CMD_POST_RECV); + dev->ibdev.node_type = RDMA_NODE_RNIC; + memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC)); + dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports; + dev->ibdev.num_comp_vectors = 1; + dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); + dev->ibdev.query_device = c4iw_query_device; + dev->ibdev.query_port = c4iw_query_port; + dev->ibdev.modify_port = c4iw_modify_port; + dev->ibdev.query_pkey = c4iw_query_pkey; + dev->ibdev.query_gid = c4iw_query_gid; + dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext; + dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext; + dev->ibdev.mmap = c4iw_mmap; + dev->ibdev.alloc_pd = c4iw_allocate_pd; + dev->ibdev.dealloc_pd = c4iw_deallocate_pd; + dev->ibdev.create_ah = c4iw_ah_create; + dev->ibdev.destroy_ah = c4iw_ah_destroy; + dev->ibdev.create_qp = c4iw_create_qp; + dev->ibdev.modify_qp = c4iw_ib_modify_qp; + dev->ibdev.destroy_qp = c4iw_destroy_qp; + dev->ibdev.create_cq = c4iw_create_cq; + dev->ibdev.destroy_cq = c4iw_destroy_cq; + dev->ibdev.resize_cq = c4iw_resize_cq; + dev->ibdev.poll_cq = c4iw_poll_cq; + dev->ibdev.get_dma_mr = c4iw_get_dma_mr; + dev->ibdev.reg_phys_mr = c4iw_register_phys_mem; + dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem; + dev->ibdev.reg_user_mr = c4iw_reg_user_mr; + dev->ibdev.dereg_mr = c4iw_dereg_mr; + dev->ibdev.alloc_mw = c4iw_alloc_mw; + dev->ibdev.bind_mw = c4iw_bind_mw; + dev->ibdev.dealloc_mw = c4iw_dealloc_mw; + dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr; + dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl; + dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl; + dev->ibdev.attach_mcast = c4iw_multicast_attach; + dev->ibdev.detach_mcast = c4iw_multicast_detach; + dev->ibdev.process_mad = c4iw_process_mad; + dev->ibdev.req_notify_cq = c4iw_arm_cq; + dev->ibdev.post_send = c4iw_post_send; + dev->ibdev.post_recv = c4iw_post_receive; + dev->ibdev.get_protocol_stats = c4iw_get_mib; + + dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); + if (!dev->ibdev.iwcm) + return -ENOMEM; + + dev->ibdev.iwcm->connect = c4iw_connect; + dev->ibdev.iwcm->accept = c4iw_accept_cr; + dev->ibdev.iwcm->reject = c4iw_reject_cr; + dev->ibdev.iwcm->create_listen = c4iw_create_listen; + dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen; + dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; + dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; + dev->ibdev.iwcm->get_qp = c4iw_get_qp; + + ret = ib_register_device(&dev->ibdev); + if (ret) + goto bail1; + + for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) { + ret = device_create_file(&dev->ibdev.dev, + c4iw_class_attributes[i]); + if (ret) + goto bail2; + } + return 0; +bail2: + ib_unregister_device(&dev->ibdev); +bail1: + kfree(dev->ibdev.iwcm); + return ret; +} + +void c4iw_unregister_device(struct c4iw_dev *dev) +{ + int i; + + PDBG("%s c4iw_dev %p\n", __func__, dev); + for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) + device_remove_file(&dev->ibdev.dev, + c4iw_class_attributes[i]); + ib_unregister_device(&dev->ibdev); + kfree(dev->ibdev.iwcm); + return; +} diff --git 
a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c new file mode 100644 index 00000000000..bd56c841ef7 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -0,0 +1,1577 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include "iw_cxgb4.h" + +static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, + struct c4iw_dev_ucontext *uctx) +{ + /* + * uP clears EQ contexts when the connection exits rdma mode, + * so no need to post a RESET WR for these EQs. + */ + dma_free_coherent(&(rdev->lldi.pdev->dev), + wq->rq.memsize, wq->rq.queue, + pci_unmap_addr(&wq->rq, mapping)); + dma_free_coherent(&(rdev->lldi.pdev->dev), + wq->sq.memsize, wq->sq.queue, + pci_unmap_addr(&wq->sq, mapping)); + c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); + kfree(wq->rq.sw_rq); + kfree(wq->sq.sw_sq); + c4iw_put_qpid(rdev, wq->rq.qid, uctx); + c4iw_put_qpid(rdev, wq->sq.qid, uctx); + return 0; +} + +static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, + struct t4_cq *rcq, struct t4_cq *scq, + struct c4iw_dev_ucontext *uctx) +{ + int user = (uctx != &rdev->uctx); + struct fw_ri_res_wr *res_wr; + struct fw_ri_res *res; + int wr_len; + struct c4iw_wr_wait wr_wait; + struct sk_buff *skb; + int ret; + int eqsize; + + wq->sq.qid = c4iw_get_qpid(rdev, uctx); + if (!wq->sq.qid) + return -ENOMEM; + + wq->rq.qid = c4iw_get_qpid(rdev, uctx); + if (!wq->rq.qid) + goto err1; + + if (!user) { + wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, + GFP_KERNEL); + if (!wq->sq.sw_sq) + goto err2; + + wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, + GFP_KERNEL); + if (!wq->rq.sw_rq) + goto err3; + } + + /* + * RQT must be a power of 2. 
+ */ + wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size); + wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); + if (!wq->rq.rqt_hwaddr) + goto err4; + + wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), + wq->sq.memsize, &(wq->sq.dma_addr), + GFP_KERNEL); + if (!wq->sq.queue) + goto err5; + memset(wq->sq.queue, 0, wq->sq.memsize); + pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); + + wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), + wq->rq.memsize, &(wq->rq.dma_addr), + GFP_KERNEL); + if (!wq->rq.queue) + goto err6; + PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", + __func__, wq->sq.queue, + (unsigned long long)virt_to_phys(wq->sq.queue), + wq->rq.queue, + (unsigned long long)virt_to_phys(wq->rq.queue)); + memset(wq->rq.queue, 0, wq->rq.memsize); + pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); + + wq->db = rdev->lldi.db_reg; + wq->gts = rdev->lldi.gts_reg; + if (user) { + wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) + + (wq->sq.qid << rdev->qpshift); + wq->sq.udb &= PAGE_MASK; + wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) + + (wq->rq.qid << rdev->qpshift); + wq->rq.udb &= PAGE_MASK; + } + wq->rdev = rdev; + wq->rq.msn = 1; + + /* build fw_ri_res_wr */ + wr_len = sizeof *res_wr + 2 * sizeof *res; + + skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) { + ret = -ENOMEM; + goto err7; + } + set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); + + res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len); + memset(res_wr, 0, wr_len); + res_wr->op_nres = cpu_to_be32( + FW_WR_OP(FW_RI_RES_WR) | + V_FW_RI_RES_WR_NRES(2) | + FW_WR_COMPL(1)); + res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); + res_wr->cookie = (u64)&wr_wait; + res = res_wr->res; + res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; + res->u.sqrq.op = FW_RI_RES_OP_WRITE; + + /* + * eqsize is the number of 64B entries plus the status page size. + */ + eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES; + + res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( + V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ + V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ + V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ + V_FW_RI_RES_WR_IQID(scq->cqid)); + res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( + V_FW_RI_RES_WR_DCAEN(0) | + V_FW_RI_RES_WR_DCACPU(0) | + V_FW_RI_RES_WR_FBMIN(3) | + V_FW_RI_RES_WR_FBMAX(3) | + V_FW_RI_RES_WR_CIDXFTHRESHO(0) | + V_FW_RI_RES_WR_CIDXFTHRESH(0) | + V_FW_RI_RES_WR_EQSIZE(eqsize)); + res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); + res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); + res++; + res->u.sqrq.restype = FW_RI_RES_TYPE_RQ; + res->u.sqrq.op = FW_RI_RES_OP_WRITE; + + /* + * eqsize is the number of 64B entries plus the status page size. 
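+ * With T4_RQ_NUM_SLOTS == 2, a 1024-entry RQ takes 2048 64B slots
+ * plus one or two status entries, depending on L1_CACHE_BYTES.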
+ */ + eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES; + res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( + V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ + V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ + V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ + V_FW_RI_RES_WR_IQID(rcq->cqid)); + res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( + V_FW_RI_RES_WR_DCAEN(0) | + V_FW_RI_RES_WR_DCACPU(0) | + V_FW_RI_RES_WR_FBMIN(3) | + V_FW_RI_RES_WR_FBMAX(3) | + V_FW_RI_RES_WR_CIDXFTHRESHO(0) | + V_FW_RI_RES_WR_CIDXFTHRESH(0) | + V_FW_RI_RES_WR_EQSIZE(eqsize)); + res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); + res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); + + c4iw_init_wr_wait(&wr_wait); + + ret = c4iw_ofld_send(rdev, skb); + if (ret) + goto err7; + wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); + if (!wr_wait.done) { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(rdev->lldi.pdev)); + rdev->flags = T4_FATAL_ERROR; + ret = -EIO; + } else + ret = wr_wait.ret; + if (ret) + goto err7; + + PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n", + __func__, wq->sq.qid, wq->rq.qid, wq->db, + (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb); + + return 0; +err7: + dma_free_coherent(&(rdev->lldi.pdev->dev), + wq->rq.memsize, wq->rq.queue, + pci_unmap_addr(&wq->rq, mapping)); +err6: + dma_free_coherent(&(rdev->lldi.pdev->dev), + wq->sq.memsize, wq->sq.queue, + pci_unmap_addr(&wq->sq, mapping)); +err5: + c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); +err4: + kfree(wq->rq.sw_rq); +err3: + kfree(wq->sq.sw_sq); +err2: + c4iw_put_qpid(rdev, wq->rq.qid, uctx); +err1: + c4iw_put_qpid(rdev, wq->sq.qid, uctx); + return -ENOMEM; +} + +static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) +{ + int i; + u32 plen; + int size; + u8 *datap; + + if (wr->num_sge > T4_MAX_SEND_SGE) + return -EINVAL; + switch (wr->opcode) { + case IB_WR_SEND: + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->send.sendop_pkd = cpu_to_be32( + V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE)); + else + wqe->send.sendop_pkd = cpu_to_be32( + V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND)); + wqe->send.stag_inv = 0; + break; + case IB_WR_SEND_WITH_INV: + if (wr->send_flags & IB_SEND_SOLICITED) + wqe->send.sendop_pkd = cpu_to_be32( + V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV)); + else + wqe->send.sendop_pkd = cpu_to_be32( + V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV)); + wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); + break; + + default: + return -EINVAL; + } + plen = 0; + if (wr->num_sge) { + if (wr->send_flags & IB_SEND_INLINE) { + datap = (u8 *)wqe->send.u.immd_src[0].data; + for (i = 0; i < wr->num_sge; i++) { + if ((plen + wr->sg_list[i].length) > + T4_MAX_SEND_INLINE) { + return -EMSGSIZE; + } + plen += wr->sg_list[i].length; + memcpy(datap, + (void *)(unsigned long)wr->sg_list[i].addr, + wr->sg_list[i].length); + datap += wr->sg_list[i].length; + } + wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; + wqe->send.u.immd_src[0].r1 = 0; + wqe->send.u.immd_src[0].r2 = 0; + wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen); + size = sizeof wqe->send + sizeof(struct fw_ri_immd) + + plen; + } else { + for (i = 0; i < wr->num_sge; i++) { + if ((plen + wr->sg_list[i].length) < plen) + return -EMSGSIZE; + plen += wr->sg_list[i].length; + wqe->send.u.isgl_src[0].sge[i].stag = + cpu_to_be32(wr->sg_list[i].lkey); + wqe->send.u.isgl_src[0].sge[i].len = + cpu_to_be32(wr->sg_list[i].length); + wqe->send.u.isgl_src[0].sge[i].to = + 
cpu_to_be64(wr->sg_list[i].addr); + } + wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL; + wqe->send.u.isgl_src[0].r1 = 0; + wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge); + wqe->send.u.isgl_src[0].r2 = 0; + size = sizeof wqe->send + sizeof(struct fw_ri_isgl) + + wr->num_sge * sizeof(struct fw_ri_sge); + } + } else { + wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD; + wqe->send.u.immd_src[0].r1 = 0; + wqe->send.u.immd_src[0].r2 = 0; + wqe->send.u.immd_src[0].immdlen = 0; + size = sizeof wqe->send + sizeof(struct fw_ri_immd); + } + *len16 = DIV_ROUND_UP(size, 16); + wqe->send.plen = cpu_to_be32(plen); + return 0; +} + +static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) +{ + int i; + u32 plen; + int size; + u8 *datap; + + if (wr->num_sge > T4_MAX_WRITE_SGE) + return -EINVAL; + wqe->write.r2 = 0; + wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey); + wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr); + plen = 0; + if (wr->num_sge) { + if (wr->send_flags & IB_SEND_INLINE) { + datap = (u8 *)wqe->write.u.immd_src[0].data; + for (i = 0; i < wr->num_sge; i++) { + if ((plen + wr->sg_list[i].length) > + T4_MAX_WRITE_INLINE) { + return -EMSGSIZE; + } + plen += wr->sg_list[i].length; + memcpy(datap, + (void *)(unsigned long)wr->sg_list[i].addr, + wr->sg_list[i].length); + datap += wr->sg_list[i].length; + } + wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; + wqe->write.u.immd_src[0].r1 = 0; + wqe->write.u.immd_src[0].r2 = 0; + wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen); + size = sizeof wqe->write + sizeof(struct fw_ri_immd) + + plen; + } else { + for (i = 0; i < wr->num_sge; i++) { + if ((plen + wr->sg_list[i].length) < plen) + return -EMSGSIZE; + plen += wr->sg_list[i].length; + wqe->write.u.isgl_src[0].sge[i].stag = + cpu_to_be32(wr->sg_list[i].lkey); + wqe->write.u.isgl_src[0].sge[i].len = + cpu_to_be32(wr->sg_list[i].length); + wqe->write.u.isgl_src[0].sge[i].to = + cpu_to_be64(wr->sg_list[i].addr); + } + wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL; + wqe->write.u.isgl_src[0].r1 = 0; + wqe->write.u.isgl_src[0].nsge = + cpu_to_be16(wr->num_sge); + wqe->write.u.isgl_src[0].r2 = 0; + size = sizeof wqe->write + sizeof(struct fw_ri_isgl) + + wr->num_sge * sizeof(struct fw_ri_sge); + } + } else { + wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD; + wqe->write.u.immd_src[0].r1 = 0; + wqe->write.u.immd_src[0].r2 = 0; + wqe->write.u.immd_src[0].immdlen = 0; + size = sizeof wqe->write + sizeof(struct fw_ri_immd); + } + *len16 = DIV_ROUND_UP(size, 16); + wqe->write.plen = cpu_to_be32(plen); + return 0; +} + +static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) +{ + if (wr->num_sge > 1) + return -EINVAL; + if (wr->num_sge) { + wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey); + wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr + >> 32)); + wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr); + wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey); + wqe->read.plen = cpu_to_be32(wr->sg_list[0].length); + wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr + >> 32)); + wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr)); + } else { + wqe->read.stag_src = cpu_to_be32(2); + wqe->read.to_src_hi = 0; + wqe->read.to_src_lo = 0; + wqe->read.stag_sink = cpu_to_be32(2); + wqe->read.plen = 0; + wqe->read.to_sink_hi = 0; + wqe->read.to_sink_lo = 0; + } + wqe->read.r2 = 0; + wqe->read.r5 = 0; + *len16 = DIV_ROUND_UP(sizeof wqe->read, 16); + return 0; +} + +static int build_rdma_recv(struct 
c4iw_qp *qhp, union t4_recv_wr *wqe, + struct ib_recv_wr *wr, u8 *len16) +{ + int i; + int plen = 0; + + for (i = 0; i < wr->num_sge; i++) { + if ((plen + wr->sg_list[i].length) < plen) + return -EMSGSIZE; + plen += wr->sg_list[i].length; + wqe->recv.isgl.sge[i].stag = + cpu_to_be32(wr->sg_list[i].lkey); + wqe->recv.isgl.sge[i].len = + cpu_to_be32(wr->sg_list[i].length); + wqe->recv.isgl.sge[i].to = + cpu_to_be64(wr->sg_list[i].addr); + } + for (; i < T4_MAX_RECV_SGE; i++) { + wqe->recv.isgl.sge[i].stag = 0; + wqe->recv.isgl.sge[i].len = 0; + wqe->recv.isgl.sge[i].to = 0; + } + wqe->recv.isgl.op = FW_RI_DATA_ISGL; + wqe->recv.isgl.r1 = 0; + wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge); + wqe->recv.isgl.r2 = 0; + *len16 = DIV_ROUND_UP(sizeof wqe->recv + + wr->num_sge * sizeof(struct fw_ri_sge), 16); + return 0; +} + +static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) +{ + + struct fw_ri_immd *imdp; + __be64 *p; + int i; + int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); + + if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH) + return -EINVAL; + + wqe->fr.qpbinde_to_dcacpu = 0; + wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12; + wqe->fr.addr_type = FW_RI_VA_BASED_TO; + wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags); + wqe->fr.len_hi = 0; + wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length); + wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey); + wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); + wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & + 0xffffffff); + if (pbllen > T4_MAX_FR_IMMD) { + struct c4iw_fr_page_list *c4pl = + to_c4iw_fr_page_list(wr->wr.fast_reg.page_list); + struct fw_ri_dsgl *sglp; + + sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); + sglp->op = FW_RI_DATA_DSGL; + sglp->r1 = 0; + sglp->nsge = cpu_to_be16(1); + sglp->addr0 = cpu_to_be64(c4pl->dma_addr); + sglp->len0 = cpu_to_be32(pbllen); + + *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16); + } else { + imdp = (struct fw_ri_immd *)(&wqe->fr + 1); + imdp->op = FW_RI_DATA_IMMD; + imdp->r1 = 0; + imdp->r2 = 0; + imdp->immdlen = cpu_to_be32(pbllen); + p = (__be64 *)(imdp + 1); + for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) + *p = cpu_to_be64( + (u64)wr->wr.fast_reg.page_list->page_list[i]); + *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, + 16); + } + return 0; +} + +static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, + u8 *len16) +{ + wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); + wqe->inv.r2 = 0; + *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); + return 0; +} + +void c4iw_qp_add_ref(struct ib_qp *qp) +{ + PDBG("%s ib_qp %p\n", __func__, qp); + atomic_inc(&(to_c4iw_qp(qp)->refcnt)); +} + +void c4iw_qp_rem_ref(struct ib_qp *qp) +{ + PDBG("%s ib_qp %p\n", __func__, qp); + if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt))) + wake_up(&(to_c4iw_qp(qp)->wait)); +} + +int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + int err = 0; + u8 len16 = 0; + enum fw_wr_opcodes fw_opcode = 0; + enum fw_ri_wr_flags fw_flags; + struct c4iw_qp *qhp; + union t4_wr *wqe; + u32 num_wrs; + struct t4_swsqe *swsqe; + unsigned long flag; + u16 idx = 0; + + qhp = to_c4iw_qp(ibqp); + spin_lock_irqsave(&qhp->lock, flag); + if (t4_wq_in_error(&qhp->wq)) { + spin_unlock_irqrestore(&qhp->lock, flag); + return -EINVAL; + } + num_wrs = t4_sq_avail(&qhp->wq); + if (num_wrs == 0) { + spin_unlock_irqrestore(&qhp->lock, flag); + return -ENOMEM; + } + while 
(wr) { + if (num_wrs == 0) { + err = -ENOMEM; + *bad_wr = wr; + break; + } + wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx]; + fw_flags = 0; + if (wr->send_flags & IB_SEND_SOLICITED) + fw_flags |= FW_RI_SOLICITED_EVENT_FLAG; + if (wr->send_flags & IB_SEND_SIGNALED) + fw_flags |= FW_RI_COMPLETION_FLAG; + swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; + switch (wr->opcode) { + case IB_WR_SEND_WITH_INV: + case IB_WR_SEND: + if (wr->send_flags & IB_SEND_FENCE) + fw_flags |= FW_RI_READ_FENCE_FLAG; + fw_opcode = FW_RI_SEND_WR; + if (wr->opcode == IB_WR_SEND) + swsqe->opcode = FW_RI_SEND; + else + swsqe->opcode = FW_RI_SEND_WITH_INV; + err = build_rdma_send(wqe, wr, &len16); + break; + case IB_WR_RDMA_WRITE: + fw_opcode = FW_RI_RDMA_WRITE_WR; + swsqe->opcode = FW_RI_RDMA_WRITE; + err = build_rdma_write(wqe, wr, &len16); + break; + case IB_WR_RDMA_READ: + fw_opcode = FW_RI_RDMA_READ_WR; + swsqe->opcode = FW_RI_READ_REQ; + fw_flags = 0; + err = build_rdma_read(wqe, wr, &len16); + if (err) + break; + swsqe->read_len = wr->sg_list[0].length; + if (!qhp->wq.sq.oldest_read) + qhp->wq.sq.oldest_read = swsqe; + break; + case IB_WR_FAST_REG_MR: + fw_opcode = FW_RI_FR_NSMR_WR; + swsqe->opcode = FW_RI_FAST_REGISTER; + err = build_fastreg(wqe, wr, &len16); + break; + case IB_WR_LOCAL_INV: + fw_opcode = FW_RI_INV_LSTAG_WR; + swsqe->opcode = FW_RI_LOCAL_INV; + err = build_inv_stag(wqe, wr, &len16); + break; + default: + PDBG("%s post of type=%d TBD!\n", __func__, + wr->opcode); + err = -EINVAL; + } + if (err) { + *bad_wr = wr; + break; + } + swsqe->idx = qhp->wq.sq.pidx; + swsqe->complete = 0; + swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED); + swsqe->wr_id = wr->wr_id; + + init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); + + PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n", + __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, + swsqe->opcode, swsqe->read_len); + wr = wr->next; + num_wrs--; + t4_sq_produce(&qhp->wq); + idx++; + } + if (t4_wq_db_enabled(&qhp->wq)) + t4_ring_sq_db(&qhp->wq, idx); + spin_unlock_irqrestore(&qhp->lock, flag); + return err; +} + +int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + int err = 0; + struct c4iw_qp *qhp; + union t4_recv_wr *wqe; + u32 num_wrs; + u8 len16 = 0; + unsigned long flag; + u16 idx = 0; + + qhp = to_c4iw_qp(ibqp); + spin_lock_irqsave(&qhp->lock, flag); + if (t4_wq_in_error(&qhp->wq)) { + spin_unlock_irqrestore(&qhp->lock, flag); + return -EINVAL; + } + num_wrs = t4_rq_avail(&qhp->wq); + if (num_wrs == 0) { + spin_unlock_irqrestore(&qhp->lock, flag); + return -ENOMEM; + } + while (wr) { + if (wr->num_sge > T4_MAX_RECV_SGE) { + err = -EINVAL; + *bad_wr = wr; + break; + } + wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx]; + if (num_wrs) + err = build_rdma_recv(qhp, wqe, wr, &len16); + else + err = -ENOMEM; + if (err) { + *bad_wr = wr; + break; + } + + qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; + + wqe->recv.opcode = FW_RI_RECV_WR; + wqe->recv.r1 = 0; + wqe->recv.wrid = qhp->wq.rq.pidx; + wqe->recv.r2[0] = 0; + wqe->recv.r2[1] = 0; + wqe->recv.r2[2] = 0; + wqe->recv.len16 = len16; + if (len16 < 5) + wqe->flits[8] = 0; + + PDBG("%s cookie 0x%llx pidx %u\n", __func__, + (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); + t4_rq_produce(&qhp->wq); + wr = wr->next; + num_wrs--; + idx++; + } + if (t4_wq_db_enabled(&qhp->wq)) + t4_ring_rq_db(&qhp->wq, idx); + spin_unlock_irqrestore(&qhp->lock, flag); + return err; +} + +int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind 
*mw_bind) +{ + return -ENOSYS; +} + +static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, + u8 *ecode) +{ + int status; + int tagged; + int opcode; + int rqtype; + int send_inv; + + if (!err_cqe) { + *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; + *ecode = 0; + return; + } + + status = CQE_STATUS(err_cqe); + opcode = CQE_OPCODE(err_cqe); + rqtype = RQ_TYPE(err_cqe); + send_inv = (opcode == FW_RI_SEND_WITH_INV) || + (opcode == FW_RI_SEND_WITH_SE_INV); + tagged = (opcode == FW_RI_RDMA_WRITE) || + (rqtype && (opcode == FW_RI_READ_RESP)); + + switch (status) { + case T4_ERR_STAG: + if (send_inv) { + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; + *ecode = RDMAP_CANT_INV_STAG; + } else { + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_INV_STAG; + } + break; + case T4_ERR_PDID: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + if ((opcode == FW_RI_SEND_WITH_INV) || + (opcode == FW_RI_SEND_WITH_SE_INV)) + *ecode = RDMAP_CANT_INV_STAG; + else + *ecode = RDMAP_STAG_NOT_ASSOC; + break; + case T4_ERR_QPID: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_STAG_NOT_ASSOC; + break; + case T4_ERR_ACCESS: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_ACC_VIOL; + break; + case T4_ERR_WRAP: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_TO_WRAP; + break; + case T4_ERR_BOUND: + if (tagged) { + *layer_type = LAYER_DDP|DDP_TAGGED_ERR; + *ecode = DDPT_BASE_BOUNDS; + } else { + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT; + *ecode = RDMAP_BASE_BOUNDS; + } + break; + case T4_ERR_INVALIDATE_SHARED_MR: + case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; + *ecode = RDMAP_CANT_INV_STAG; + break; + case T4_ERR_ECC: + case T4_ERR_ECC_PSTAG: + case T4_ERR_INTERNAL_ERR: + *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA; + *ecode = 0; + break; + case T4_ERR_OUT_OF_RQE: + *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; + *ecode = DDPU_INV_MSN_NOBUF; + break; + case T4_ERR_PBL_ADDR_BOUND: + *layer_type = LAYER_DDP|DDP_TAGGED_ERR; + *ecode = DDPT_BASE_BOUNDS; + break; + case T4_ERR_CRC: + *layer_type = LAYER_MPA|DDP_LLP; + *ecode = MPA_CRC_ERR; + break; + case T4_ERR_MARKER: + *layer_type = LAYER_MPA|DDP_LLP; + *ecode = MPA_MARKER_ERR; + break; + case T4_ERR_PDU_LEN_ERR: + *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; + *ecode = DDPU_MSG_TOOBIG; + break; + case T4_ERR_DDP_VERSION: + if (tagged) { + *layer_type = LAYER_DDP|DDP_TAGGED_ERR; + *ecode = DDPT_INV_VERS; + } else { + *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; + *ecode = DDPU_INV_VERS; + } + break; + case T4_ERR_RDMA_VERSION: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; + *ecode = RDMAP_INV_VERS; + break; + case T4_ERR_OPCODE: + *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP; + *ecode = RDMAP_INV_OPCODE; + break; + case T4_ERR_DDP_QUEUE_NUM: + *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; + *ecode = DDPU_INV_QN; + break; + case T4_ERR_MSN: + case T4_ERR_MSN_GAP: + case T4_ERR_MSN_RANGE: + case T4_ERR_IRD_OVERFLOW: + *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; + *ecode = DDPU_INV_MSN_RANGE; + break; + case T4_ERR_TBIT: + *layer_type = LAYER_DDP|DDP_LOCAL_CATA; + *ecode = 0; + break; + case T4_ERR_MO: + *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR; + *ecode = DDPU_INV_MO; + break; + default: + *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA; + *ecode = 0; + break; + } +} + +int c4iw_post_zb_read(struct c4iw_qp *qhp) +{ + union t4_wr *wqe; + struct sk_buff *skb; + u8 len16; + + PDBG("%s enter\n", __func__); + skb = alloc_skb(40, GFP_KERNEL); + if (!skb) { + printk(KERN_ERR "%s cannot send 
zb_read!!\n", __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); + + wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read); + memset(wqe, 0, sizeof wqe->read); + wqe->read.r2 = cpu_to_be64(0); + wqe->read.stag_sink = cpu_to_be32(1); + wqe->read.to_sink_hi = cpu_to_be32(0); + wqe->read.to_sink_lo = cpu_to_be32(1); + wqe->read.stag_src = cpu_to_be32(1); + wqe->read.plen = cpu_to_be32(0); + wqe->read.to_src_hi = cpu_to_be32(0); + wqe->read.to_src_lo = cpu_to_be32(1); + len16 = DIV_ROUND_UP(sizeof wqe->read, 16); + init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16); + + return c4iw_ofld_send(&qhp->rhp->rdev, skb); +} + +int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe) +{ + struct fw_ri_wr *wqe; + struct sk_buff *skb; + struct terminate_message *term; + + PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, + qhp->ep->hwtid); + + skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) + return -ENOMEM; + set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); + + wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + memset(wqe, 0, sizeof *wqe); + wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR)); + wqe->flowid_len16 = cpu_to_be32( + FW_WR_FLOWID(qhp->ep->hwtid) | + FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); + + wqe->u.terminate.type = FW_RI_TYPE_TERMINATE; + wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); + term = (struct terminate_message *)wqe->u.terminate.termmsg; + build_term_codes(err_cqe, &term->layer_etype, &term->ecode); + return c4iw_ofld_send(&qhp->rhp->rdev, skb); +} + +/* + * Assumes qhp lock is held. + */ +static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, + struct c4iw_cq *schp, unsigned long *flag) +{ + int count; + int flushed; + + PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); + /* take a ref on the qhp since we must release the lock */ + atomic_inc(&qhp->refcnt); + spin_unlock_irqrestore(&qhp->lock, *flag); + + /* locking hierarchy: cq lock first, then qp lock. */ + spin_lock_irqsave(&rchp->lock, *flag); + spin_lock(&qhp->lock); + c4iw_flush_hw_cq(&rchp->cq); + c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); + flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); + spin_unlock(&qhp->lock); + spin_unlock_irqrestore(&rchp->lock, *flag); + if (flushed) + (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); + + /* locking hierarchy: cq lock first, then qp lock.
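+ * Taking the CQ lock before the QP lock on both flush legs keeps the
+ * lock ordering consistent and so avoids an AB-BA deadlock.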
*/ + spin_lock_irqsave(&schp->lock, *flag); + spin_lock(&qhp->lock); + c4iw_flush_hw_cq(&schp->cq); + c4iw_count_scqes(&schp->cq, &qhp->wq, &count); + flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); + spin_unlock(&qhp->lock); + spin_unlock_irqrestore(&schp->lock, *flag); + if (flushed) + (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); + + /* deref */ + if (atomic_dec_and_test(&qhp->refcnt)) + wake_up(&qhp->wait); + + spin_lock_irqsave(&qhp->lock, *flag); +} + +static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag) +{ + struct c4iw_cq *rchp, *schp; + + rchp = get_chp(qhp->rhp, qhp->attr.rcq); + schp = get_chp(qhp->rhp, qhp->attr.scq); + + if (qhp->ibqp.uobject) { + t4_set_wq_in_error(&qhp->wq); + t4_set_cq_in_error(&rchp->cq); + if (schp != rchp) + t4_set_cq_in_error(&schp->cq); + return; + } + __flush_qp(qhp, rchp, schp, flag); +} + +static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp) +{ + struct fw_ri_wr *wqe; + int ret; + struct c4iw_wr_wait wr_wait; + struct sk_buff *skb; + + PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, + qhp->ep->hwtid); + + skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) + return -ENOMEM; + set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); + + wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + memset(wqe, 0, sizeof *wqe); + wqe->op_compl = cpu_to_be32( + FW_WR_OP(FW_RI_INIT_WR) | + FW_WR_COMPL(1)); + wqe->flowid_len16 = cpu_to_be32( + FW_WR_FLOWID(qhp->ep->hwtid) | + FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); + wqe->cookie = (u64)&wr_wait; + + wqe->u.fini.type = FW_RI_TYPE_FINI; + c4iw_init_wr_wait(&wr_wait); + ret = c4iw_ofld_send(&rhp->rdev, skb); + if (ret) + goto out; + + wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); + if (!wr_wait.done) { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(rhp->rdev.lldi.pdev)); + rhp->rdev.flags = T4_FATAL_ERROR; + ret = -EIO; + } else { + ret = wr_wait.ret; + if (ret) + printk(KERN_WARNING MOD + "%s: Abnormal close qpid %d ret %u\n", + pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid, + ret); + } +out: + PDBG("%s ret %d\n", __func__, ret); + return ret; +} + +static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init) +{ + memset(&init->u, 0, sizeof init->u); + switch (p2p_type) { + case FW_RI_INIT_P2PTYPE_RDMA_WRITE: + init->u.write.opcode = FW_RI_RDMA_WRITE_WR; + init->u.write.stag_sink = cpu_to_be32(1); + init->u.write.to_sink = cpu_to_be64(1); + init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD; + init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write + + sizeof(struct fw_ri_immd), + 16); + break; + case FW_RI_INIT_P2PTYPE_READ_REQ: + init->u.write.opcode = FW_RI_RDMA_READ_WR; + init->u.read.stag_src = cpu_to_be32(1); + init->u.read.to_src_lo = cpu_to_be32(1); + init->u.read.stag_sink = cpu_to_be32(1); + init->u.read.to_sink_lo = cpu_to_be32(1); + init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16); + break; + } +} + +static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) +{ + struct fw_ri_wr *wqe; + int ret; + struct c4iw_wr_wait wr_wait; + struct sk_buff *skb; + + PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, + qhp->ep->hwtid); + + skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL); + if (!skb) + return -ENOMEM; + set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); + + wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); + memset(wqe, 0, sizeof *wqe); + wqe->op_compl = cpu_to_be32( + FW_WR_OP(FW_RI_INIT_WR) | + FW_WR_COMPL(1)); + wqe->flowid_len16 = 
cpu_to_be32( + FW_WR_FLOWID(qhp->ep->hwtid) | + FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); + + wqe->cookie = (u64)&wr_wait; + + wqe->u.init.type = FW_RI_TYPE_INIT; + wqe->u.init.mpareqbit_p2ptype = + V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) | + V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type); + wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE; + if (qhp->attr.mpa_attr.recv_marker_enabled) + wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE; + if (qhp->attr.mpa_attr.xmit_marker_enabled) + wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE; + if (qhp->attr.mpa_attr.crc_enabled) + wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE; + + wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE | + FW_RI_QP_RDMA_WRITE_ENABLE | + FW_RI_QP_BIND_ENABLE; + if (!qhp->ibqp.uobject) + wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE | + FW_RI_QP_STAG0_ENABLE; + wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); + wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd); + wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); + wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); + wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); + wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq); + wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq); + wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord); + wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird); + wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq); + wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq); + wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); + wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - + rhp->rdev.lldi.vr->rq.start); + if (qhp->attr.mpa_attr.initiator) + build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); + + c4iw_init_wr_wait(&wr_wait); + ret = c4iw_ofld_send(&rhp->rdev, skb); + if (ret) + goto out; + + wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); + if (!wr_wait.done) { + printk(KERN_ERR MOD "Device %s not responding!\n", + pci_name(rhp->rdev.lldi.pdev)); + rhp->rdev.flags = T4_FATAL_ERROR; + ret = -EIO; + } else + ret = wr_wait.ret; +out: + PDBG("%s ret %d\n", __func__, ret); + return ret; +} + +int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, + enum c4iw_qp_attr_mask mask, + struct c4iw_qp_attributes *attrs, + int internal) +{ + int ret = 0; + struct c4iw_qp_attributes newattr = qhp->attr; + unsigned long flag; + int disconnect = 0; + int terminate = 0; + int abort = 0; + int free = 0; + struct c4iw_ep *ep = NULL; + + PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__, + qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, + (mask & C4IW_QP_ATTR_NEXT_STATE) ? 
attrs->next_state : -1); + + spin_lock_irqsave(&qhp->lock, flag); + + /* Process attr changes if in IDLE */ + if (mask & C4IW_QP_ATTR_VALID_MODIFY) { + if (qhp->attr.state != C4IW_QP_STATE_IDLE) { + ret = -EIO; + goto out; + } + if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ) + newattr.enable_rdma_read = attrs->enable_rdma_read; + if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE) + newattr.enable_rdma_write = attrs->enable_rdma_write; + if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) + newattr.enable_bind = attrs->enable_bind; + if (mask & C4IW_QP_ATTR_MAX_ORD) { + if (attrs->max_ord > T4_MAX_READ_DEPTH) { + ret = -EINVAL; + goto out; + } + newattr.max_ord = attrs->max_ord; + } + if (mask & C4IW_QP_ATTR_MAX_IRD) { + if (attrs->max_ird > T4_MAX_READ_DEPTH) { + ret = -EINVAL; + goto out; + } + newattr.max_ird = attrs->max_ird; + } + qhp->attr = newattr; + } + + if (!(mask & C4IW_QP_ATTR_NEXT_STATE)) + goto out; + if (qhp->attr.state == attrs->next_state) + goto out; + + switch (qhp->attr.state) { + case C4IW_QP_STATE_IDLE: + switch (attrs->next_state) { + case C4IW_QP_STATE_RTS: + if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) { + ret = -EINVAL; + goto out; + } + if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) { + ret = -EINVAL; + goto out; + } + qhp->attr.mpa_attr = attrs->mpa_attr; + qhp->attr.llp_stream_handle = attrs->llp_stream_handle; + qhp->ep = qhp->attr.llp_stream_handle; + qhp->attr.state = C4IW_QP_STATE_RTS; + + /* + * Ref the endpoint here and deref when we + * disassociate the endpoint from the QP. This + * happens in CLOSING->IDLE transition or *->ERROR + * transition. + */ + c4iw_get_ep(&qhp->ep->com); + spin_unlock_irqrestore(&qhp->lock, flag); + ret = rdma_init(rhp, qhp); + spin_lock_irqsave(&qhp->lock, flag); + if (ret) + goto err; + break; + case C4IW_QP_STATE_ERROR: + qhp->attr.state = C4IW_QP_STATE_ERROR; + flush_qp(qhp, &flag); + break; + default: + ret = -EINVAL; + goto out; + } + break; + case C4IW_QP_STATE_RTS: + switch (attrs->next_state) { + case C4IW_QP_STATE_CLOSING: + BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); + qhp->attr.state = C4IW_QP_STATE_CLOSING; + if (!internal) { + abort = 0; + disconnect = 1; + ep = qhp->ep; + c4iw_get_ep(&ep->com); + } + spin_unlock_irqrestore(&qhp->lock, flag); + ret = rdma_fini(rhp, qhp); + spin_lock_irqsave(&qhp->lock, flag); + if (ret) { + ep = qhp->ep; + c4iw_get_ep(&ep->com); + disconnect = abort = 1; + goto err; + } + break; + case C4IW_QP_STATE_TERMINATE: + qhp->attr.state = C4IW_QP_STATE_TERMINATE; + if (qhp->ibqp.uobject) + t4_set_wq_in_error(&qhp->wq); + if (!internal) { + ep = qhp->ep; + c4iw_get_ep(&ep->com); + terminate = 1; + disconnect = 1; + } + break; + case C4IW_QP_STATE_ERROR: + qhp->attr.state = C4IW_QP_STATE_ERROR; + if (!internal) { + abort = 1; + disconnect = 1; + ep = qhp->ep; + c4iw_get_ep(&ep->com); + } + goto err; + break; + default: + ret = -EINVAL; + goto out; + } + break; + case C4IW_QP_STATE_CLOSING: + if (!internal) { + ret = -EINVAL; + goto out; + } + switch (attrs->next_state) { + case C4IW_QP_STATE_IDLE: + flush_qp(qhp, &flag); + qhp->attr.state = C4IW_QP_STATE_IDLE; + qhp->attr.llp_stream_handle = NULL; + c4iw_put_ep(&qhp->ep->com); + qhp->ep = NULL; + wake_up(&qhp->wait); + break; + case C4IW_QP_STATE_ERROR: + goto err; + default: + ret = -EINVAL; + goto err; + } + break; + case C4IW_QP_STATE_ERROR: + if (attrs->next_state != C4IW_QP_STATE_IDLE) { + ret = -EINVAL; + goto out; + } + if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { + ret = -EINVAL; + goto out; + } + qhp->attr.state = C4IW_QP_STATE_IDLE; + break; 
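+ /*
+ * A QP already in TERMINATE can only be driven to ERROR, and only
+ * by an internal transition; the case below rejects anything else.
+ */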
+ case C4IW_QP_STATE_TERMINATE: + if (!internal) { + ret = -EINVAL; + goto out; + } + goto err; + break; + default: + printk(KERN_ERR "%s in a bad state %d\n", + __func__, qhp->attr.state); + ret = -EINVAL; + goto err; + break; + } + goto out; +err: + PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep, + qhp->wq.sq.qid); + + /* disassociate the LLP connection */ + qhp->attr.llp_stream_handle = NULL; + ep = qhp->ep; + qhp->ep = NULL; + qhp->attr.state = C4IW_QP_STATE_ERROR; + free = 1; + wake_up(&qhp->wait); + BUG_ON(!ep); + flush_qp(qhp, &flag); +out: + spin_unlock_irqrestore(&qhp->lock, flag); + + if (terminate) + c4iw_post_terminate(qhp, NULL); + + /* + * If disconnect is 1, then we need to initiate a disconnect + * on the EP. This can be a normal close (RTS->CLOSING) or + * an abnormal close (RTS/CLOSING->ERROR). + */ + if (disconnect) { + c4iw_ep_disconnect(ep, abort, GFP_KERNEL); + c4iw_put_ep(&ep->com); + } + + /* + * If free is 1, then we've disassociated the EP from the QP + * and we need to dereference the EP. + */ + if (free) + c4iw_put_ep(&ep->com); + + PDBG("%s exit state %d\n", __func__, qhp->attr.state); + return ret; +} + +int c4iw_destroy_qp(struct ib_qp *ib_qp) +{ + struct c4iw_dev *rhp; + struct c4iw_qp *qhp; + struct c4iw_qp_attributes attrs; + struct c4iw_ucontext *ucontext; + + qhp = to_c4iw_qp(ib_qp); + rhp = qhp->rhp; + + attrs.next_state = C4IW_QP_STATE_ERROR; + c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + wait_event(qhp->wait, !qhp->ep); + + remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); + remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid); + atomic_dec(&qhp->refcnt); + wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); + + ucontext = ib_qp->uobject ? + to_c4iw_ucontext(ib_qp->uobject->context) : NULL; + destroy_qp(&rhp->rdev, &qhp->wq, + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); + + PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); + kfree(qhp); + return 0; +} + +struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, + struct ib_udata *udata) +{ + struct c4iw_dev *rhp; + struct c4iw_qp *qhp; + struct c4iw_pd *php; + struct c4iw_cq *schp; + struct c4iw_cq *rchp; + struct c4iw_create_qp_resp uresp; + int sqsize, rqsize; + struct c4iw_ucontext *ucontext; + int ret; + struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4; + + PDBG("%s ib_pd %p\n", __func__, pd); + + if (attrs->qp_type != IB_QPT_RC) + return ERR_PTR(-EINVAL); + + php = to_c4iw_pd(pd); + rhp = php->rhp; + schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); + rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); + if (!schp || !rchp) + return ERR_PTR(-EINVAL); + + if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) + return ERR_PTR(-EINVAL); + + rqsize = roundup(attrs->cap.max_recv_wr + 1, 16); + if (rqsize > T4_MAX_RQ_SIZE) + return ERR_PTR(-E2BIG); + + sqsize = roundup(attrs->cap.max_send_wr + 1, 16); + if (sqsize > T4_MAX_SQ_SIZE) + return ERR_PTR(-E2BIG); + + ucontext = pd->uobject ? 
to_c4iw_ucontext(pd->uobject->context) : NULL; + + + qhp = kzalloc(sizeof(*qhp), GFP_KERNEL); + if (!qhp) + return ERR_PTR(-ENOMEM); + qhp->wq.sq.size = sqsize; + qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue; + qhp->wq.rq.size = rqsize; + qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue; + + if (ucontext) { + qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); + qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); + } + + PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n", + __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize); + + ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); + if (ret) + goto err1; + + attrs->cap.max_recv_wr = rqsize - 1; + attrs->cap.max_send_wr = sqsize - 1; + attrs->cap.max_inline_data = T4_MAX_SEND_INLINE; + + qhp->rhp = rhp; + qhp->attr.pd = php->pdid; + qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid; + qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid; + qhp->attr.sq_num_entries = attrs->cap.max_send_wr; + qhp->attr.rq_num_entries = attrs->cap.max_recv_wr; + qhp->attr.sq_max_sges = attrs->cap.max_send_sge; + qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge; + qhp->attr.rq_max_sges = attrs->cap.max_recv_sge; + qhp->attr.state = C4IW_QP_STATE_IDLE; + qhp->attr.next_state = C4IW_QP_STATE_IDLE; + qhp->attr.enable_rdma_read = 1; + qhp->attr.enable_rdma_write = 1; + qhp->attr.enable_bind = 1; + qhp->attr.max_ord = 1; + qhp->attr.max_ird = 1; + spin_lock_init(&qhp->lock); + init_waitqueue_head(&qhp->wait); + atomic_set(&qhp->refcnt, 1); + + ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); + if (ret) + goto err2; + + ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid); + if (ret) + goto err3; + + if (udata) { + mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); + if (!mm1) { + ret = -ENOMEM; + goto err4; + } + mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); + if (!mm2) { + ret = -ENOMEM; + goto err5; + } + mm3 = kmalloc(sizeof *mm3, GFP_KERNEL); + if (!mm3) { + ret = -ENOMEM; + goto err6; + } + mm4 = kmalloc(sizeof *mm4, GFP_KERNEL); + if (!mm4) { + ret = -ENOMEM; + goto err7; + } + + uresp.qid_mask = rhp->rdev.qpmask; + uresp.sqid = qhp->wq.sq.qid; + uresp.sq_size = qhp->wq.sq.size; + uresp.sq_memsize = qhp->wq.sq.memsize; + uresp.rqid = qhp->wq.rq.qid; + uresp.rq_size = qhp->wq.rq.size; + uresp.rq_memsize = qhp->wq.rq.memsize; + spin_lock(&ucontext->mmap_lock); + uresp.sq_key = ucontext->key; + ucontext->key += PAGE_SIZE; + uresp.rq_key = ucontext->key; + ucontext->key += PAGE_SIZE; + uresp.sq_db_gts_key = ucontext->key; + ucontext->key += PAGE_SIZE; + uresp.rq_db_gts_key = ucontext->key; + ucontext->key += PAGE_SIZE; + spin_unlock(&ucontext->mmap_lock); + ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); + if (ret) + goto err8; + mm1->key = uresp.sq_key; + mm1->addr = virt_to_phys(qhp->wq.sq.queue); + mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); + insert_mmap(ucontext, mm1); + mm2->key = uresp.rq_key; + mm2->addr = virt_to_phys(qhp->wq.rq.queue); + mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); + insert_mmap(ucontext, mm2); + mm3->key = uresp.sq_db_gts_key; + mm3->addr = qhp->wq.sq.udb; + mm3->len = PAGE_SIZE; + insert_mmap(ucontext, mm3); + mm4->key = uresp.rq_db_gts_key; + mm4->addr = qhp->wq.rq.udb; + mm4->len = PAGE_SIZE; + insert_mmap(ucontext, mm4); + } + qhp->ibqp.qp_num = qhp->wq.sq.qid; + init_timer(&(qhp->timer)); + PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n", + __func__, qhp, 
qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, + qhp->wq.sq.qid); + return &qhp->ibqp; +err8: + kfree(mm4); +err7: + kfree(mm3); +err6: + kfree(mm2); +err5: + kfree(mm1); +err4: + remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid); +err3: + remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); +err2: + destroy_qp(&rhp->rdev, &qhp->wq, + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); +err1: + kfree(qhp); + return ERR_PTR(ret); +} + +int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct c4iw_dev *rhp; + struct c4iw_qp *qhp; + enum c4iw_qp_attr_mask mask = 0; + struct c4iw_qp_attributes attrs; + + PDBG("%s ib_qp %p\n", __func__, ibqp); + + /* iwarp does not support the RTR state */ + if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR)) + attr_mask &= ~IB_QP_STATE; + + /* Make sure we still have something left to do */ + if (!attr_mask) + return 0; + + memset(&attrs, 0, sizeof attrs); + qhp = to_c4iw_qp(ibqp); + rhp = qhp->rhp; + + attrs.next_state = c4iw_convert_state(attr->qp_state); + attrs.enable_rdma_read = (attr->qp_access_flags & + IB_ACCESS_REMOTE_READ) ? 1 : 0; + attrs.enable_rdma_write = (attr->qp_access_flags & + IB_ACCESS_REMOTE_WRITE) ? 1 : 0; + attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0; + + + mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0; + mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ? + (C4IW_QP_ATTR_ENABLE_RDMA_READ | + C4IW_QP_ATTR_ENABLE_RDMA_WRITE | + C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0; + + return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); +} + +struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) +{ + PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); + return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); +} diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c new file mode 100644 index 00000000000..fb195d1d901 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/resource.c @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +/* Crude resource management */ +#include <linux/kernel.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/kfifo.h> +#include <linux/spinlock.h> +#include <linux/errno.h> +#include <linux/genalloc.h> +#include "iw_cxgb4.h" + +#define RANDOM_SIZE 16 + +static int __c4iw_init_resource_fifo(struct kfifo *fifo, + spinlock_t *fifo_lock, + u32 nr, u32 skip_low, + u32 skip_high, + int random) +{ + u32 i, j, entry = 0, idx; + u32 random_bytes; + u32 rarray[16]; + spin_lock_init(fifo_lock); + + if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL)) + return -ENOMEM; + + for (i = 0; i < skip_low + skip_high; i++) + kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32)); + if (random) { + j = 0; + random_bytes = random32(); + for (i = 0; i < RANDOM_SIZE; i++) + rarray[i] = i + skip_low; + for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) { + if (j >= RANDOM_SIZE) { + j = 0; + random_bytes = random32(); + } + idx = (random_bytes >> (j * 2)) & 0xF; + kfifo_in(fifo, + (unsigned char *) &rarray[idx], + sizeof(u32)); + rarray[idx] = i; + j++; + } + for (i = 0; i < RANDOM_SIZE; i++) + kfifo_in(fifo, + (unsigned char *) &rarray[i], + sizeof(u32)); + } else + for (i = skip_low; i < nr - skip_high; i++) + kfifo_in(fifo, (unsigned char *) &i, sizeof(u32)); + + for (i = 0; i < skip_low + skip_high; i++) + if (kfifo_out_locked(fifo, (unsigned char *) &entry, + sizeof(u32), fifo_lock)) + break; + return 0; +} + +static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock, + u32 nr, u32 skip_low, u32 skip_high) +{ + return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low, + skip_high, 0); +} + +static int c4iw_init_resource_fifo_random(struct kfifo *fifo, + spinlock_t *fifo_lock, + u32 nr, u32 skip_low, u32 skip_high) +{ + return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low, + skip_high, 1); +} + +static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev) +{ + u32 i; + + spin_lock_init(&rdev->resource.qid_fifo_lock); + + if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32), + GFP_KERNEL)) + return -ENOMEM; + + for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++) + if (!(i & rdev->qpmask)) + kfifo_in(&rdev->resource.qid_fifo, + (unsigned char *) &i, sizeof(u32)); + return 0; +} + +/* nr_* must be power of 2 */ +int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid) +{ + int err = 0; + err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo, + &rdev->resource.tpt_fifo_lock, + nr_tpt, 1, 0); + if (err) + goto tpt_err; + err = c4iw_init_qid_fifo(rdev); + if (err) + goto qid_err; + err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo, + &rdev->resource.pdid_fifo_lock, + nr_pdid, 1, 0); + if (err) + goto pdid_err; + return 0; +pdid_err: + kfifo_free(&rdev->resource.qid_fifo); +qid_err: + kfifo_free(&rdev->resource.tpt_fifo); +tpt_err: + return -ENOMEM; +} + +/* + * returns 0 if no resource available + */ +u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock) +{ + u32 entry; + if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)) + return entry; + else + return 0; +} + +void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock) +{ + PDBG("%s entry 0x%x\n", __func__, entry); + kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock); +} + +u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) +{ + struct c4iw_qid_list *entry; + u32 qid; + int i; + + mutex_lock(&uctx->lock); + if (!list_empty(&uctx->cqids)) { + entry = list_entry(uctx->cqids.next, struct c4iw_qid_list, + entry); + list_del(&entry->entry); + qid = entry->qid; + kfree(entry); + } else { + qid =
c4iw_get_resource(&rdev->resource.qid_fifo, + &rdev->resource.qid_fifo_lock); + if (!qid) + goto out; + for (i = qid+1; i & rdev->qpmask; i++) { + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + goto out; + entry->qid = i; + list_add_tail(&entry->entry, &uctx->cqids); + } + + /* + * now put the same ids on the qp list since they all + * map to the same db/gts page. + */ + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + goto out; + entry->qid = qid; + list_add_tail(&entry->entry, &uctx->qpids); + for (i = qid+1; i & rdev->qpmask; i++) { + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + goto out; + entry->qid = i; + list_add_tail(&entry->entry, &uctx->qpids); + } + } +out: + mutex_unlock(&uctx->lock); + PDBG("%s qid 0x%x\n", __func__, qid); + return qid; +} + +void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, + struct c4iw_dev_ucontext *uctx) +{ + struct c4iw_qid_list *entry; + + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + return; + PDBG("%s qid 0x%x\n", __func__, qid); + entry->qid = qid; + mutex_lock(&uctx->lock); + list_add_tail(&entry->entry, &uctx->cqids); + mutex_unlock(&uctx->lock); +} + +u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) +{ + struct c4iw_qid_list *entry; + u32 qid; + int i; + + mutex_lock(&uctx->lock); + if (!list_empty(&uctx->qpids)) { + entry = list_entry(uctx->qpids.next, struct c4iw_qid_list, + entry); + list_del(&entry->entry); + qid = entry->qid; + kfree(entry); + } else { + qid = c4iw_get_resource(&rdev->resource.qid_fifo, + &rdev->resource.qid_fifo_lock); + if (!qid) + goto out; + for (i = qid+1; i & rdev->qpmask; i++) { + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + goto out; + entry->qid = i; + list_add_tail(&entry->entry, &uctx->qpids); + } + + /* + * now put the same ids on the cq list since they all + * map to the same db/gts page. + */ + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + goto out; + entry->qid = qid; + list_add_tail(&entry->entry, &uctx->cqids); + for (i = qid; i & rdev->qpmask; i++) { + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + goto out; + entry->qid = i; + list_add_tail(&entry->entry, &uctx->cqids); + } + } +out: + mutex_unlock(&uctx->lock); + PDBG("%s qid 0x%x\n", __func__, qid); + return qid; +} + +void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, + struct c4iw_dev_ucontext *uctx) +{ + struct c4iw_qid_list *entry; + + entry = kmalloc(sizeof *entry, GFP_KERNEL); + if (!entry) + return; + PDBG("%s qid 0x%x\n", __func__, qid); + entry->qid = qid; + mutex_lock(&uctx->lock); + list_add_tail(&entry->entry, &uctx->qpids); + mutex_unlock(&uctx->lock); +} + +void c4iw_destroy_resource(struct c4iw_resource *rscp) +{ + kfifo_free(&rscp->tpt_fifo); + kfifo_free(&rscp->qid_fifo); + kfifo_free(&rscp->pdid_fifo); +} + +/* + * PBL Memory Manager. Uses Linux generic allocator. 
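+ * The pool is seeded with the adapter's PBL address range by
+ * c4iw_pblpool_create() below and hands out chunks in granules of
+ * 1 << MIN_PBL_SHIFT bytes.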
+ */ + +#define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */ + +u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) +{ + unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); + PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); + return (u32)addr; +} + +void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) +{ + PDBG("%s addr 0x%x size %d\n", __func__, addr, size); + gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size); +} + +int c4iw_pblpool_create(struct c4iw_rdev *rdev) +{ + unsigned pbl_start, pbl_chunk, pbl_top; + + rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1); + if (!rdev->pbl_pool) + return -ENOMEM; + + pbl_start = rdev->lldi.vr->pbl.start; + pbl_chunk = rdev->lldi.vr->pbl.size; + pbl_top = pbl_start + pbl_chunk; + + while (pbl_start < pbl_top) { + pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); + if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { + PDBG("%s failed to add PBL chunk (%x/%x)\n", + __func__, pbl_start, pbl_chunk); + if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { + printk(KERN_WARNING MOD + "Failed to add all PBL chunks (%x/%x)\n", + pbl_start, + pbl_top - pbl_start); + return 0; + } + pbl_chunk >>= 1; + } else { + PDBG("%s added PBL chunk (%x/%x)\n", + __func__, pbl_start, pbl_chunk); + pbl_start += pbl_chunk; + } + } + + return 0; +} + +void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) +{ + gen_pool_destroy(rdev->pbl_pool); +} + +/* + * RQT Memory Manager. Uses Linux generic allocator. + */ + +#define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */ + +u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) +{ + unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); + PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); + return (u32)addr; +} + +void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) +{ + PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6); + gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6); +} + +int c4iw_rqtpool_create(struct c4iw_rdev *rdev) +{ + unsigned rqt_start, rqt_chunk, rqt_top; + + rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1); + if (!rdev->rqt_pool) + return -ENOMEM; + + rqt_start = rdev->lldi.vr->rq.start; + rqt_chunk = rdev->lldi.vr->rq.size; + rqt_top = rqt_start + rqt_chunk; + + while (rqt_start < rqt_top) { + rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); + if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { + PDBG("%s failed to add RQT chunk (%x/%x)\n", + __func__, rqt_start, rqt_chunk); + if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { + printk(KERN_WARNING MOD + "Failed to add all RQT chunks (%x/%x)\n", + rqt_start, rqt_top - rqt_start); + return 0; + } + rqt_chunk >>= 1; + } else { + PDBG("%s added RQT chunk (%x/%x)\n", + __func__, rqt_start, rqt_chunk); + rqt_start += rqt_chunk; + } + } + return 0; +} + +void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) +{ + gen_pool_destroy(rdev->rqt_pool); +} diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h new file mode 100644 index 00000000000..3f0d2172efd --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -0,0 +1,536 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __T4_H__ +#define __T4_H__ + +#include "t4_hw.h" +#include "t4_regs.h" +#include "t4_msg.h" +#include "t4fw_ri_api.h" + +#define T4_MAX_READ_DEPTH 16 +#define T4_QID_BASE 1024 +#define T4_MAX_QIDS 256 +#define T4_MAX_NUM_QP (1<<16) +#define T4_MAX_NUM_CQ (1<<15) +#define T4_MAX_NUM_PD (1<<15) +#define T4_MAX_PBL_SIZE 256 +#define T4_MAX_RQ_SIZE 1024 +#define T4_MAX_SQ_SIZE 1024 +#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1) +#define T4_MAX_CQ_DEPTH 8192 +#define T4_MAX_NUM_STAG (1<<15) +#define T4_MAX_MR_SIZE (~0ULL - 1) +#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ +#define T4_STAG_UNSET 0xffffffff +#define T4_FW_MAJ 0 +#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 
2 : 1) + +struct t4_status_page { + __be32 rsvd1; /* flit 0 - hw owns */ + __be16 rsvd2; + __be16 qid; + __be16 cidx; + __be16 pidx; + u8 qp_err; /* flit 1 - sw owns */ + u8 db_off; +}; + +#define T4_EQ_SIZE 64 + +#define T4_SQ_NUM_SLOTS 4 +#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS) +#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ + sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) +#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ + sizeof(struct fw_ri_immd))) +#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \ + sizeof(struct fw_ri_rdma_write_wr) - \ + sizeof(struct fw_ri_immd))) +#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \ + sizeof(struct fw_ri_rdma_write_wr) - \ + sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) +#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ + sizeof(struct fw_ri_immd))) +#define T4_MAX_FR_DEPTH 255 + +#define T4_RQ_NUM_SLOTS 2 +#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS) +#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \ + sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) + +union t4_wr { + struct fw_ri_res_wr res; + struct fw_ri_wr ri; + struct fw_ri_rdma_write_wr write; + struct fw_ri_send_wr send; + struct fw_ri_rdma_read_wr read; + struct fw_ri_bind_mw_wr bind; + struct fw_ri_fr_nsmr_wr fr; + struct fw_ri_inv_lstag_wr inv; + struct t4_status_page status; + __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS]; +}; + +union t4_recv_wr { + struct fw_ri_recv_wr recv; + struct t4_status_page status; + __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS]; +}; + +static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, + enum fw_wr_opcodes opcode, u8 flags, u8 len16) +{ + int slots_used; + + wqe->send.opcode = (u8)opcode; + wqe->send.flags = flags; + wqe->send.wrid = wrid; + wqe->send.r1[0] = 0; + wqe->send.r1[1] = 0; + wqe->send.r1[2] = 0; + wqe->send.len16 = len16; + + slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE); + while (slots_used < T4_SQ_NUM_SLOTS) { + wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0; + slots_used++; + } +} + +/* CQE/AE status codes */ +#define T4_ERR_SUCCESS 0x0 +#define T4_ERR_STAG 0x1 /* STAG invalid: either the */ + /* STAG is off limit, being 0, */ + /* or STAG_key mismatch */ +#define T4_ERR_PDID 0x2 /* PDID mismatch */ +#define T4_ERR_QPID 0x3 /* QPID mismatch */ +#define T4_ERR_ACCESS 0x4 /* Invalid access right */ +#define T4_ERR_WRAP 0x5 /* Wrap error */ +#define T4_ERR_BOUND 0x6 /* base and bounds violation */ +#define T4_ERR_INVALIDATE_SHARED_MR 0x7 /* attempt to invalidate a */ + /* shared memory region */ +#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */ + /* shared memory region */ +#define T4_ERR_ECC 0x9 /* ECC error detected */ +#define T4_ERR_ECC_PSTAG 0xA /* ECC error detected when */ + /* reading PSTAG for a MW */ + /* Invalidate */ +#define T4_ERR_PBL_ADDR_BOUND 0xB /* pbl addr out of bounds: */ + /* software error */ +#define T4_ERR_SWFLUSH 0xC /* SW FLUSHED */ +#define T4_ERR_CRC 0x10 /* CRC error */ +#define T4_ERR_MARKER 0x11 /* Marker error */ +#define T4_ERR_PDU_LEN_ERR 0x12 /* invalid PDU length */ +#define T4_ERR_OUT_OF_RQE 0x13 /* out of RQE */ +#define T4_ERR_DDP_VERSION 0x14 /* wrong DDP version */ +#define T4_ERR_RDMA_VERSION 0x15 /* wrong RDMA version */ +#define T4_ERR_OPCODE 0x16 /* invalid rdma opcode */ +#define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */ +#define T4_ERR_MSN 0x18 /* MSN
error */ +#define T4_ERR_TBIT 0x19 /* tag bit not set correctly */ +#define T4_ERR_MO 0x1A /* MO not 0 for TERMINATE */ + /* or READ_REQ */ +#define T4_ERR_MSN_GAP 0x1B +#define T4_ERR_MSN_RANGE 0x1C +#define T4_ERR_IRD_OVERFLOW 0x1D +#define T4_ERR_RQE_ADDR_BOUND 0x1E /* RQE addr out of bounds: */ + /* software error */ +#define T4_ERR_INTERNAL_ERR 0x1F /* internal error (opcode */ + /* mismatch) */ +/* + * CQE defs + */ +struct t4_cqe { + __be32 header; + __be32 len; + union { + struct { + __be32 stag; + __be32 msn; + } rcqe; + struct { + u32 nada1; + u16 nada2; + u16 cidx; + } scqe; + struct { + __be32 wrid_hi; + __be32 wrid_low; + } gen; + } u; + __be64 reserved; + __be64 bits_type_ts; +}; + +/* macros for flit 0 of the cqe */ + +#define S_CQE_QPID 12 +#define M_CQE_QPID 0xFFFFF +#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID) +#define V_CQE_QPID(x) ((x) << S_CQE_QPID) + +#define S_CQE_SWCQE 11 +#define M_CQE_SWCQE 0x1 +#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE) +#define V_CQE_SWCQE(x) ((x) << S_CQE_SWCQE) + +#define S_CQE_STATUS 5 +#define M_CQE_STATUS 0x1F +#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS) +#define V_CQE_STATUS(x) ((x) << S_CQE_STATUS) + +#define S_CQE_TYPE 4 +#define M_CQE_TYPE 0x1 +#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE) +#define V_CQE_TYPE(x) ((x) << S_CQE_TYPE) + +#define S_CQE_OPCODE 0 +#define M_CQE_OPCODE 0xF +#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE) +#define V_CQE_OPCODE(x) ((x) << S_CQE_OPCODE) + +#define SW_CQE(x) (G_CQE_SWCQE(be32_to_cpu((x)->header))) +#define CQE_QPID(x) (G_CQE_QPID(be32_to_cpu((x)->header))) +#define CQE_TYPE(x) (G_CQE_TYPE(be32_to_cpu((x)->header))) +#define SQ_TYPE(x) (CQE_TYPE((x))) +#define RQ_TYPE(x) (!CQE_TYPE((x))) +#define CQE_STATUS(x) (G_CQE_STATUS(be32_to_cpu((x)->header))) +#define CQE_OPCODE(x) (G_CQE_OPCODE(be32_to_cpu((x)->header))) + +#define CQE_SEND_OPCODE(x)( \ + (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \ + (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \ + (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \ + (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV)) + +#define CQE_LEN(x) (be32_to_cpu((x)->len)) + +/* used for RQ completion processing */ +#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag)) +#define CQE_WRID_MSN(x) (be32_to_cpu((x)->u.rcqe.msn)) + +/* used for SQ completion processing */ +#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx) + +/* generic accessor macros */ +#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi) +#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low) + +/* macros for flit 3 of the cqe */ +#define S_CQE_GENBIT 63 +#define M_CQE_GENBIT 0x1 +#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT) +#define V_CQE_GENBIT(x) ((x) << S_CQE_GENBIT) + +#define S_CQE_OVFBIT 62 +#define M_CQE_OVFBIT 0x1 +#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT) + +#define S_CQE_IQTYPE 60 +#define M_CQE_IQTYPE 0x3 +#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE) + +#define M_CQE_TS 0x0fffffffffffffffULL +#define G_CQE_TS(x) ((x) & M_CQE_TS) + +#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts))) +#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts))) +#define CQE_TS(x) (G_CQE_TS(be64_to_cpu((x)->bits_type_ts))) + +struct t4_swsqe { + u64 wr_id; + struct t4_cqe cqe; + int read_len; + int opcode; + int complete; + int signaled; + u16 idx; +}; + +struct t4_sq { + union t4_wr *queue; + dma_addr_t dma_addr; + DECLARE_PCI_UNMAP_ADDR(mapping); + struct t4_swsqe *sw_sq; + struct t4_swsqe *oldest_read; + u64 udb; + size_t memsize; + u32 qid; + u16 in_use; + u16 size; + u16 cidx; + u16 pidx; +}; + +struct t4_swrqe { + u64 wr_id; +}; + +struct t4_rq { + union t4_recv_wr *queue; + dma_addr_t dma_addr; + DECLARE_PCI_UNMAP_ADDR(mapping); + struct t4_swrqe *sw_rq; + u64 udb; + size_t memsize; + u32 qid; + u32 msn; + u32 rqt_hwaddr; + u16 rqt_size; + u16 in_use; + u16 size; + u16 cidx; + u16 pidx; +}; + +struct t4_wq { + struct t4_sq sq; + struct
t4_rq rq; + void __iomem *db; + void __iomem *gts; + struct c4iw_rdev *rdev; +}; + +static inline int t4_rqes_posted(struct t4_wq *wq) +{ + return wq->rq.in_use; +} + +static inline int t4_rq_empty(struct t4_wq *wq) +{ + return wq->rq.in_use == 0; +} + +static inline int t4_rq_full(struct t4_wq *wq) +{ + return wq->rq.in_use == (wq->rq.size - 1); +} + +static inline u32 t4_rq_avail(struct t4_wq *wq) +{ + return wq->rq.size - 1 - wq->rq.in_use; +} + +static inline void t4_rq_produce(struct t4_wq *wq) +{ + wq->rq.in_use++; + if (++wq->rq.pidx == wq->rq.size) + wq->rq.pidx = 0; +} + +static inline void t4_rq_consume(struct t4_wq *wq) +{ + wq->rq.in_use--; + wq->rq.msn++; + if (++wq->rq.cidx == wq->rq.size) + wq->rq.cidx = 0; +} + +static inline int t4_sq_empty(struct t4_wq *wq) +{ + return wq->sq.in_use == 0; +} + +static inline int t4_sq_full(struct t4_wq *wq) +{ + return wq->sq.in_use == (wq->sq.size - 1); +} + +static inline u32 t4_sq_avail(struct t4_wq *wq) +{ + return wq->sq.size - 1 - wq->sq.in_use; +} + +static inline void t4_sq_produce(struct t4_wq *wq) +{ + wq->sq.in_use++; + if (++wq->sq.pidx == wq->sq.size) + wq->sq.pidx = 0; +} + +static inline void t4_sq_consume(struct t4_wq *wq) +{ + wq->sq.in_use--; + if (++wq->sq.cidx == wq->sq.size) + wq->sq.cidx = 0; +} + +static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc) +{ + inc *= T4_SQ_NUM_SLOTS; + wmb(); + writel(QID(wq->sq.qid) | PIDX(inc), wq->db); +} + +static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc) +{ + inc *= T4_RQ_NUM_SLOTS; + wmb(); + writel(QID(wq->rq.qid) | PIDX(inc), wq->db); +} + +static inline int t4_wq_in_error(struct t4_wq *wq) +{ + return wq->sq.queue[wq->sq.size].status.qp_err; +} + +static inline void t4_set_wq_in_error(struct t4_wq *wq) +{ + wq->sq.queue[wq->sq.size].status.qp_err = 1; + wq->rq.queue[wq->rq.size].status.qp_err = 1; +} + +static inline void t4_disable_wq_db(struct t4_wq *wq) +{ + wq->sq.queue[wq->sq.size].status.db_off = 1; + wq->rq.queue[wq->rq.size].status.db_off = 1; +} + +static inline void t4_enable_wq_db(struct t4_wq *wq) +{ + wq->sq.queue[wq->sq.size].status.db_off = 0; + wq->rq.queue[wq->rq.size].status.db_off = 0; +} + +static inline int t4_wq_db_enabled(struct t4_wq *wq) +{ + return !wq->sq.queue[wq->sq.size].status.db_off; +} + +struct t4_cq { + struct t4_cqe *queue; + dma_addr_t dma_addr; + DECLARE_PCI_UNMAP_ADDR(mapping); + struct t4_cqe *sw_queue; + void __iomem *gts; + struct c4iw_rdev *rdev; + u64 ugts; + size_t memsize; + u64 timestamp; + u32 cqid; + u16 size; /* including status page */ + u16 cidx; + u16 sw_pidx; + u16 sw_cidx; + u16 sw_in_use; + u16 cidx_inc; + u8 gen; + u8 error; +}; + +static inline int t4_arm_cq(struct t4_cq *cq, int se) +{ + u32 val; + + val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | + INGRESSQID(cq->cqid); + cq->cidx_inc = 0; + writel(val, cq->gts); + return 0; +} + +static inline void t4_swcq_produce(struct t4_cq *cq) +{ + cq->sw_in_use++; + if (++cq->sw_pidx == cq->size) + cq->sw_pidx = 0; +} + +static inline void t4_swcq_consume(struct t4_cq *cq) +{ + cq->sw_in_use--; + if (++cq->sw_cidx == cq->size) + cq->sw_cidx = 0; +} + +static inline void t4_hwcq_consume(struct t4_cq *cq) +{ + cq->cidx_inc++; + if (++cq->cidx == cq->size) { + cq->cidx = 0; + cq->gen ^= 1; + } +} + +static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) +{ + return (CQE_GENBIT(cqe) == cq->gen); +} + +static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) +{ + int ret = 0; + + if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { + 
*cqe = &cq->queue[cq->cidx]; + cq->timestamp = CQE_TS(*cqe); + } else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp) + ret = -EOVERFLOW; + else + ret = -ENODATA; + if (ret == -EOVERFLOW) { + printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid); + cq->error = 1; + } + return ret; +} + +static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) +{ + if (cq->sw_in_use) + return &cq->sw_queue[cq->sw_cidx]; + return NULL; +} + +static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe) +{ + int ret = 0; + + if (cq->error) + ret = -ENODATA; + else if (cq->sw_in_use) + *cqe = &cq->sw_queue[cq->sw_cidx]; + else + ret = t4_next_hw_cqe(cq, cqe); + return ret; +} + +static inline int t4_cq_in_error(struct t4_cq *cq) +{ + return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err; +} + +static inline void t4_set_cq_in_error(struct t4_cq *cq) +{ + ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1; +} +#endif diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h new file mode 100644 index 00000000000..fc706bd07fa --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -0,0 +1,829 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _T4FW_RI_API_H_ +#define _T4FW_RI_API_H_ + +#include "t4fw_api.h" + +enum fw_ri_wr_opcode { + FW_RI_RDMA_WRITE = 0x0, /* IETF RDMAP v1.0 ... */ + FW_RI_READ_REQ = 0x1, + FW_RI_READ_RESP = 0x2, + FW_RI_SEND = 0x3, + FW_RI_SEND_WITH_INV = 0x4, + FW_RI_SEND_WITH_SE = 0x5, + FW_RI_SEND_WITH_SE_INV = 0x6, + FW_RI_TERMINATE = 0x7, + FW_RI_RDMA_INIT = 0x8, /* CHELSIO RI specific ... 
*/ + FW_RI_BIND_MW = 0x9, + FW_RI_FAST_REGISTER = 0xa, + FW_RI_LOCAL_INV = 0xb, + FW_RI_QP_MODIFY = 0xc, + FW_RI_BYPASS = 0xd, + FW_RI_RECEIVE = 0xe, + + FW_RI_SGE_EC_CR_RETURN = 0xf +}; + +enum fw_ri_wr_flags { + FW_RI_COMPLETION_FLAG = 0x01, + FW_RI_NOTIFICATION_FLAG = 0x02, + FW_RI_SOLICITED_EVENT_FLAG = 0x04, + FW_RI_READ_FENCE_FLAG = 0x08, + FW_RI_LOCAL_FENCE_FLAG = 0x10, + FW_RI_RDMA_READ_INVALIDATE = 0x20 +}; + +enum fw_ri_mpa_attrs { + FW_RI_MPA_RX_MARKER_ENABLE = 0x01, + FW_RI_MPA_TX_MARKER_ENABLE = 0x02, + FW_RI_MPA_CRC_ENABLE = 0x04, + FW_RI_MPA_IETF_ENABLE = 0x08 +}; + +enum fw_ri_qp_caps { + FW_RI_QP_RDMA_READ_ENABLE = 0x01, + FW_RI_QP_RDMA_WRITE_ENABLE = 0x02, + FW_RI_QP_BIND_ENABLE = 0x04, + FW_RI_QP_FAST_REGISTER_ENABLE = 0x08, + FW_RI_QP_STAG0_ENABLE = 0x10 +}; + +enum fw_ri_addr_type { + FW_RI_ZERO_BASED_TO = 0x00, + FW_RI_VA_BASED_TO = 0x01 +}; + +enum fw_ri_mem_perms { + FW_RI_MEM_ACCESS_REM_WRITE = 0x01, + FW_RI_MEM_ACCESS_REM_READ = 0x02, + FW_RI_MEM_ACCESS_REM = 0x03, + FW_RI_MEM_ACCESS_LOCAL_WRITE = 0x04, + FW_RI_MEM_ACCESS_LOCAL_READ = 0x08, + FW_RI_MEM_ACCESS_LOCAL = 0x0C +}; + +enum fw_ri_stag_type { + FW_RI_STAG_NSMR = 0x00, + FW_RI_STAG_SMR = 0x01, + FW_RI_STAG_MW = 0x02, + FW_RI_STAG_MW_RELAXED = 0x03 +}; + +enum fw_ri_data_op { + FW_RI_DATA_IMMD = 0x81, + FW_RI_DATA_DSGL = 0x82, + FW_RI_DATA_ISGL = 0x83 +}; + +enum fw_ri_sgl_depth { + FW_RI_SGL_DEPTH_MAX_SQ = 16, + FW_RI_SGL_DEPTH_MAX_RQ = 4 +}; + +struct fw_ri_dsge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct fw_ri_dsgl { + __u8 op; + __u8 r1; + __be16 nsge; + __be32 len0; + __be64 addr0; +#ifndef C99_NOT_SUPPORTED + struct fw_ri_dsge_pair sge[0]; +#endif +}; + +struct fw_ri_sge { + __be32 stag; + __be32 len; + __be64 to; +}; + +struct fw_ri_isgl { + __u8 op; + __u8 r1; + __be16 nsge; + __be32 r2; +#ifndef C99_NOT_SUPPORTED + struct fw_ri_sge sge[0]; +#endif +}; + +struct fw_ri_immd { + __u8 op; + __u8 r1; + __be16 r2; + __be32 immdlen; +#ifndef C99_NOT_SUPPORTED + __u8 data[0]; +#endif +}; + +struct fw_ri_tpte { + __be32 valid_to_pdid; + __be32 locread_to_qpid; + __be32 nosnoop_pbladdr; + __be32 len_lo; + __be32 va_hi; + __be32 va_lo_fbo; + __be32 dca_mwbcnt_pstag; + __be32 len_hi; +}; + +#define S_FW_RI_TPTE_VALID 31 +#define M_FW_RI_TPTE_VALID 0x1 +#define V_FW_RI_TPTE_VALID(x) ((x) << S_FW_RI_TPTE_VALID) +#define G_FW_RI_TPTE_VALID(x) \ + (((x) >> S_FW_RI_TPTE_VALID) & M_FW_RI_TPTE_VALID) +#define F_FW_RI_TPTE_VALID V_FW_RI_TPTE_VALID(1U) + +#define S_FW_RI_TPTE_STAGKEY 23 +#define M_FW_RI_TPTE_STAGKEY 0xff +#define V_FW_RI_TPTE_STAGKEY(x) ((x) << S_FW_RI_TPTE_STAGKEY) +#define G_FW_RI_TPTE_STAGKEY(x) \ + (((x) >> S_FW_RI_TPTE_STAGKEY) & M_FW_RI_TPTE_STAGKEY) + +#define S_FW_RI_TPTE_STAGSTATE 22 +#define M_FW_RI_TPTE_STAGSTATE 0x1 +#define V_FW_RI_TPTE_STAGSTATE(x) ((x) << S_FW_RI_TPTE_STAGSTATE) +#define G_FW_RI_TPTE_STAGSTATE(x) \ + (((x) >> S_FW_RI_TPTE_STAGSTATE) & M_FW_RI_TPTE_STAGSTATE) +#define F_FW_RI_TPTE_STAGSTATE V_FW_RI_TPTE_STAGSTATE(1U) + +#define S_FW_RI_TPTE_STAGTYPE 20 +#define M_FW_RI_TPTE_STAGTYPE 0x3 +#define V_FW_RI_TPTE_STAGTYPE(x) ((x) << S_FW_RI_TPTE_STAGTYPE) +#define G_FW_RI_TPTE_STAGTYPE(x) \ + (((x) >> S_FW_RI_TPTE_STAGTYPE) & M_FW_RI_TPTE_STAGTYPE) + +#define S_FW_RI_TPTE_PDID 0 +#define M_FW_RI_TPTE_PDID 0xfffff +#define V_FW_RI_TPTE_PDID(x) ((x) << S_FW_RI_TPTE_PDID) +#define G_FW_RI_TPTE_PDID(x) \ + (((x) >> S_FW_RI_TPTE_PDID) & M_FW_RI_TPTE_PDID) + +#define S_FW_RI_TPTE_PERM 28 +#define M_FW_RI_TPTE_PERM 0xf +#define V_FW_RI_TPTE_PERM(x) ((x) << 
S_FW_RI_TPTE_PERM) +#define G_FW_RI_TPTE_PERM(x) \ + (((x) >> S_FW_RI_TPTE_PERM) & M_FW_RI_TPTE_PERM) + +#define S_FW_RI_TPTE_REMINVDIS 27 +#define M_FW_RI_TPTE_REMINVDIS 0x1 +#define V_FW_RI_TPTE_REMINVDIS(x) ((x) << S_FW_RI_TPTE_REMINVDIS) +#define G_FW_RI_TPTE_REMINVDIS(x) \ + (((x) >> S_FW_RI_TPTE_REMINVDIS) & M_FW_RI_TPTE_REMINVDIS) +#define F_FW_RI_TPTE_REMINVDIS V_FW_RI_TPTE_REMINVDIS(1U) + +#define S_FW_RI_TPTE_ADDRTYPE 26 +#define M_FW_RI_TPTE_ADDRTYPE 1 +#define V_FW_RI_TPTE_ADDRTYPE(x) ((x) << S_FW_RI_TPTE_ADDRTYPE) +#define G_FW_RI_TPTE_ADDRTYPE(x) \ + (((x) >> S_FW_RI_TPTE_ADDRTYPE) & M_FW_RI_TPTE_ADDRTYPE) +#define F_FW_RI_TPTE_ADDRTYPE V_FW_RI_TPTE_ADDRTYPE(1U) + +#define S_FW_RI_TPTE_MWBINDEN 25 +#define M_FW_RI_TPTE_MWBINDEN 0x1 +#define V_FW_RI_TPTE_MWBINDEN(x) ((x) << S_FW_RI_TPTE_MWBINDEN) +#define G_FW_RI_TPTE_MWBINDEN(x) \ + (((x) >> S_FW_RI_TPTE_MWBINDEN) & M_FW_RI_TPTE_MWBINDEN) +#define F_FW_RI_TPTE_MWBINDEN V_FW_RI_TPTE_MWBINDEN(1U) + +#define S_FW_RI_TPTE_PS 20 +#define M_FW_RI_TPTE_PS 0x1f +#define V_FW_RI_TPTE_PS(x) ((x) << S_FW_RI_TPTE_PS) +#define G_FW_RI_TPTE_PS(x) \ + (((x) >> S_FW_RI_TPTE_PS) & M_FW_RI_TPTE_PS) + +#define S_FW_RI_TPTE_QPID 0 +#define M_FW_RI_TPTE_QPID 0xfffff +#define V_FW_RI_TPTE_QPID(x) ((x) << S_FW_RI_TPTE_QPID) +#define G_FW_RI_TPTE_QPID(x) \ + (((x) >> S_FW_RI_TPTE_QPID) & M_FW_RI_TPTE_QPID) + +#define S_FW_RI_TPTE_NOSNOOP 30 +#define M_FW_RI_TPTE_NOSNOOP 0x1 +#define V_FW_RI_TPTE_NOSNOOP(x) ((x) << S_FW_RI_TPTE_NOSNOOP) +#define G_FW_RI_TPTE_NOSNOOP(x) \ + (((x) >> S_FW_RI_TPTE_NOSNOOP) & M_FW_RI_TPTE_NOSNOOP) +#define F_FW_RI_TPTE_NOSNOOP V_FW_RI_TPTE_NOSNOOP(1U) + +#define S_FW_RI_TPTE_PBLADDR 0 +#define M_FW_RI_TPTE_PBLADDR 0x1fffffff +#define V_FW_RI_TPTE_PBLADDR(x) ((x) << S_FW_RI_TPTE_PBLADDR) +#define G_FW_RI_TPTE_PBLADDR(x) \ + (((x) >> S_FW_RI_TPTE_PBLADDR) & M_FW_RI_TPTE_PBLADDR) + +#define S_FW_RI_TPTE_DCA 24 +#define M_FW_RI_TPTE_DCA 0x1f +#define V_FW_RI_TPTE_DCA(x) ((x) << S_FW_RI_TPTE_DCA) +#define G_FW_RI_TPTE_DCA(x) \ + (((x) >> S_FW_RI_TPTE_DCA) & M_FW_RI_TPTE_DCA) + +#define S_FW_RI_TPTE_MWBCNT_PSTAG 0 +#define M_FW_RI_TPTE_MWBCNT_PSTAG 0xffffff +#define V_FW_RI_TPTE_MWBCNT_PSTAT(x) \ + ((x) << S_FW_RI_TPTE_MWBCNT_PSTAG) +#define G_FW_RI_TPTE_MWBCNT_PSTAG(x) \ + (((x) >> S_FW_RI_TPTE_MWBCNT_PSTAG) & M_FW_RI_TPTE_MWBCNT_PSTAG) + +enum fw_ri_res_type { + FW_RI_RES_TYPE_SQ, + FW_RI_RES_TYPE_RQ, + FW_RI_RES_TYPE_CQ, +}; + +enum fw_ri_res_op { + FW_RI_RES_OP_WRITE, + FW_RI_RES_OP_RESET, +}; + +struct fw_ri_res { + union fw_ri_restype { + struct fw_ri_res_sqrq { + __u8 restype; + __u8 op; + __be16 r3; + __be32 eqid; + __be32 r4[2]; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + } sqrq; + struct fw_ri_res_cq { + __u8 restype; + __u8 op; + __be16 r3; + __be32 iqid; + __be32 r4[2]; + __be32 iqandst_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_iqro; + __be32 r6_lo; + __be64 r7; + } cq; + } u; +}; + +struct fw_ri_res_wr { + __be32 op_nres; + __be32 len16_pkd; + __u64 cookie; +#ifndef C99_NOT_SUPPORTED + struct fw_ri_res res[0]; +#endif +}; + +#define S_FW_RI_RES_WR_NRES 0 +#define M_FW_RI_RES_WR_NRES 0xff +#define V_FW_RI_RES_WR_NRES(x) ((x) << S_FW_RI_RES_WR_NRES) +#define G_FW_RI_RES_WR_NRES(x) \ + (((x) >> S_FW_RI_RES_WR_NRES) & M_FW_RI_RES_WR_NRES) + +#define S_FW_RI_RES_WR_FETCHSZM 26 +#define M_FW_RI_RES_WR_FETCHSZM 0x1 +#define V_FW_RI_RES_WR_FETCHSZM(x) ((x) << S_FW_RI_RES_WR_FETCHSZM) +#define G_FW_RI_RES_WR_FETCHSZM(x) \ + (((x) >> 
S_FW_RI_RES_WR_FETCHSZM) & M_FW_RI_RES_WR_FETCHSZM) +#define F_FW_RI_RES_WR_FETCHSZM V_FW_RI_RES_WR_FETCHSZM(1U) + +#define S_FW_RI_RES_WR_STATUSPGNS 25 +#define M_FW_RI_RES_WR_STATUSPGNS 0x1 +#define V_FW_RI_RES_WR_STATUSPGNS(x) ((x) << S_FW_RI_RES_WR_STATUSPGNS) +#define G_FW_RI_RES_WR_STATUSPGNS(x) \ + (((x) >> S_FW_RI_RES_WR_STATUSPGNS) & M_FW_RI_RES_WR_STATUSPGNS) +#define F_FW_RI_RES_WR_STATUSPGNS V_FW_RI_RES_WR_STATUSPGNS(1U) + +#define S_FW_RI_RES_WR_STATUSPGRO 24 +#define M_FW_RI_RES_WR_STATUSPGRO 0x1 +#define V_FW_RI_RES_WR_STATUSPGRO(x) ((x) << S_FW_RI_RES_WR_STATUSPGRO) +#define G_FW_RI_RES_WR_STATUSPGRO(x) \ + (((x) >> S_FW_RI_RES_WR_STATUSPGRO) & M_FW_RI_RES_WR_STATUSPGRO) +#define F_FW_RI_RES_WR_STATUSPGRO V_FW_RI_RES_WR_STATUSPGRO(1U) + +#define S_FW_RI_RES_WR_FETCHNS 23 +#define M_FW_RI_RES_WR_FETCHNS 0x1 +#define V_FW_RI_RES_WR_FETCHNS(x) ((x) << S_FW_RI_RES_WR_FETCHNS) +#define G_FW_RI_RES_WR_FETCHNS(x) \ + (((x) >> S_FW_RI_RES_WR_FETCHNS) & M_FW_RI_RES_WR_FETCHNS) +#define F_FW_RI_RES_WR_FETCHNS V_FW_RI_RES_WR_FETCHNS(1U) + +#define S_FW_RI_RES_WR_FETCHRO 22 +#define M_FW_RI_RES_WR_FETCHRO 0x1 +#define V_FW_RI_RES_WR_FETCHRO(x) ((x) << S_FW_RI_RES_WR_FETCHRO) +#define G_FW_RI_RES_WR_FETCHRO(x) \ + (((x) >> S_FW_RI_RES_WR_FETCHRO) & M_FW_RI_RES_WR_FETCHRO) +#define F_FW_RI_RES_WR_FETCHRO V_FW_RI_RES_WR_FETCHRO(1U) + +#define S_FW_RI_RES_WR_HOSTFCMODE 20 +#define M_FW_RI_RES_WR_HOSTFCMODE 0x3 +#define V_FW_RI_RES_WR_HOSTFCMODE(x) ((x) << S_FW_RI_RES_WR_HOSTFCMODE) +#define G_FW_RI_RES_WR_HOSTFCMODE(x) \ + (((x) >> S_FW_RI_RES_WR_HOSTFCMODE) & M_FW_RI_RES_WR_HOSTFCMODE) + +#define S_FW_RI_RES_WR_CPRIO 19 +#define M_FW_RI_RES_WR_CPRIO 0x1 +#define V_FW_RI_RES_WR_CPRIO(x) ((x) << S_FW_RI_RES_WR_CPRIO) +#define G_FW_RI_RES_WR_CPRIO(x) \ + (((x) >> S_FW_RI_RES_WR_CPRIO) & M_FW_RI_RES_WR_CPRIO) +#define F_FW_RI_RES_WR_CPRIO V_FW_RI_RES_WR_CPRIO(1U) + +#define S_FW_RI_RES_WR_ONCHIP 18 +#define M_FW_RI_RES_WR_ONCHIP 0x1 +#define V_FW_RI_RES_WR_ONCHIP(x) ((x) << S_FW_RI_RES_WR_ONCHIP) +#define G_FW_RI_RES_WR_ONCHIP(x) \ + (((x) >> S_FW_RI_RES_WR_ONCHIP) & M_FW_RI_RES_WR_ONCHIP) +#define F_FW_RI_RES_WR_ONCHIP V_FW_RI_RES_WR_ONCHIP(1U) + +#define S_FW_RI_RES_WR_PCIECHN 16 +#define M_FW_RI_RES_WR_PCIECHN 0x3 +#define V_FW_RI_RES_WR_PCIECHN(x) ((x) << S_FW_RI_RES_WR_PCIECHN) +#define G_FW_RI_RES_WR_PCIECHN(x) \ + (((x) >> S_FW_RI_RES_WR_PCIECHN) & M_FW_RI_RES_WR_PCIECHN) + +#define S_FW_RI_RES_WR_IQID 0 +#define M_FW_RI_RES_WR_IQID 0xffff +#define V_FW_RI_RES_WR_IQID(x) ((x) << S_FW_RI_RES_WR_IQID) +#define G_FW_RI_RES_WR_IQID(x) \ + (((x) >> S_FW_RI_RES_WR_IQID) & M_FW_RI_RES_WR_IQID) + +#define S_FW_RI_RES_WR_DCAEN 31 +#define M_FW_RI_RES_WR_DCAEN 0x1 +#define V_FW_RI_RES_WR_DCAEN(x) ((x) << S_FW_RI_RES_WR_DCAEN) +#define G_FW_RI_RES_WR_DCAEN(x) \ + (((x) >> S_FW_RI_RES_WR_DCAEN) & M_FW_RI_RES_WR_DCAEN) +#define F_FW_RI_RES_WR_DCAEN V_FW_RI_RES_WR_DCAEN(1U) + +#define S_FW_RI_RES_WR_DCACPU 26 +#define M_FW_RI_RES_WR_DCACPU 0x1f +#define V_FW_RI_RES_WR_DCACPU(x) ((x) << S_FW_RI_RES_WR_DCACPU) +#define G_FW_RI_RES_WR_DCACPU(x) \ + (((x) >> S_FW_RI_RES_WR_DCACPU) & M_FW_RI_RES_WR_DCACPU) + +#define S_FW_RI_RES_WR_FBMIN 23 +#define M_FW_RI_RES_WR_FBMIN 0x7 +#define V_FW_RI_RES_WR_FBMIN(x) ((x) << S_FW_RI_RES_WR_FBMIN) +#define G_FW_RI_RES_WR_FBMIN(x) \ + (((x) >> S_FW_RI_RES_WR_FBMIN) & M_FW_RI_RES_WR_FBMIN) + +#define S_FW_RI_RES_WR_FBMAX 20 +#define M_FW_RI_RES_WR_FBMAX 0x7 +#define V_FW_RI_RES_WR_FBMAX(x) ((x) << S_FW_RI_RES_WR_FBMAX) +#define G_FW_RI_RES_WR_FBMAX(x) \ + (((x) >> 
S_FW_RI_RES_WR_FBMAX) & M_FW_RI_RES_WR_FBMAX) + +#define S_FW_RI_RES_WR_CIDXFTHRESHO 19 +#define M_FW_RI_RES_WR_CIDXFTHRESHO 0x1 +#define V_FW_RI_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESHO) +#define G_FW_RI_RES_WR_CIDXFTHRESHO(x) \ + (((x) >> S_FW_RI_RES_WR_CIDXFTHRESHO) & M_FW_RI_RES_WR_CIDXFTHRESHO) +#define F_FW_RI_RES_WR_CIDXFTHRESHO V_FW_RI_RES_WR_CIDXFTHRESHO(1U) + +#define S_FW_RI_RES_WR_CIDXFTHRESH 16 +#define M_FW_RI_RES_WR_CIDXFTHRESH 0x7 +#define V_FW_RI_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_RI_RES_WR_CIDXFTHRESH) +#define G_FW_RI_RES_WR_CIDXFTHRESH(x) \ + (((x) >> S_FW_RI_RES_WR_CIDXFTHRESH) & M_FW_RI_RES_WR_CIDXFTHRESH) + +#define S_FW_RI_RES_WR_EQSIZE 0 +#define M_FW_RI_RES_WR_EQSIZE 0xffff +#define V_FW_RI_RES_WR_EQSIZE(x) ((x) << S_FW_RI_RES_WR_EQSIZE) +#define G_FW_RI_RES_WR_EQSIZE(x) \ + (((x) >> S_FW_RI_RES_WR_EQSIZE) & M_FW_RI_RES_WR_EQSIZE) + +#define S_FW_RI_RES_WR_IQANDST 15 +#define M_FW_RI_RES_WR_IQANDST 0x1 +#define V_FW_RI_RES_WR_IQANDST(x) ((x) << S_FW_RI_RES_WR_IQANDST) +#define G_FW_RI_RES_WR_IQANDST(x) \ + (((x) >> S_FW_RI_RES_WR_IQANDST) & M_FW_RI_RES_WR_IQANDST) +#define F_FW_RI_RES_WR_IQANDST V_FW_RI_RES_WR_IQANDST(1U) + +#define S_FW_RI_RES_WR_IQANUS 14 +#define M_FW_RI_RES_WR_IQANUS 0x1 +#define V_FW_RI_RES_WR_IQANUS(x) ((x) << S_FW_RI_RES_WR_IQANUS) +#define G_FW_RI_RES_WR_IQANUS(x) \ + (((x) >> S_FW_RI_RES_WR_IQANUS) & M_FW_RI_RES_WR_IQANUS) +#define F_FW_RI_RES_WR_IQANUS V_FW_RI_RES_WR_IQANUS(1U) + +#define S_FW_RI_RES_WR_IQANUD 12 +#define M_FW_RI_RES_WR_IQANUD 0x3 +#define V_FW_RI_RES_WR_IQANUD(x) ((x) << S_FW_RI_RES_WR_IQANUD) +#define G_FW_RI_RES_WR_IQANUD(x) \ + (((x) >> S_FW_RI_RES_WR_IQANUD) & M_FW_RI_RES_WR_IQANUD) + +#define S_FW_RI_RES_WR_IQANDSTINDEX 0 +#define M_FW_RI_RES_WR_IQANDSTINDEX 0xfff +#define V_FW_RI_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_RI_RES_WR_IQANDSTINDEX) +#define G_FW_RI_RES_WR_IQANDSTINDEX(x) \ + (((x) >> S_FW_RI_RES_WR_IQANDSTINDEX) & M_FW_RI_RES_WR_IQANDSTINDEX) + +#define S_FW_RI_RES_WR_IQDROPRSS 15 +#define M_FW_RI_RES_WR_IQDROPRSS 0x1 +#define V_FW_RI_RES_WR_IQDROPRSS(x) ((x) << S_FW_RI_RES_WR_IQDROPRSS) +#define G_FW_RI_RES_WR_IQDROPRSS(x) \ + (((x) >> S_FW_RI_RES_WR_IQDROPRSS) & M_FW_RI_RES_WR_IQDROPRSS) +#define F_FW_RI_RES_WR_IQDROPRSS V_FW_RI_RES_WR_IQDROPRSS(1U) + +#define S_FW_RI_RES_WR_IQGTSMODE 14 +#define M_FW_RI_RES_WR_IQGTSMODE 0x1 +#define V_FW_RI_RES_WR_IQGTSMODE(x) ((x) << S_FW_RI_RES_WR_IQGTSMODE) +#define G_FW_RI_RES_WR_IQGTSMODE(x) \ + (((x) >> S_FW_RI_RES_WR_IQGTSMODE) & M_FW_RI_RES_WR_IQGTSMODE) +#define F_FW_RI_RES_WR_IQGTSMODE V_FW_RI_RES_WR_IQGTSMODE(1U) + +#define S_FW_RI_RES_WR_IQPCIECH 12 +#define M_FW_RI_RES_WR_IQPCIECH 0x3 +#define V_FW_RI_RES_WR_IQPCIECH(x) ((x) << S_FW_RI_RES_WR_IQPCIECH) +#define G_FW_RI_RES_WR_IQPCIECH(x) \ + (((x) >> S_FW_RI_RES_WR_IQPCIECH) & M_FW_RI_RES_WR_IQPCIECH) + +#define S_FW_RI_RES_WR_IQDCAEN 11 +#define M_FW_RI_RES_WR_IQDCAEN 0x1 +#define V_FW_RI_RES_WR_IQDCAEN(x) ((x) << S_FW_RI_RES_WR_IQDCAEN) +#define G_FW_RI_RES_WR_IQDCAEN(x) \ + (((x) >> S_FW_RI_RES_WR_IQDCAEN) & M_FW_RI_RES_WR_IQDCAEN) +#define F_FW_RI_RES_WR_IQDCAEN V_FW_RI_RES_WR_IQDCAEN(1U) + +#define S_FW_RI_RES_WR_IQDCACPU 6 +#define M_FW_RI_RES_WR_IQDCACPU 0x1f +#define V_FW_RI_RES_WR_IQDCACPU(x) ((x) << S_FW_RI_RES_WR_IQDCACPU) +#define G_FW_RI_RES_WR_IQDCACPU(x) \ + (((x) >> S_FW_RI_RES_WR_IQDCACPU) & M_FW_RI_RES_WR_IQDCACPU) + +#define S_FW_RI_RES_WR_IQINTCNTTHRESH 4 +#define M_FW_RI_RES_WR_IQINTCNTTHRESH 0x3 +#define V_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ + ((x) << 
S_FW_RI_RES_WR_IQINTCNTTHRESH) +#define G_FW_RI_RES_WR_IQINTCNTTHRESH(x) \ + (((x) >> S_FW_RI_RES_WR_IQINTCNTTHRESH) & M_FW_RI_RES_WR_IQINTCNTTHRESH) + +#define S_FW_RI_RES_WR_IQO 3 +#define M_FW_RI_RES_WR_IQO 0x1 +#define V_FW_RI_RES_WR_IQO(x) ((x) << S_FW_RI_RES_WR_IQO) +#define G_FW_RI_RES_WR_IQO(x) \ + (((x) >> S_FW_RI_RES_WR_IQO) & M_FW_RI_RES_WR_IQO) +#define F_FW_RI_RES_WR_IQO V_FW_RI_RES_WR_IQO(1U) + +#define S_FW_RI_RES_WR_IQCPRIO 2 +#define M_FW_RI_RES_WR_IQCPRIO 0x1 +#define V_FW_RI_RES_WR_IQCPRIO(x) ((x) << S_FW_RI_RES_WR_IQCPRIO) +#define G_FW_RI_RES_WR_IQCPRIO(x) \ + (((x) >> S_FW_RI_RES_WR_IQCPRIO) & M_FW_RI_RES_WR_IQCPRIO) +#define F_FW_RI_RES_WR_IQCPRIO V_FW_RI_RES_WR_IQCPRIO(1U) + +#define S_FW_RI_RES_WR_IQESIZE 0 +#define M_FW_RI_RES_WR_IQESIZE 0x3 +#define V_FW_RI_RES_WR_IQESIZE(x) ((x) << S_FW_RI_RES_WR_IQESIZE) +#define G_FW_RI_RES_WR_IQESIZE(x) \ + (((x) >> S_FW_RI_RES_WR_IQESIZE) & M_FW_RI_RES_WR_IQESIZE) + +#define S_FW_RI_RES_WR_IQNS 31 +#define M_FW_RI_RES_WR_IQNS 0x1 +#define V_FW_RI_RES_WR_IQNS(x) ((x) << S_FW_RI_RES_WR_IQNS) +#define G_FW_RI_RES_WR_IQNS(x) \ + (((x) >> S_FW_RI_RES_WR_IQNS) & M_FW_RI_RES_WR_IQNS) +#define F_FW_RI_RES_WR_IQNS V_FW_RI_RES_WR_IQNS(1U) + +#define S_FW_RI_RES_WR_IQRO 30 +#define M_FW_RI_RES_WR_IQRO 0x1 +#define V_FW_RI_RES_WR_IQRO(x) ((x) << S_FW_RI_RES_WR_IQRO) +#define G_FW_RI_RES_WR_IQRO(x) \ + (((x) >> S_FW_RI_RES_WR_IQRO) & M_FW_RI_RES_WR_IQRO) +#define F_FW_RI_RES_WR_IQRO V_FW_RI_RES_WR_IQRO(1U) + +struct fw_ri_rdma_write_wr { + __u8 opcode; + __u8 flags; + __u16 wrid; + __u8 r1[3]; + __u8 len16; + __be64 r2; + __be32 plen; + __be32 stag_sink; + __be64 to_sink; +#ifndef C99_NOT_SUPPORTED + union { + struct fw_ri_immd immd_src[0]; + struct fw_ri_isgl isgl_src[0]; + } u; +#endif +}; + +struct fw_ri_send_wr { + __u8 opcode; + __u8 flags; + __u16 wrid; + __u8 r1[3]; + __u8 len16; + __be32 sendop_pkd; + __be32 stag_inv; + __be32 plen; + __be32 r3; + __be64 r4; +#ifndef C99_NOT_SUPPORTED + union { + struct fw_ri_immd immd_src[0]; + struct fw_ri_isgl isgl_src[0]; + } u; +#endif +}; + +#define S_FW_RI_SEND_WR_SENDOP 0 +#define M_FW_RI_SEND_WR_SENDOP 0xf +#define V_FW_RI_SEND_WR_SENDOP(x) ((x) << S_FW_RI_SEND_WR_SENDOP) +#define G_FW_RI_SEND_WR_SENDOP(x) \ + (((x) >> S_FW_RI_SEND_WR_SENDOP) & M_FW_RI_SEND_WR_SENDOP) + +struct fw_ri_rdma_read_wr { + __u8 opcode; + __u8 flags; + __u16 wrid; + __u8 r1[3]; + __u8 len16; + __be64 r2; + __be32 stag_sink; + __be32 to_sink_hi; + __be32 to_sink_lo; + __be32 plen; + __be32 stag_src; + __be32 to_src_hi; + __be32 to_src_lo; + __be32 r5; +}; + +struct fw_ri_recv_wr { + __u8 opcode; + __u8 r1; + __u16 wrid; + __u8 r2[3]; + __u8 len16; + struct fw_ri_isgl isgl; +}; + +struct fw_ri_bind_mw_wr { + __u8 opcode; + __u8 flags; + __u16 wrid; + __u8 r1[3]; + __u8 len16; + __u8 qpbinde_to_dcacpu; + __u8 pgsz_shift; + __u8 addr_type; + __u8 mem_perms; + __be32 stag_mr; + __be32 stag_mw; + __be32 r3; + __be64 len_mw; + __be64 va_fbo; + __be64 r4; +}; + +#define S_FW_RI_BIND_MW_WR_QPBINDE 6 +#define M_FW_RI_BIND_MW_WR_QPBINDE 0x1 +#define V_FW_RI_BIND_MW_WR_QPBINDE(x) ((x) << S_FW_RI_BIND_MW_WR_QPBINDE) +#define G_FW_RI_BIND_MW_WR_QPBINDE(x) \ + (((x) >> S_FW_RI_BIND_MW_WR_QPBINDE) & M_FW_RI_BIND_MW_WR_QPBINDE) +#define F_FW_RI_BIND_MW_WR_QPBINDE V_FW_RI_BIND_MW_WR_QPBINDE(1U) + +#define S_FW_RI_BIND_MW_WR_NS 5 +#define M_FW_RI_BIND_MW_WR_NS 0x1 +#define V_FW_RI_BIND_MW_WR_NS(x) ((x) << S_FW_RI_BIND_MW_WR_NS) +#define G_FW_RI_BIND_MW_WR_NS(x) \ + (((x) >> S_FW_RI_BIND_MW_WR_NS) & M_FW_RI_BIND_MW_WR_NS) +#define 
F_FW_RI_BIND_MW_WR_NS V_FW_RI_BIND_MW_WR_NS(1U) + +#define S_FW_RI_BIND_MW_WR_DCACPU 0 +#define M_FW_RI_BIND_MW_WR_DCACPU 0x1f +#define V_FW_RI_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_BIND_MW_WR_DCACPU) +#define G_FW_RI_BIND_MW_WR_DCACPU(x) \ + (((x) >> S_FW_RI_BIND_MW_WR_DCACPU) & M_FW_RI_BIND_MW_WR_DCACPU) + +struct fw_ri_fr_nsmr_wr { + __u8 opcode; + __u8 flags; + __u16 wrid; + __u8 r1[3]; + __u8 len16; + __u8 qpbinde_to_dcacpu; + __u8 pgsz_shift; + __u8 addr_type; + __u8 mem_perms; + __be32 stag; + __be32 len_hi; + __be32 len_lo; + __be32 va_hi; + __be32 va_lo_fbo; +}; + +#define S_FW_RI_FR_NSMR_WR_QPBINDE 6 +#define M_FW_RI_FR_NSMR_WR_QPBINDE 0x1 +#define V_FW_RI_FR_NSMR_WR_QPBINDE(x) ((x) << S_FW_RI_FR_NSMR_WR_QPBINDE) +#define G_FW_RI_FR_NSMR_WR_QPBINDE(x) \ + (((x) >> S_FW_RI_FR_NSMR_WR_QPBINDE) & M_FW_RI_FR_NSMR_WR_QPBINDE) +#define F_FW_RI_FR_NSMR_WR_QPBINDE V_FW_RI_FR_NSMR_WR_QPBINDE(1U) + +#define S_FW_RI_FR_NSMR_WR_NS 5 +#define M_FW_RI_FR_NSMR_WR_NS 0x1 +#define V_FW_RI_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_FR_NSMR_WR_NS) +#define G_FW_RI_FR_NSMR_WR_NS(x) \ + (((x) >> S_FW_RI_FR_NSMR_WR_NS) & M_FW_RI_FR_NSMR_WR_NS) +#define F_FW_RI_FR_NSMR_WR_NS V_FW_RI_FR_NSMR_WR_NS(1U) + +#define S_FW_RI_FR_NSMR_WR_DCACPU 0 +#define M_FW_RI_FR_NSMR_WR_DCACPU 0x1f +#define V_FW_RI_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_FR_NSMR_WR_DCACPU) +#define G_FW_RI_FR_NSMR_WR_DCACPU(x) \ + (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU) + +struct fw_ri_inv_lstag_wr { + __u8 opcode; + __u8 flags; + __u16 wrid; + __u8 r1[3]; + __u8 len16; + __be32 r2; + __be32 stag_inv; +}; + +enum fw_ri_type { + FW_RI_TYPE_INIT, + FW_RI_TYPE_FINI, + FW_RI_TYPE_TERMINATE +}; + +enum fw_ri_init_p2ptype { + FW_RI_INIT_P2PTYPE_RDMA_WRITE = FW_RI_RDMA_WRITE, + FW_RI_INIT_P2PTYPE_READ_REQ = FW_RI_READ_REQ, + FW_RI_INIT_P2PTYPE_SEND = FW_RI_SEND, + FW_RI_INIT_P2PTYPE_SEND_WITH_INV = FW_RI_SEND_WITH_INV, + FW_RI_INIT_P2PTYPE_SEND_WITH_SE = FW_RI_SEND_WITH_SE, + FW_RI_INIT_P2PTYPE_SEND_WITH_SE_INV = FW_RI_SEND_WITH_SE_INV, + FW_RI_INIT_P2PTYPE_DISABLED = 0xf, +}; + +struct fw_ri_wr { + __be32 op_compl; + __be32 flowid_len16; + __u64 cookie; + union fw_ri { + struct fw_ri_init { + __u8 type; + __u8 mpareqbit_p2ptype; + __u8 r4[2]; + __u8 mpa_attrs; + __u8 qp_caps; + __be16 nrqe; + __be32 pdid; + __be32 qpid; + __be32 sq_eqid; + __be32 rq_eqid; + __be32 scqid; + __be32 rcqid; + __be32 ord_max; + __be32 ird_max; + __be32 iss; + __be32 irs; + __be32 hwrqsize; + __be32 hwrqaddr; + __be64 r5; + union fw_ri_init_p2p { + struct fw_ri_rdma_write_wr write; + struct fw_ri_rdma_read_wr read; + struct fw_ri_send_wr send; + } u; + } init; + struct fw_ri_fini { + __u8 type; + __u8 r3[7]; + __be64 r4; + } fini; + struct fw_ri_terminate { + __u8 type; + __u8 r3[3]; + __be32 immdlen; + __u8 termmsg[40]; + } terminate; + } u; +}; + +#define S_FW_RI_WR_MPAREQBIT 7 +#define M_FW_RI_WR_MPAREQBIT 0x1 +#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT) +#define G_FW_RI_WR_MPAREQBIT(x) \ + (((x) >> S_FW_RI_WR_MPAREQBIT) & M_FW_RI_WR_MPAREQBIT) +#define F_FW_RI_WR_MPAREQBIT V_FW_RI_WR_MPAREQBIT(1U) + +#define S_FW_RI_WR_P2PTYPE 0 +#define M_FW_RI_WR_P2PTYPE 0xf +#define V_FW_RI_WR_P2PTYPE(x) ((x) << S_FW_RI_WR_P2PTYPE) +#define G_FW_RI_WR_P2PTYPE(x) \ + (((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE) + +struct tcp_options { + __be16 mss; + __u8 wsf; +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8:4; + __u8 unknown:1; + __u8:1; + __u8 sack:1; + __u8 tstamp:1; +#else + __u8 tstamp:1; + __u8 sack:1; + __u8:1; + __u8 unknown:1; + __u8:4; 
+#endif +}; + +struct cpl_pass_accept_req { + union opcode_tid ot; + __be16 rsvd; + __be16 len; + __be32 hdr_len; + __be16 vlan; + __be16 l2info; + __be32 tos_stid; + struct tcp_options tcpopt; +}; + +/* cpl_pass_accept_req.hdr_len fields */ +#define S_SYN_RX_CHAN 0 +#define M_SYN_RX_CHAN 0xF +#define V_SYN_RX_CHAN(x) ((x) << S_SYN_RX_CHAN) +#define G_SYN_RX_CHAN(x) (((x) >> S_SYN_RX_CHAN) & M_SYN_RX_CHAN) + +#define S_TCP_HDR_LEN 10 +#define M_TCP_HDR_LEN 0x3F +#define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN) +#define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN) + +#define S_IP_HDR_LEN 16 +#define M_IP_HDR_LEN 0x3FF +#define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN) +#define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN) + +#define S_ETH_HDR_LEN 26 +#define M_ETH_HDR_LEN 0x1F +#define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN) +#define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN) + +/* cpl_pass_accept_req.l2info fields */ +#define S_SYN_MAC_IDX 0 +#define M_SYN_MAC_IDX 0x1FF +#define V_SYN_MAC_IDX(x) ((x) << S_SYN_MAC_IDX) +#define G_SYN_MAC_IDX(x) (((x) >> S_SYN_MAC_IDX) & M_SYN_MAC_IDX) + +#define S_SYN_XACT_MATCH 9 +#define V_SYN_XACT_MATCH(x) ((x) << S_SYN_XACT_MATCH) +#define F_SYN_XACT_MATCH V_SYN_XACT_MATCH(1U) + +#define S_SYN_INTF 12 +#define M_SYN_INTF 0xF +#define V_SYN_INTF(x) ((x) << S_SYN_INTF) +#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF) + +struct ulptx_idata { + __be32 cmd_more; + __be32 len; +}; + +#define S_ULPTX_NSGE 0 +#define M_ULPTX_NSGE 0xFFFF +#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE) +#endif /* _T4FW_RI_API_H_ */ diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h new file mode 100644 index 00000000000..ed6414abde0 --- /dev/null +++ b/drivers/infiniband/hw/cxgb4/user.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __C4IW_USER_H__ +#define __C4IW_USER_H__ + +#define C4IW_UVERBS_ABI_VERSION 1 + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). 
+ * In particular do not use pointer types -- pass pointers in __u64 + * instead. + */ +struct c4iw_create_cq_resp { + __u64 key; + __u64 gts_key; + __u64 memsize; + __u32 cqid; + __u32 size; + __u32 qid_mask; +}; + +struct c4iw_create_qp_resp { + __u64 sq_key; + __u64 rq_key; + __u64 sq_db_gts_key; + __u64 rq_db_gts_key; + __u64 sq_memsize; + __u64 rq_memsize; + __u32 sqid; + __u32 rqid; + __u32 sq_size; + __u32 rq_size; + __u32 qid_mask; +}; +#endif -- cgit v1.2.3-70-g09d2 From ce6e74f23d8018f50609f694b6177c139486ebe5 Mon Sep 17 00:00:00 2001 From: Chien Tung Date: Tue, 9 Mar 2010 21:50:40 +0000 Subject: RDMA/nes: Make nesadapter->phy_lock usage consistent nes_{read,write}_1G_phy_reg() are using phy_lock while nes_{read,write}_10G_phy_reg() leave that to the caller. Remove phy_lock from 1G routines and leave the locking to the caller. Add additional phy_lock calls around 1G read/write. Signed-off-by: Chien Tung Signed-off-by: Roland Dreier --- drivers/infiniband/hw/nes/nes_hw.c | 8 +++----- drivers/infiniband/hw/nes/nes_nic.c | 16 +++++++++++----- drivers/infiniband/hw/nes/nes_utils.c | 10 ---------- 3 files changed, 14 insertions(+), 20 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index c36a3f51492..8b67207c707 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -2458,7 +2458,6 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) return; } nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT; - spin_unlock_irqrestore(&nesadapter->phy_lock, flags); /* ack the MAC interrupt */ mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200)); @@ -2469,11 +2468,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) { nesdev->link_status_interrupts++; - if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) { - spin_lock_irqsave(&nesadapter->phy_lock, flags); + if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) nes_reset_link(nesdev, mac_index); - spin_unlock_irqrestore(&nesadapter->phy_lock, flags); - } + /* read the PHY interrupt status register */ if ((nesadapter->OneG_Mode) && (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { @@ -2587,6 +2584,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) break; } } + spin_unlock_irqrestore(&nesadapter->phy_lock, flags); if (phy_data & 0x0004) { if (wide_ppm_offset && diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index b7c813f4be4..9f4cadf9f85 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1461,11 +1461,14 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd et_cmd->transceiver = XCVR_INTERNAL; et_cmd->phy_address = mac_index; } else { + unsigned long flags; et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg; et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg; + spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); + spin_unlock_irqrestore(&nesadapter->phy_lock, flags); if (phy_data & 0x1000) et_cmd->autoneg = AUTONEG_ENABLE; else @@ -1503,12 +1506,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd struct nes_vnic *nesvnic = 
netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; - u16 phy_data; if ((nesadapter->OneG_Mode) && (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) { - nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], - &phy_data); + unsigned long flags; + u16 phy_data; + u8 phy_index = nesadapter->phy_index[nesdev->mac_index]; + + spin_lock_irqsave(&nesadapter->phy_lock, flags); + nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); if (et_cmd->autoneg) { /* Turn on Full duplex, Autoneg, and restart autonegotiation */ phy_data |= 0x1300; @@ -1516,8 +1522,8 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd /* Turn off autoneg */ phy_data &= ~0x1000; } - nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], - phy_data); + nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); + spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } return 0; diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index 186623d8695..a9f5dd272f1 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c @@ -381,12 +381,8 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset) */ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data) { - struct nes_adapter *nesadapter = nesdev->nesadapter; u32 u32temp; u32 counter; - unsigned long flags; - - spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); @@ -402,8 +398,6 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1 if (!(u32temp & 1)) nes_debug(NES_DBG_PHY, "Phy is not responding. 
interrupt status = 0x%X.\n", u32temp); - - spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } @@ -414,14 +408,11 @@ void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u1 */ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data) { - struct nes_adapter *nesadapter = nesdev->nesadapter; u32 u32temp; u32 counter; - unsigned long flags; /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n", phy_addr, nesdev->mac_index); */ - spin_lock_irqsave(&nesadapter->phy_lock, flags); nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL, 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23)); @@ -441,7 +432,6 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 } else { *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); } - spin_unlock_irqrestore(&nesadapter->phy_lock, flags); } -- cgit v1.2.3-70-g09d2 From 53978b46cd946ef1dba96ed6d0276ff656dd5d42 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 21 Apr 2010 15:58:28 -0700 Subject: RDMA/nes: Make unnecessarily global functions static This allows the compiler to do a bit better; on my x86-64 build: add/remove: 0/2 grow/shrink: 1/0 up/down: 2288/-2365 (-77) function old new delta nes_init_phy 273 2561 +2288 nes_init_1g_phy 469 - -469 nes_init_2025_phy 1896 - -1896 Signed-off-by: Roland Dreier --- drivers/infiniband/hw/nes/nes_hw.c | 4 ++-- drivers/infiniband/hw/nes/nes_verbs.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8b67207c707..86acb7d5706 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -1297,7 +1297,7 @@ int nes_destroy_cqp(struct nes_device *nesdev) /** * nes_init_1g_phy */ -int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) +static int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) { u32 counter = 0; u16 phy_data; @@ -1351,7 +1351,7 @@ int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) /** * nes_init_2025_phy */ -int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) +static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) { u32 temp_phy_data = 0; u32 temp_phy_data2 = 0; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index e54f312e4bd..925e1f2d1d5 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -374,7 +374,7 @@ static int alloc_fast_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, /* * nes_alloc_fast_reg_mr */ -struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len) +static struct ib_mr *nes_alloc_fast_reg_mr(struct ib_pd *ibpd, int max_page_list_len) { struct nes_pd *nespd = to_nespd(ibpd); struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); -- cgit v1.2.3-70-g09d2 From 5d7220e8dc24feed4bbd66667b7696906a147ac4 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Thu, 15 Apr 2010 11:29:04 +0900 Subject: RDMA/cma: Randomize local port allocation Randomize local port allocation in the way sctp_get_port_local() does. Update rover at the end of the loop since we're likely to pick a valid port on the first try.
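To make the strategy concrete, here is a minimal user-space sketch of the same first-fit-with-random-start scheme. It is illustrative only: the fixed PORT_LOW/PORT_HIGH range and the port_in_use bitmap are hypothetical stand-ins for inet_get_local_port_range() and the idr, and alloc_any_port() models the idea behind cma_alloc_any_port(), not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PORT_LOW  32768			/* assumed ephemeral range */
#define PORT_HIGH 60999
#define NPORTS    (PORT_HIGH - PORT_LOW + 1)

static unsigned char port_in_use[NPORTS];	/* stands in for the idr */
static unsigned int last_used_port;		/* avoids immediate reuse */

/* Start the rover at a random port and probe at most NPORTS slots. */
static unsigned int alloc_any_port(void)
{
	int remaining = NPORTS;
	unsigned int rover = PORT_LOW + rand() % NPORTS;

	do {
		if (rover != last_used_port && !port_in_use[rover - PORT_LOW]) {
			port_in_use[rover - PORT_LOW] = 1;
			last_used_port = rover;	/* remembered across calls */
			return rover;
		}
		/* advance (and wrap) only after a failed probe */
		if (++rover > PORT_HIGH)
			rover = PORT_LOW;
	} while (--remaining);
	return 0;				/* range exhausted */
}

int main(void)
{
	srand(time(NULL));
	printf("allocated %u, then %u\n", alloc_any_port(), alloc_any_port());
	return 0;
}

Skipping last_used_port mirrors the guard against handing a just-closed port straight back out, and incrementing the rover only after a failed probe is the "update at the end of the loop" point made above.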
Signed-off-by: Tetsuo Handa Reviewed-by: Sean Hefty Signed-off-by: Roland Dreier --- drivers/infiniband/core/cma.c | 70 ++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 45 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6d777069d86..6ae418e81d8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -79,7 +79,6 @@ static DEFINE_IDR(sdp_ps); static DEFINE_IDR(tcp_ps); static DEFINE_IDR(udp_ps); static DEFINE_IDR(ipoib_ps); -static int next_port; struct cma_device { struct list_head list; @@ -1970,47 +1969,33 @@ err1: static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv) { - struct rdma_bind_list *bind_list; - int port, ret, low, high; - - bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL); - if (!bind_list) - return -ENOMEM; - -retry: - /* FIXME: add proper port randomization per like inet_csk_get_port */ - do { - ret = idr_get_new_above(ps, bind_list, next_port, &port); - } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL)); - - if (ret) - goto err1; + static unsigned int last_used_port; + int low, high, remaining; + unsigned int rover; inet_get_local_port_range(&low, &high); - if (port > high) { - if (next_port != low) { - idr_remove(ps, port); - next_port = low; - goto retry; - } - ret = -EADDRNOTAVAIL; - goto err2; + remaining = (high - low) + 1; + rover = net_random() % remaining + low; +retry: + if (last_used_port != rover && + !idr_find(ps, (unsigned short) rover)) { + int ret = cma_alloc_port(ps, id_priv, rover); + /* + * Remember previously used port number in order to avoid + * re-using same port immediately after it is closed. + */ + if (!ret) + last_used_port = rover; + if (ret != -EADDRNOTAVAIL) + return ret; } - - if (port == high) - next_port = low; - else - next_port = port + 1; - - bind_list->ps = ps; - bind_list->port = (unsigned short) port; - cma_bind_port(bind_list, id_priv); - return 0; -err2: - idr_remove(ps, port); -err1: - kfree(bind_list); - return ret; + if (--remaining) { + rover++; + if ((rover < low) || (rover > high)) + rover = low; + goto retry; + } + return -EADDRNOTAVAIL; } static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) @@ -2995,12 +2980,7 @@ static void cma_remove_one(struct ib_device *device) static int __init cma_init(void) { - int ret, low, high, remaining; - - get_random_bytes(&next_port, sizeof next_port); - inet_get_local_port_range(&low, &high); - remaining = (high - low) + 1; - next_port = ((unsigned int) next_port % remaining) + low; + int ret; cma_wq = create_singlethread_workqueue("rdma_cm"); if (!cma_wq) -- cgit v1.2.3-70-g09d2 From 6fa8f719844b8455033e295f720e739c1dc3804a Mon Sep 17 00:00:00 2001 From: Vladimir Sokolovsky Date: Wed, 14 Apr 2010 17:23:39 +0300 Subject: IB/mlx4: Add support for masked atomic operations Add support for masked atomic operations (masked compare and swap, masked fetch and add). 
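As background on the semantics (stated here as a host-side model, not as driver code): a masked compare-and-swap compares the target word only under compare_mask and, on a match, replaces only the bits selected by swap_mask; masked fetch-and-add behaves like fetch-and-add with carries suppressed at the mask boundaries. A single-threaded C sketch of the compare-and-swap variant:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of a masked compare-and-swap on a 64-bit word.  Returns the
 * prior value, as the hardware operation does; the real operation is
 * atomic at the responder's memory, which this model ignores.
 */
static uint64_t masked_cmp_swap(uint64_t *addr,
				uint64_t compare, uint64_t compare_mask,
				uint64_t swap, uint64_t swap_mask)
{
	uint64_t old = *addr;

	if ((old & compare_mask) == (compare & compare_mask))
		*addr = (old & ~swap_mask) | (swap & swap_mask);
	return old;
}

int main(void)
{
	uint64_t word = 0x1122334455667788ULL;

	/* match on the low byte only, rewrite only the top byte */
	masked_cmp_swap(&word, 0x88, 0xff,
			0xaa00000000000000ULL, 0xff00000000000000ULL);
	printf("0x%016llx\n", (unsigned long long)word);	/* 0xaa22334455667788 */
	return 0;
}

In effect, several independent sub-fields of one 64-bit word can be updated without disturbing each other.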
Signed-off-by: Vladimir Sokolovsky Signed-off-by: Roland Dreier --- drivers/infiniband/hw/mlx4/cq.c | 8 +++++++ drivers/infiniband/hw/mlx4/main.c | 1 + drivers/infiniband/hw/mlx4/qp.c | 50 ++++++++++++++++++++++++++++++--------- include/linux/mlx4/device.h | 4 ++-- include/linux/mlx4/qp.h | 7 ++++++ 5 files changed, 57 insertions(+), 13 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index cc2ddd29ac5..5a219a2fdf1 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -661,6 +661,14 @@ repoll: wc->opcode = IB_WC_FETCH_ADD; wc->byte_len = 8; break; + case MLX4_OPCODE_MASKED_ATOMIC_CS: + wc->opcode = IB_WC_MASKED_COMP_SWAP; + wc->byte_len = 8; + break; + case MLX4_OPCODE_MASKED_ATOMIC_FA: + wc->opcode = IB_WC_MASKED_FETCH_ADD; + wc->byte_len = 8; + break; case MLX4_OPCODE_BIND_MW: wc->opcode = IB_WC_BIND_MW; break; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 01f2a3f9335..39051417054 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -139,6 +139,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay; props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ? IB_ATOMIC_HCA : IB_ATOMIC_NONE; + props->masked_atomic_cap = IB_ATOMIC_HCA; props->max_pkeys = dev->dev->caps.pkey_table_len[1]; props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms; props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 5643f4a8ffe..6a60827b230 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -74,17 +74,19 @@ enum { }; static const __be32 mlx4_ib_opcode[] = { - [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), - [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), - [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), - [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), - [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), - [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), - [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), - [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), - [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), - [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), - [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), + [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), + [IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO), + [IB_WR_SEND_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_SEND_IMM), + [IB_WR_RDMA_WRITE] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE), + [IB_WR_RDMA_WRITE_WITH_IMM] = cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM), + [IB_WR_RDMA_READ] = cpu_to_be32(MLX4_OPCODE_RDMA_READ), + [IB_WR_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_ATOMIC_CS), + [IB_WR_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_ATOMIC_FA), + [IB_WR_SEND_WITH_INV] = cpu_to_be32(MLX4_OPCODE_SEND_INVAL), + [IB_WR_LOCAL_INV] = cpu_to_be32(MLX4_OPCODE_LOCAL_INVAL), + [IB_WR_FAST_REG_MR] = cpu_to_be32(MLX4_OPCODE_FMR), + [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS), + [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), }; static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) @@ -1407,6 +1409,9 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr * if (wr->opcode == 
IB_WR_ATOMIC_CMP_AND_SWP) { aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); + } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) { + aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask); } else { aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add); aseg->compare = 0; @@ -1414,6 +1419,15 @@ static void set_atomic_seg(struct mlx4_wqe_atomic_seg *aseg, struct ib_send_wr * } +static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, + struct ib_send_wr *wr) +{ + aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap); + aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask); + aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add); + aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask); +} + static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, struct ib_send_wr *wr) { @@ -1567,6 +1581,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: + case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD: set_raddr_seg(wqe, wr->wr.atomic.remote_addr, wr->wr.atomic.rkey); wqe += sizeof (struct mlx4_wqe_raddr_seg); @@ -1579,6 +1594,19 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; + case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: + set_raddr_seg(wqe, wr->wr.atomic.remote_addr, + wr->wr.atomic.rkey); + wqe += sizeof (struct mlx4_wqe_raddr_seg); + + set_masked_atomic_seg(wqe, wr); + wqe += sizeof (struct mlx4_wqe_masked_atomic_seg); + + size += (sizeof (struct mlx4_wqe_raddr_seg) + + sizeof (struct mlx4_wqe_masked_atomic_seg)) / 16; + + break; + case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index e92d1bfdb33..7a7f9c1e679 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -123,8 +123,8 @@ enum { MLX4_OPCODE_RDMA_READ = 0x10, MLX4_OPCODE_ATOMIC_CS = 0x11, MLX4_OPCODE_ATOMIC_FA = 0x12, - MLX4_OPCODE_ATOMIC_MASK_CS = 0x14, - MLX4_OPCODE_ATOMIC_MASK_FA = 0x15, + MLX4_OPCODE_MASKED_ATOMIC_CS = 0x14, + MLX4_OPCODE_MASKED_ATOMIC_FA = 0x15, MLX4_OPCODE_BIND_MW = 0x18, MLX4_OPCODE_FMR = 0x19, MLX4_OPCODE_LOCAL_INVAL = 0x1b, diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 9f29d86e5dc..7abe64326f7 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -285,6 +285,13 @@ struct mlx4_wqe_atomic_seg { __be64 compare; }; +struct mlx4_wqe_masked_atomic_seg { + __be64 swap_add; + __be64 compare; + __be64 swap_add_mask; + __be64 compare_mask; +}; + struct mlx4_wqe_data_seg { __be32 byte_count; __be32 lkey; -- cgit v1.2.3-70-g09d2 From d414371795d54fa916938f948105d08928abfbb9 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 4 Mar 2010 13:16:52 +0000 Subject: IPoIB: Allow disabling/enabling TSO on the fly through ethtool Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index d10b4ec68d2..40e858492f9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -49,6 +49,25 @@ static u32 ipoib_get_rx_csum(struct net_device *dev) !test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); } +static int 
ipoib_set_tso(struct net_device *dev, u32 data) +{ + struct ipoib_dev_priv *priv = netdev_priv(dev); + + if (data) { + if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) && + (dev->features & NETIF_F_SG) && + (priv->hca_caps & IB_DEVICE_UD_TSO)) { + dev->features |= NETIF_F_TSO; + } else { + ipoib_warn(priv, "can't set TSO on\n"); + return -EOPNOTSUPP; + } + } else + dev->features &= ~NETIF_F_TSO; + + return 0; +} + static int ipoib_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { @@ -131,6 +150,7 @@ static void ipoib_get_ethtool_stats(struct net_device *dev, static const struct ethtool_ops ipoib_ethtool_ops = { .get_drvinfo = ipoib_get_drvinfo, .get_rx_csum = ipoib_get_rx_csum, + .set_tso = ipoib_set_tso, .get_coalesce = ipoib_get_coalesce, .set_coalesce = ipoib_set_coalesce, .get_flags = ethtool_op_get_flags, -- cgit v1.2.3-70-g09d2 From 617c9a7e398878d036a3aa9a063ccba145854b45 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 28 Apr 2010 14:57:40 -0700 Subject: RDMA/cxgb3: Shrink .text with compile-time init of handlers arrays Using compile-time designated initializers for the handler arrays instead of open-coding the initialization in iwch_cm_init() is (IMHO) cleaner, and leads to substantially smaller code: on my x86-64 build, bloat-o-meter shows: add/remove: 0/1 grow/shrink: 4/3 up/down: 4/-1682 (-1678) function old new delta tx_ack 167 168 +1 state_set 55 56 +1 start_ep_timer 99 100 +1 pass_establish 177 178 +1 act_open_req_arp_failure 39 38 -1 sched 84 82 -2 iwch_cm_init 442 91 -351 work_handlers 1328 - -1328 Signed-off-by: Roland Dreier --- drivers/infiniband/hw/cxgb3/iwch.c | 2 - drivers/infiniband/hw/cxgb3/iwch_cm.c | 129 +++++++++++++++++----------------- 2 files changed, 65 insertions(+), 66 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index 63f975f3e30..8e77dc543dd 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c @@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); -cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS]; - static void open_rnic_dev(struct t3cdev *); static void close_rnic_dev(struct t3cdev *); static void iwch_event_handler(struct t3cdev *, u32, u32); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index cfd6db019f1..ebfb117ba68 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1; module_param(cong_flavor, uint, 0644); MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)"); -static void process_work(struct work_struct *work); static struct workqueue_struct *workq; -static DECLARE_WORK(skb_work, process_work); static struct sk_buff_head rxq; -static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS]; static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); static void ep_timeout(unsigned long arg); @@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep) put_ep(&ep->com); } -static void process_work(struct work_struct *work) -{ - struct sk_buff *skb = NULL; - void *ep; - struct t3cdev *tdev; - int ret; - - while ((skb = skb_dequeue(&rxq))) { - ep = *((void **) (skb->cb)); - tdev = *((struct t3cdev **) (skb->cb + sizeof(void *))); - ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep); - if (ret & CPL_RET_BUF_DONE) - 
kfree_skb(skb); - - /* - * ep was referenced in sched(), and is freed here. - */ - put_ep((struct iwch_ep_common *)ep); - } -} - static int status2errno(int status) { switch (status) { @@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, /* * All the CM events are handled on a work queue to have a safe context. + * These are the real handlers that are called from the work queue. */ +static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = act_establish, + [CPL_ACT_OPEN_RPL] = act_open_rpl, + [CPL_RX_DATA] = rx_data, + [CPL_TX_DMA_ACK] = tx_ack, + [CPL_ABORT_RPL_RSS] = abort_rpl, + [CPL_ABORT_RPL] = abort_rpl, + [CPL_PASS_OPEN_RPL] = pass_open_rpl, + [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, + [CPL_PASS_ACCEPT_REQ] = pass_accept_req, + [CPL_PASS_ESTABLISH] = pass_establish, + [CPL_PEER_CLOSE] = peer_close, + [CPL_ABORT_REQ_RSS] = peer_abort, + [CPL_CLOSE_CON_RPL] = close_con_rpl, + [CPL_RDMA_TERMINATE] = terminate, + [CPL_RDMA_EC_STATUS] = ec_status, +}; + +static void process_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + void *ep; + struct t3cdev *tdev; + int ret; + + while ((skb = skb_dequeue(&rxq))) { + ep = *((void **) (skb->cb)); + tdev = *((struct t3cdev **) (skb->cb + sizeof(void *))); + ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep); + if (ret & CPL_RET_BUF_DONE) + kfree_skb(skb); + + /* + * ep was referenced in sched(), and is freed here. + */ + put_ep((struct iwch_ep_common *)ep); + } +} + +static DECLARE_WORK(skb_work, process_work); + static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) { struct iwch_ep_common *epc = ctx; @@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) return CPL_RET_BUF_DONE; } +/* + * All upcalls from the T3 Core go to sched() to schedule the + * processing on a work queue. + */ +cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = sched, + [CPL_ACT_OPEN_RPL] = sched, + [CPL_RX_DATA] = sched, + [CPL_TX_DMA_ACK] = sched, + [CPL_ABORT_RPL_RSS] = sched, + [CPL_ABORT_RPL] = sched, + [CPL_PASS_OPEN_RPL] = sched, + [CPL_CLOSE_LISTSRV_RPL] = sched, + [CPL_PASS_ACCEPT_REQ] = sched, + [CPL_PASS_ESTABLISH] = sched, + [CPL_PEER_CLOSE] = sched, + [CPL_CLOSE_CON_RPL] = sched, + [CPL_ABORT_REQ_RSS] = sched, + [CPL_RDMA_TERMINATE] = sched, + [CPL_RDMA_EC_STATUS] = sched, + [CPL_SET_TCB_RPL] = set_tcb_rpl, +}; + int __init iwch_cm_init(void) { skb_queue_head_init(&rxq); @@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void) if (!workq) return -ENOMEM; - /* - * All upcalls from the T3 Core go to sched() to - * schedule the processing on a work queue. - */ - t3c_handlers[CPL_ACT_ESTABLISH] = sched; - t3c_handlers[CPL_ACT_OPEN_RPL] = sched; - t3c_handlers[CPL_RX_DATA] = sched; - t3c_handlers[CPL_TX_DMA_ACK] = sched; - t3c_handlers[CPL_ABORT_RPL_RSS] = sched; - t3c_handlers[CPL_ABORT_RPL] = sched; - t3c_handlers[CPL_PASS_OPEN_RPL] = sched; - t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched; - t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched; - t3c_handlers[CPL_PASS_ESTABLISH] = sched; - t3c_handlers[CPL_PEER_CLOSE] = sched; - t3c_handlers[CPL_CLOSE_CON_RPL] = sched; - t3c_handlers[CPL_ABORT_REQ_RSS] = sched; - t3c_handlers[CPL_RDMA_TERMINATE] = sched; - t3c_handlers[CPL_RDMA_EC_STATUS] = sched; - t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl; - - /* - * These are the real handlers that are called from a - * work queue. 
- */ - work_handlers[CPL_ACT_ESTABLISH] = act_establish; - work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl; - work_handlers[CPL_RX_DATA] = rx_data; - work_handlers[CPL_TX_DMA_ACK] = tx_ack; - work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl; - work_handlers[CPL_ABORT_RPL] = abort_rpl; - work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl; - work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl; - work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req; - work_handlers[CPL_PASS_ESTABLISH] = pass_establish; - work_handlers[CPL_PEER_CLOSE] = peer_close; - work_handlers[CPL_ABORT_REQ_RSS] = peer_abort; - work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl; - work_handlers[CPL_RDMA_TERMINATE] = terminate; - work_handlers[CPL_RDMA_EC_STATUS] = ec_status; return 0; } -- cgit v1.2.3-70-g09d2 From be4c9bad9d0edb6bc3bd8fffc2f98e0e2112da39 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Wed, 5 May 2010 14:45:40 -0700 Subject: MAINTAINERS: Add cxgb4 and iw_cxgb4 entries Signed-off-by: Roland Dreier --- MAINTAINERS | 14 ++ drivers/infiniband/hw/cxgb4/cm.c | 316 +++++++++++++++++++-------------- drivers/infiniband/hw/cxgb4/ev.c | 6 +- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 + drivers/infiniband/hw/cxgb4/provider.c | 4 +- drivers/infiniband/hw/cxgb4/qp.c | 28 +-- drivers/infiniband/hw/cxgb4/t4.h | 32 +++- 7 files changed, 238 insertions(+), 164 deletions(-) (limited to 'drivers/infiniband') diff --git a/MAINTAINERS b/MAINTAINERS index 7a9ccda2a30..8a32aec8a48 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1719,6 +1719,20 @@ W: http://www.openfabrics.org S: Supported F: drivers/infiniband/hw/cxgb3/ +CXGB4 ETHERNET DRIVER (CXGB4) +M: Dimitris Michailidis +L: netdev@vger.kernel.org +W: http://www.chelsio.com +S: Supported +F: drivers/net/cxgb4/ + +CXGB4 IWARP RNIC DRIVER (IW_CXGB4) +M: Steve Wise +L: linux-rdma@vger.kernel.org +W: http://www.openfabrics.org +S: Supported +F: drivers/infiniband/hw/cxgb4/ + CYBERPRO FB DRIVER M: Russell King L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 07b068be0cf..30ce0a8eca0 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -61,6 +61,10 @@ static char *states[] = { NULL, }; +int c4iw_max_read_depth = 8; +module_param(c4iw_max_read_depth, int, 0644); +MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); + static int enable_tcp_timestamps; module_param(enable_tcp_timestamps, int, 0644); MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); @@ -113,18 +117,17 @@ static int snd_win = 32 * 1024; module_param(snd_win, int, 0644); MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)"); -static void process_work(struct work_struct *work); static struct workqueue_struct *workq; -static DECLARE_WORK(skb_work, process_work); static struct sk_buff_head rxq; -static c4iw_handler_func work_handlers[NUM_CPL_CMDS]; -c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); static void ep_timeout(unsigned long arg); static void connect_reply_upcall(struct c4iw_ep *ep, int status); +static LIST_HEAD(timeout_list); +static spinlock_t timeout_lock; + static void start_ep_timer(struct c4iw_ep *ep) { PDBG("%s ep %p\n", __func__, ep); @@ -271,26 +274,6 @@ static void release_ep_resources(struct c4iw_ep *ep) c4iw_put_ep(&ep->com); } -static void process_work(struct work_struct *work) -{ - struct sk_buff *skb = NULL; - struct 
c4iw_dev *dev; - struct cpl_act_establish *rpl = cplhdr(skb); - unsigned int opcode; - int ret; - - while ((skb = skb_dequeue(&rxq))) { - rpl = cplhdr(skb); - dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); - opcode = rpl->ot.opcode; - - BUG_ON(!work_handlers[opcode]); - ret = work_handlers[opcode](dev, skb); - if (!ret) - kfree_skb(skb); - } -} - static int status2errno(int status) { switch (status) { @@ -1795,76 +1778,6 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) return 0; } -static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) -{ - struct cpl_fw6_msg *rpl = cplhdr(skb); - struct c4iw_wr_wait *wr_waitp; - int ret; - - PDBG("%s type %u\n", __func__, rpl->type); - - switch (rpl->type) { - case 1: - ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); - wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1]; - PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); - if (wr_waitp) { - wr_waitp->ret = ret; - wr_waitp->done = 1; - wake_up(&wr_waitp->wait); - } - break; - case 2: - c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); - break; - default: - printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, - rpl->type); - break; - } - return 0; -} - -static void ep_timeout(unsigned long arg) -{ - struct c4iw_ep *ep = (struct c4iw_ep *)arg; - struct c4iw_qp_attributes attrs; - unsigned long flags; - int abort = 1; - - spin_lock_irqsave(&ep->com.lock, flags); - PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, - ep->com.state); - switch (ep->com.state) { - case MPA_REQ_SENT: - __state_set(&ep->com, ABORTING); - connect_reply_upcall(ep, -ETIMEDOUT); - break; - case MPA_REQ_WAIT: - __state_set(&ep->com, ABORTING); - break; - case CLOSING: - case MORIBUND: - if (ep->com.cm_id && ep->com.qp) { - attrs.next_state = C4IW_QP_STATE_ERROR; - c4iw_modify_qp(ep->com.qp->rhp, - ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, - &attrs, 1); - } - __state_set(&ep->com, ABORTING); - break; - default: - printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", - __func__, ep, ep->hwtid, ep->com.state); - WARN_ON(1); - abort = 0; - } - spin_unlock_irqrestore(&ep->com.lock, flags); - if (abort) - abort_connection(ep, NULL, GFP_ATOMIC); - c4iw_put_ep(&ep->com); -} - int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { int err; @@ -1904,8 +1817,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); BUG_ON(!qp); - if ((conn_param->ord > T4_MAX_READ_DEPTH) || - (conn_param->ird > T4_MAX_READ_DEPTH)) { + if ((conn_param->ord > c4iw_max_read_depth) || + (conn_param->ird > c4iw_max_read_depth)) { abort_connection(ep, NULL, GFP_KERNEL); err = -EINVAL; goto err; @@ -1968,6 +1881,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct net_device *pdev; int step; + if ((conn_param->ord > c4iw_max_read_depth) || + (conn_param->ird > c4iw_max_read_depth)) { + err = -EINVAL; + goto out; + } ep = alloc_ep(sizeof(*ep), GFP_KERNEL); if (!ep) { printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); @@ -2115,7 +2033,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) */ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); if (ep->stid == -1) { - printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); + printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); err = -ENOMEM; goto fail2; } @@ -2243,6 +2161,116 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) return ret; } +/* + * These are 
the real handlers that are called from a + * work queue. + */ +static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = act_establish, + [CPL_ACT_OPEN_RPL] = act_open_rpl, + [CPL_RX_DATA] = rx_data, + [CPL_ABORT_RPL_RSS] = abort_rpl, + [CPL_ABORT_RPL] = abort_rpl, + [CPL_PASS_OPEN_RPL] = pass_open_rpl, + [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, + [CPL_PASS_ACCEPT_REQ] = pass_accept_req, + [CPL_PASS_ESTABLISH] = pass_establish, + [CPL_PEER_CLOSE] = peer_close, + [CPL_ABORT_REQ_RSS] = peer_abort, + [CPL_CLOSE_CON_RPL] = close_con_rpl, + [CPL_RDMA_TERMINATE] = terminate, + [CPL_FW4_ACK] = fw4_ack +}; + +static void process_timeout(struct c4iw_ep *ep) +{ + struct c4iw_qp_attributes attrs; + int abort = 1; + + spin_lock_irq(&ep->com.lock); + PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, + ep->com.state); + switch (ep->com.state) { + case MPA_REQ_SENT: + __state_set(&ep->com, ABORTING); + connect_reply_upcall(ep, -ETIMEDOUT); + break; + case MPA_REQ_WAIT: + __state_set(&ep->com, ABORTING); + break; + case CLOSING: + case MORIBUND: + if (ep->com.cm_id && ep->com.qp) { + attrs.next_state = C4IW_QP_STATE_ERROR; + c4iw_modify_qp(ep->com.qp->rhp, + ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, + &attrs, 1); + } + __state_set(&ep->com, ABORTING); + break; + default: + printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n", + __func__, ep, ep->hwtid, ep->com.state); + WARN_ON(1); + abort = 0; + } + spin_unlock_irq(&ep->com.lock); + if (abort) + abort_connection(ep, NULL, GFP_KERNEL); + c4iw_put_ep(&ep->com); +} + +static void process_timedout_eps(void) +{ + struct c4iw_ep *ep; + + spin_lock_irq(&timeout_lock); + while (!list_empty(&timeout_list)) { + struct list_head *tmp; + + tmp = timeout_list.next; + list_del(tmp); + spin_unlock_irq(&timeout_lock); + ep = list_entry(tmp, struct c4iw_ep, entry); + process_timeout(ep); + spin_lock_irq(&timeout_lock); + } + spin_unlock_irq(&timeout_lock); +} + +static void process_work(struct work_struct *work) +{ + struct sk_buff *skb = NULL; + struct c4iw_dev *dev; + struct cpl_act_establish *rpl = cplhdr(skb); + unsigned int opcode; + int ret; + + while ((skb = skb_dequeue(&rxq))) { + rpl = cplhdr(skb); + dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); + opcode = rpl->ot.opcode; + + BUG_ON(!work_handlers[opcode]); + ret = work_handlers[opcode](dev, skb); + if (!ret) + kfree_skb(skb); + } + process_timedout_eps(); +} + +static DECLARE_WORK(skb_work, process_work); + +static void ep_timeout(unsigned long arg) +{ + struct c4iw_ep *ep = (struct c4iw_ep *)arg; + + spin_lock(&timeout_lock); + list_add_tail(&ep->entry, &timeout_list); + spin_unlock(&timeout_lock); + queue_work(workq, &skb_work); +} + /* * All the CM events are handled on a work queue to have a safe context. 
*/ @@ -2273,58 +2301,74 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) return 0; } +static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) +{ + struct cpl_fw6_msg *rpl = cplhdr(skb); + struct c4iw_wr_wait *wr_waitp; + int ret; + + PDBG("%s type %u\n", __func__, rpl->type); + + switch (rpl->type) { + case 1: + ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); + wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1]; + PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); + if (wr_waitp) { + wr_waitp->ret = ret; + wr_waitp->done = 1; + wake_up(&wr_waitp->wait); + } + break; + case 2: + c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); + break; + default: + printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, + rpl->type); + break; + } + return 0; +} + +/* + * Most upcalls from the T4 Core go to sched() to + * schedule the processing on a work queue. + */ +c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { + [CPL_ACT_ESTABLISH] = sched, + [CPL_ACT_OPEN_RPL] = sched, + [CPL_RX_DATA] = sched, + [CPL_ABORT_RPL_RSS] = sched, + [CPL_ABORT_RPL] = sched, + [CPL_PASS_OPEN_RPL] = sched, + [CPL_CLOSE_LISTSRV_RPL] = sched, + [CPL_PASS_ACCEPT_REQ] = sched, + [CPL_PASS_ESTABLISH] = sched, + [CPL_PEER_CLOSE] = sched, + [CPL_CLOSE_CON_RPL] = sched, + [CPL_ABORT_REQ_RSS] = sched, + [CPL_RDMA_TERMINATE] = sched, + [CPL_FW4_ACK] = sched, + [CPL_SET_TCB_RPL] = set_tcb_rpl, + [CPL_FW6_MSG] = fw6_msg +}; + int __init c4iw_cm_init(void) { + spin_lock_init(&timeout_lock); skb_queue_head_init(&rxq); workq = create_singlethread_workqueue("iw_cxgb4"); if (!workq) return -ENOMEM; - /* - * Most upcalls from the T4 Core go to sched() to - * schedule the processing on a work queue. - */ - c4iw_handlers[CPL_ACT_ESTABLISH] = sched; - c4iw_handlers[CPL_ACT_OPEN_RPL] = sched; - c4iw_handlers[CPL_RX_DATA] = sched; - c4iw_handlers[CPL_ABORT_RPL_RSS] = sched; - c4iw_handlers[CPL_ABORT_RPL] = sched; - c4iw_handlers[CPL_PASS_OPEN_RPL] = sched; - c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched; - c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched; - c4iw_handlers[CPL_PASS_ESTABLISH] = sched; - c4iw_handlers[CPL_PEER_CLOSE] = sched; - c4iw_handlers[CPL_CLOSE_CON_RPL] = sched; - c4iw_handlers[CPL_ABORT_REQ_RSS] = sched; - c4iw_handlers[CPL_RDMA_TERMINATE] = sched; - c4iw_handlers[CPL_FW4_ACK] = sched; - c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl; - c4iw_handlers[CPL_FW6_MSG] = fw6_msg; - - /* - * These are the real handlers that are called from a - * work queue. 
- */ - work_handlers[CPL_ACT_ESTABLISH] = act_establish; - work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl; - work_handlers[CPL_RX_DATA] = rx_data; - work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl; - work_handlers[CPL_ABORT_RPL] = abort_rpl; - work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl; - work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl; - work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req; - work_handlers[CPL_PASS_ESTABLISH] = pass_establish; - work_handlers[CPL_PEER_CLOSE] = peer_close; - work_handlers[CPL_ABORT_REQ_RSS] = peer_abort; - work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl; - work_handlers[CPL_RDMA_TERMINATE] = terminate; - work_handlers[CPL_FW4_ACK] = fw4_ack; return 0; } void __exit c4iw_cm_term(void) { + WARN_ON(!list_empty(&timeout_list)); flush_workqueue(workq); destroy_workqueue(workq); } diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index 1bd6a3e531a..491e76a0327 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -51,8 +51,8 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, return; } - printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x " - "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__, + printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x " + "type %d wrid.hi 0x%x wrid.lo 0x%x\n", CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); @@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, if (qhp->attr.state == C4IW_QP_STATE_RTS) { attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, - &attrs, 0); + &attrs, 1); } event.event = ib_event; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index ccce6fe7570..a6269981e81 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -597,6 +597,7 @@ struct c4iw_ep { struct c4iw_ep_common com; struct c4iw_ep *parent_ep; struct timer_list timer; + struct list_head entry; unsigned int atid; u32 hwtid; u32 snd_seq; @@ -739,5 +740,6 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe); extern struct cxgb4_client t4c_client; extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; +extern int c4iw_max_read_depth; #endif diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 3cb50af3e52..dfc49020bb9 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -267,8 +267,8 @@ static int c4iw_query_device(struct ib_device *ibdev, props->max_qp_wr = T4_MAX_QP_DEPTH; props->max_sge = T4_MAX_RECV_SGE; props->max_sge_rd = 1; - props->max_qp_rd_atom = T4_MAX_READ_DEPTH; - props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH; + props->max_qp_rd_atom = c4iw_max_read_depth; + props->max_qp_init_rd_atom = c4iw_max_read_depth; props->max_cq = T4_MAX_NUM_CQ; props->max_cqe = T4_MAX_CQ_DEPTH; props->max_mr = c4iw_num_stags(&dev->rdev); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index bd56c841ef7..83a01dc0c4c 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -856,7 +856,8 @@ int c4iw_post_zb_read(struct c4iw_qp *qhp) return c4iw_ofld_send(&qhp->rhp->rdev, skb); } -int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe) +static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, + gfp_t gfp) { struct fw_ri_wr 
*wqe; struct sk_buff *skb; @@ -865,9 +866,9 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe) PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, qhp->ep->hwtid); - skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL); + skb = alloc_skb(sizeof *wqe, gfp); if (!skb) - return -ENOMEM; + return; set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); @@ -881,7 +882,7 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe) wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term); term = (struct terminate_message *)wqe->u.terminate.termmsg; build_term_codes(err_cqe, &term->layer_etype, &term->ecode); - return c4iw_ofld_send(&qhp->rhp->rdev, skb); + c4iw_ofld_send(&qhp->rhp->rdev, skb); } /* @@ -1130,14 +1131,14 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND) newattr.enable_bind = attrs->enable_bind; if (mask & C4IW_QP_ATTR_MAX_ORD) { - if (attrs->max_ord > T4_MAX_READ_DEPTH) { + if (attrs->max_ord > c4iw_max_read_depth) { ret = -EINVAL; goto out; } newattr.max_ord = attrs->max_ord; } if (mask & C4IW_QP_ATTR_MAX_IRD) { - if (attrs->max_ird > T4_MAX_READ_DEPTH) { + if (attrs->max_ird > c4iw_max_read_depth) { ret = -EINVAL; goto out; } @@ -1215,12 +1216,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, qhp->attr.state = C4IW_QP_STATE_TERMINATE; if (qhp->ibqp.uobject) t4_set_wq_in_error(&qhp->wq); - if (!internal) { - ep = qhp->ep; - c4iw_get_ep(&ep->com); - terminate = 1; - disconnect = 1; - } + ep = qhp->ep; + c4iw_get_ep(&ep->com); + terminate = 1; + disconnect = 1; break; case C4IW_QP_STATE_ERROR: qhp->attr.state = C4IW_QP_STATE_ERROR; @@ -1301,7 +1300,7 @@ out: spin_unlock_irqrestore(&qhp->lock, flag); if (terminate) - c4iw_post_terminate(qhp, NULL); + post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); /* * If disconnect is 1, then we need to initiate a disconnect @@ -1309,7 +1308,8 @@ out: * an abnormal close (RTS/CLOSING->ERROR). */ if (disconnect) { - c4iw_ep_disconnect(ep, abort, GFP_KERNEL); + c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC : + GFP_KERNEL); c4iw_put_ep(&ep->com); } diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 3f0d2172efd..d0e8af35240 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -36,7 +36,6 @@ #include "t4_msg.h" #include "t4fw_ri_api.h" -#define T4_MAX_READ_DEPTH 16 #define T4_QID_BASE 1024 #define T4_MAX_QIDS 256 #define T4_MAX_NUM_QP (1<<16) @@ -450,11 +449,25 @@ struct t4_cq { static inline int t4_arm_cq(struct t4_cq *cq, int se) { u32 val; - - val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) | - INGRESSQID(cq->cqid); - cq->cidx_inc = 0; - writel(val, cq->gts); + u16 inc; + + do { + /* + * inc must be less the both the max update value -and- + * the size of the CQ. + */ + inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc : + CIDXINC_MASK; + inc = inc <= (cq->size - 1) ? 
inc : (cq->size - 1); + if (inc == cq->cidx_inc) + val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) | + INGRESSQID(cq->cqid); + else + val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) | + INGRESSQID(cq->cqid); + cq->cidx_inc -= inc; + writel(val, cq->gts); + } while (cq->cidx_inc); return 0; } @@ -489,11 +502,12 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe) static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) { int ret = 0; + u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts); - if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { + if (G_CQE_GENBIT(bits_type_ts) == cq->gen) { *cqe = &cq->queue[cq->cidx]; - cq->timestamp = CQE_TS(*cqe); - } else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp) + cq->timestamp = G_CQE_TS(bits_type_ts); + } else if (G_CQE_TS(bits_type_ts) > cq->timestamp) ret = -EOVERFLOW; else ret = -ENODATA; -- cgit v1.2.3-70-g09d2 From 2110f9bf37511df06220bb7e977f417baecf2950 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 5 May 2010 17:30:10 +0300 Subject: IB/iser: Add asynchronous event handler Add handler to handle events such as port up and down. This is useful when testing high-availability schemes such as multi-pathing. Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/iser/iscsi_iser.h | 1 + drivers/infiniband/ulp/iser/iser_verbs.c | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 036934cdcb9..53da74b45c7 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -232,6 +232,7 @@ struct iser_device { struct ib_cq *tx_cq; struct ib_mr *mr; struct tasklet_struct cq_tasklet; + struct ib_event_handler event_handler; struct list_head ig_list; /* entry in ig devices list */ int refcount; }; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index b89d76b39a1..b9d6aa102aa 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -54,6 +54,13 @@ static void iser_qp_event_callback(struct ib_event *cause, void *context) iser_err("got qp event %d\n",cause->event); } +static void iser_event_handler(struct ib_event_handler *handler, + struct ib_event *event) +{ + iser_err("async event %d on device %s port %d\n", event->event, + event->device->name, event->element.port_num); +} + /** * iser_create_device_ib_res - creates Protection Domain (PD), Completion * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with @@ -96,8 +103,15 @@ static int iser_create_device_ib_res(struct iser_device *device) if (IS_ERR(device->mr)) goto dma_mr_err; + INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, + iser_event_handler); + if (ib_register_event_handler(&device->event_handler)) + goto handler_err; + return 0; +handler_err: + ib_dereg_mr(device->mr); dma_mr_err: tasklet_kill(&device->cq_tasklet); cq_arm_err: @@ -120,7 +134,7 @@ static void iser_free_device_ib_res(struct iser_device *device) BUG_ON(device->mr == NULL); tasklet_kill(&device->cq_tasklet); - + (void)ib_unregister_event_handler(&device->event_handler); (void)ib_dereg_mr(device->mr); (void)ib_destroy_cq(device->tx_cq); (void)ib_destroy_cq(device->rx_cq); -- cgit v1.2.3-70-g09d2 From d265b9808272c9f25e1c36d3fb5ddb466efd90e9 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 5 May 2010 17:30:34 +0300 Subject: IB/iser: Remove 
buggy back-pointer setting The iscsi connection object life cycle includes binding and unbinding (conn_stop) to/from the iscsi transport connection object. Since iscsi connection objects are recycled, by the time the transport connection (e.g. iser's IB connection) is released, it is not valid to touch the iscsi connection through the transport back-pointer, since it may already point to a different transport connection. Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/iser/iser_verbs.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index b9d6aa102aa..ed7c9013541 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -346,8 +346,6 @@ static void iser_conn_release(struct iser_conn *ib_conn) /* on EVENT_ADDR_ERROR there's no device yet for this conn */ if (device != NULL) iser_device_try_release(device); - if (ib_conn->iser_conn) - ib_conn->iser_conn->ib_conn = NULL; iscsi_destroy_endpoint(ib_conn->ep); } -- cgit v1.2.3-70-g09d2 From 39ff05dbbbdb082bbabf06206c56b3cd4ef73904 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 5 May 2010 17:31:44 +0300 Subject: IB/iser: Enhance disconnection logic for multi-pathing The iser connection teardown flow isn't over until the underlying Connection Manager (e.g. the IB CM) delivers a disconnected or timeout event through the RDMA-CM. When the remote (target) side isn't reachable, e.g. when some HW (port/HCA/switch) isn't functioning or is taken down administratively, the CM timeout flow is used and the event may be generated only after a relatively long time -- on the order of tens of seconds. The current iser code exposes this possibly long delay to higher layers, specifically to the iscsid daemon and the iscsi kernel stack. As a result, the iscsi stack doesn't respond well: this low-level CM delay is added to the fail-over time under HA schemes such as the one provided by DM multipath through the multipathd(8) service. This patch enhances the reference counting scheme on iser's IB connections so that the disconnect flow initiated by iscsid from user space (ep_disconnect) doesn't wait for the CM to deliver the disconnect/timeout event. (From iser's viewpoint, the connection teardown isn't done until the event is delivered.) The iser ib (rdma) connection object is destroyed when its reference count reaches zero. When this happens in RDMA-CM callback context, extra care is taken so that the RDMA-CM itself does the actual destroying of the associated ID, since doing it in the callback is prohibited. The reference count of an iser IB connection normally reaches three; the references are (1) the connection allocation, (2) the iscsi/ib conn binding, and (3) the connection's CMA ID. With this patch, multipath fail-over time is about 30 seconds, while without this patch, multipath fail-over time is about 130 seconds.
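To make the counting scheme concrete, here is a minimal stand-alone sketch -- hypothetical userspace code using C11 atomics, not the driver itself: the three references correspond to the connection allocation, the iscsi/ib conn binding, and the CMA ID, and the put operation reports whether it dropped the last reference so a caller running in CM callback context can leave the actual ID destruction to the CM.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int refcount;
};

static struct conn *conn_alloc(void)
{
	struct conn *c = malloc(sizeof(*c));
	if (!c)
		abort();
	atomic_init(&c->refcount, 1);	/* ref 1: the allocation itself */
	return c;
}

static void conn_get(struct conn *c)
{
	atomic_fetch_add(&c->refcount, 1);
}

/* Returns 1 if this put released the object, 0 otherwise. */
static int conn_put(struct conn *c, int can_destroy_id)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {
		if (can_destroy_id)
			printf("destroying the CMA ID ourselves\n");
		else
			printf("leaving CMA ID destruction to the CM\n");
		free(c);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct conn *c = conn_alloc();	/* ref 1: allocation */
	conn_get(c);			/* ref 2: iscsi/ib conn binding */
	conn_get(c);			/* ref 3: CMA ID */

	conn_put(c, 1);			/* unbind deref */
	conn_put(c, 1);			/* teardown deref */
	conn_put(c, 0);			/* CM callback drops the last ref */
	return 0;
}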
Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/iser/iscsi_iser.c | 9 ++-- drivers/infiniband/ulp/iser/iscsi_iser.h | 3 +- drivers/infiniband/ulp/iser/iser_verbs.c | 72 ++++++++++++++++++-------------- 3 files changed, 46 insertions(+), 38 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 93399dff0c6..7b2fc98e2f2 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -325,7 +325,7 @@ iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) */ if (ib_conn) { ib_conn->iser_conn = NULL; - iser_conn_put(ib_conn); + iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ } } @@ -357,11 +357,12 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, /* binds the iSER connection retrieved from the previously * connected ep_handle to the iSCSI layer connection. exchanges * connection pointers */ - iser_err("binding iscsi conn %p to iser_conn %p\n",conn,ib_conn); + iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n", + conn, conn->dd_data, ib_conn); iser_conn = conn->dd_data; ib_conn->iser_conn = iser_conn; iser_conn->ib_conn = ib_conn; - iser_conn_get(ib_conn); + iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */ return 0; } @@ -382,7 +383,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) * There is no unbind event so the stop callback * must release the ref from the bind. */ - iser_conn_put(ib_conn); + iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ } iser_conn->ib_conn = NULL; } diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 53da74b45c7..f1df01567bb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -247,7 +247,6 @@ struct iser_conn { struct rdma_cm_id *cma_id; /* CMA ID */ struct ib_qp *qp; /* QP */ struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ - int disc_evt_flag; /* disconn event delivered */ wait_queue_head_t wait; /* waitq for conn/disconn */ int post_recv_buf_count; /* posted rx count */ atomic_t post_send_buf_count; /* posted tx count */ @@ -321,7 +320,7 @@ void iser_conn_init(struct iser_conn *ib_conn); void iser_conn_get(struct iser_conn *ib_conn); -void iser_conn_put(struct iser_conn *ib_conn); +int iser_conn_put(struct iser_conn *ib_conn, int destroy_cma_id_allowed); void iser_conn_terminate(struct iser_conn *ib_conn); diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ed7c9013541..78fdecacea3 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -238,7 +238,7 @@ alloc_err: * releases the FMR pool, QP and CMA ID objects, returns 0 on success, * -1 on failure */ -static int iser_free_ib_conn_res(struct iser_conn *ib_conn) +static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) { BUG_ON(ib_conn == NULL); @@ -253,7 +253,8 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn) if (ib_conn->qp != NULL) rdma_destroy_qp(ib_conn->cma_id); - if (ib_conn->cma_id != NULL) + /* if cma handler context, the caller acts s.t the cma destroy the id */ + if (ib_conn->cma_id != NULL && can_destroy_id) rdma_destroy_id(ib_conn->cma_id); ib_conn->fmr_pool = NULL; @@ -331,7 +332,7 @@ static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, /** * Frees all conn objects and deallocs conn descriptor */ -static void 
iser_conn_release(struct iser_conn *ib_conn) +static void iser_conn_release(struct iser_conn *ib_conn, int can_destroy_id) { struct iser_device *device = ib_conn->device; @@ -341,7 +342,7 @@ static void iser_conn_release(struct iser_conn *ib_conn) list_del(&ib_conn->conn_list); mutex_unlock(&ig.connlist_mutex); iser_free_rx_descriptors(ib_conn); - iser_free_ib_conn_res(ib_conn); + iser_free_ib_conn_res(ib_conn, can_destroy_id); ib_conn->device = NULL; /* on EVENT_ADDR_ERROR there's no device yet for this conn */ if (device != NULL) @@ -354,10 +355,13 @@ void iser_conn_get(struct iser_conn *ib_conn) atomic_inc(&ib_conn->refcount); } -void iser_conn_put(struct iser_conn *ib_conn) +int iser_conn_put(struct iser_conn *ib_conn, int can_destroy_id) { - if (atomic_dec_and_test(&ib_conn->refcount)) - iser_conn_release(ib_conn); + if (atomic_dec_and_test(&ib_conn->refcount)) { + iser_conn_release(ib_conn, can_destroy_id); + return 1; + } + return 0; } /** @@ -381,19 +385,20 @@ void iser_conn_terminate(struct iser_conn *ib_conn) wait_event_interruptible(ib_conn->wait, ib_conn->state == ISER_CONN_DOWN); - iser_conn_put(ib_conn); + iser_conn_put(ib_conn, 1); /* deref ib conn deallocate */ } -static void iser_connect_error(struct rdma_cm_id *cma_id) +static int iser_connect_error(struct rdma_cm_id *cma_id) { struct iser_conn *ib_conn; ib_conn = (struct iser_conn *)cma_id->context; ib_conn->state = ISER_CONN_DOWN; wake_up_interruptible(&ib_conn->wait); + return iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ } -static void iser_addr_handler(struct rdma_cm_id *cma_id) +static int iser_addr_handler(struct rdma_cm_id *cma_id) { struct iser_device *device; struct iser_conn *ib_conn; @@ -402,8 +407,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) device = iser_device_find_by_ib_device(cma_id); if (!device) { iser_err("device lookup/creation failed\n"); - iser_connect_error(cma_id); - return; + return iser_connect_error(cma_id); } ib_conn = (struct iser_conn *)cma_id->context; @@ -412,11 +416,13 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) ret = rdma_resolve_route(cma_id, 1000); if (ret) { iser_err("resolve route failed: %d\n", ret); - iser_connect_error(cma_id); + return iser_connect_error(cma_id); } + + return 0; } -static void iser_route_handler(struct rdma_cm_id *cma_id) +static int iser_route_handler(struct rdma_cm_id *cma_id) { struct rdma_conn_param conn_param; int ret; @@ -437,9 +443,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id) goto failure; } - return; + return 0; failure: - iser_connect_error(cma_id); + return iser_connect_error(cma_id); } static void iser_connected_handler(struct rdma_cm_id *cma_id) @@ -451,12 +457,12 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id) wake_up_interruptible(&ib_conn->wait); } -static void iser_disconnected_handler(struct rdma_cm_id *cma_id) +static int iser_disconnected_handler(struct rdma_cm_id *cma_id) { struct iser_conn *ib_conn; + int ret; ib_conn = (struct iser_conn *)cma_id->context; - ib_conn->disc_evt_flag = 1; /* getting here when the state is UP means that the conn is being * * terminated asynchronously from the iSCSI layer's perspective. 
*/ @@ -471,20 +477,24 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) ib_conn->state = ISER_CONN_DOWN; wake_up_interruptible(&ib_conn->wait); } + + ret = iser_conn_put(ib_conn, 0); /* deref ib conn's cma id */ + return ret; } static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) { int ret = 0; - iser_err("event %d conn %p id %p\n",event->event,cma_id->context,cma_id); + iser_err("event %d status %d conn %p id %p\n", + event->event, event->status, cma_id->context, cma_id); switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: - iser_addr_handler(cma_id); + ret = iser_addr_handler(cma_id); break; case RDMA_CM_EVENT_ROUTE_RESOLVED: - iser_route_handler(cma_id); + ret = iser_route_handler(cma_id); break; case RDMA_CM_EVENT_ESTABLISHED: iser_connected_handler(cma_id); @@ -494,13 +504,12 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_REJECTED: - iser_err("event: %d, error: %d\n", event->event, event->status); - iser_connect_error(cma_id); + ret = iser_connect_error(cma_id); break; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_ADDR_CHANGE: - iser_disconnected_handler(cma_id); + ret = iser_disconnected_handler(cma_id); break; default: iser_err("Unexpected RDMA CM event (%d)\n", event->event); @@ -515,7 +524,7 @@ void iser_conn_init(struct iser_conn *ib_conn) init_waitqueue_head(&ib_conn->wait); ib_conn->post_recv_buf_count = 0; atomic_set(&ib_conn->post_send_buf_count, 0); - atomic_set(&ib_conn->refcount, 1); + atomic_set(&ib_conn->refcount, 1); /* ref ib conn allocation */ INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); } @@ -543,6 +552,7 @@ int iser_connect(struct iser_conn *ib_conn, ib_conn->state = ISER_CONN_PENDING; + iser_conn_get(ib_conn); /* ref ib conn's cma id */ ib_conn->cma_id = rdma_create_id(iser_cma_handler, (void *)ib_conn, RDMA_PS_TCP); @@ -580,7 +590,7 @@ id_failure: addr_failure: ib_conn->state = ISER_CONN_DOWN; connect_failure: - iser_conn_release(ib_conn); + iser_conn_release(ib_conn, 1); return err; } @@ -749,12 +759,10 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc, iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); - /* complete the termination process if disconnect event was delivered * - * note there are no more non completed posts to the QP */ - if (ib_conn->disc_evt_flag) { - ib_conn->state = ISER_CONN_DOWN; - wake_up_interruptible(&ib_conn->wait); - } + /* no more non completed posts to the QP, complete the + * termination process w.o worrying on disconnect event */ + ib_conn->state = ISER_CONN_DOWN; + wake_up_interruptible(&ib_conn->wait); } } -- cgit v1.2.3-70-g09d2 From 9fda1ac5fa09c49e9148f85be14f55e2bb856c0f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 May 2010 16:22:21 +0300 Subject: IB/iser: Fix error flow in iser_create_ib_conn_res() We shouldn't free things here because we free them later. The call tree looks like this: iser_connect() ==> initiating the connection establishment and later iser_cma_handler() => iser_route_handler() => iser_create_ib_conn_res() if we fail here, eventually iser_conn_release() is called, resulting in a double free. 
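Restated with punctuation, the failing flow is: iser_connect() starts connection establishment; later, iser_cma_handler() => iser_route_handler() => iser_create_ib_conn_res(); and if that last step fails, iser_conn_release() eventually runs and frees the same objects a second time. Below is a minimal stand-alone sketch of the pattern the fix adopts (hypothetical names, not the driver code): the create step performs no partial cleanup on error beyond normalizing pointers, and the later release step is the single place that frees, so nothing can be freed twice.

#include <stdio.h>
#include <stdlib.h>

struct res {
	void *buf;
	void *pool;
};

/* create: on failure, do not free anything here; whatever was
 * allocated stays recorded in *r for the common release step */
static int res_create(struct res *r)
{
	r->buf = malloc(64);
	if (!r->buf)
		goto out_err;
	r->pool = malloc(128);
	if (!r->pool)
		goto out_err;
	return 0;
out_err:
	fprintf(stderr, "unable to alloc mem or create resource\n");
	return -1;
}

/* release: the one place that frees; free(NULL) is a no-op,
 * which covers partially created state */
static void res_release(struct res *r)
{
	free(r->pool);
	free(r->buf);
	r->pool = NULL;
	r->buf = NULL;
}

int main(void)
{
	struct res r = { NULL, NULL };

	if (res_create(&r))
		fprintf(stderr, "create failed; release still runs exactly once\n");
	res_release(&r);
	return 0;
}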
Signed-off-by: Dan Carpenter Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier --- drivers/infiniband/ulp/iser/iser_verbs.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 78fdecacea3..9876865732f 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -163,10 +163,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) device = ib_conn->device; ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); - if (!ib_conn->login_buf) { - goto alloc_err; - ret = -ENOMEM; - } + if (!ib_conn->login_buf) + goto out_err; ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, @@ -175,10 +173,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), GFP_KERNEL); - if (!ib_conn->page_vec) { - ret = -ENOMEM; - goto alloc_err; - } + if (!ib_conn->page_vec) + goto out_err; + ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1); params.page_shift = SHIFT_4K; @@ -198,7 +195,8 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, ¶ms); if (IS_ERR(ib_conn->fmr_pool)) { ret = PTR_ERR(ib_conn->fmr_pool); - goto fmr_pool_err; + ib_conn->fmr_pool = NULL; + goto out_err; } memset(&init_attr, 0, sizeof init_attr); @@ -216,7 +214,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); if (ret) - goto qp_err; + goto out_err; ib_conn->qp = ib_conn->cma_id->qp; iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", @@ -224,12 +222,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) ib_conn->fmr_pool, ib_conn->cma_id->qp); return ret; -qp_err: - (void)ib_destroy_fmr_pool(ib_conn->fmr_pool); -fmr_pool_err: - kfree(ib_conn->page_vec); - kfree(ib_conn->login_buf); -alloc_err: +out_err: iser_err("unable to alloc mem or create resource, err %d\n", ret); return ret; } -- cgit v1.2.3-70-g09d2 From 9893e742a0d942dda2277e9f3e19b726900adf27 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sat, 15 May 2010 23:22:38 +0200 Subject: IB/core: Use kmemdup() instead of kmalloc()+memcpy() Use kmemdup when some other buffer is immediately copied into the allocated region. A simplified version of the semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) // @@ expression from,to,size,flag; statement S; @@ - to = \(kmalloc\|kzalloc\)(size,flag); + to = kmemdup(from,size,flag); if (to==NULL || ...) 
S - memcpy(to, from, size); // Signed-off-by: Julia Lawall Signed-off-by: Roland Dreier --- drivers/infiniband/core/cma.c | 4 ++-- drivers/infiniband/core/mad.c | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6d777069d86..ce2e87b7285 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1677,13 +1677,13 @@ int rdma_set_ib_paths(struct rdma_cm_id *id, if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) return -EINVAL; - id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL); + id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, + GFP_KERNEL); if (!id->route.path_rec) { ret = -ENOMEM; goto err; } - memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths); id->route.num_paths = num_paths; return 0; err: diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 1df1194aeba..6dc7b77d5d2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -291,13 +291,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, } if (mad_reg_req) { - reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL); + reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL); if (!reg_req) { ret = ERR_PTR(-ENOMEM); goto error3; } - /* Make a copy of the MAD registration request */ - memcpy(reg_req, mad_reg_req, sizeof *reg_req); } /* Now, fill in the various structures */ -- cgit v1.2.3-70-g09d2
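As a closing illustration of the transformation above -- a stand-alone sketch using a userspace memdup() analogue, not part of the patch -- kmemdup() collapses the allocate-then-copy pair into a single call while the error handling stays exactly the same:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace stand-in for kmemdup(): allocate len bytes and copy src */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);
	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const char msg[] = "registration request";

	/* before: two steps, with the size repeated in both */
	char *a = malloc(sizeof(msg));
	if (!a)
		return 1;
	memcpy(a, msg, sizeof(msg));

	/* after: one step, same NULL check on failure */
	char *b = memdup(msg, sizeof(msg));
	if (!b) {
		free(a);
		return 1;
	}

	puts(a);
	puts(b);
	free(a);
	free(b);
	return 0;
}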