-rw-r--r--  Documentation/DocBook/mac80211.tmpl | 1
-rw-r--r--  Documentation/networking/dccp.txt | 6
-rw-r--r--  arch/xtensa/platforms/iss/network.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 2
-rw-r--r--  drivers/media/dvb/dvb-core/dvb_net.c | 7
-rw-r--r--  drivers/net/3c505.c | 6
-rw-r--r--  drivers/net/3c509.c | 10
-rw-r--r--  drivers/net/3c523.c | 4
-rw-r--r--  drivers/net/3c527.c | 11
-rw-r--r--  drivers/net/7990.c | 2
-rw-r--r--  drivers/net/82596.c | 8
-rw-r--r--  drivers/net/Kconfig | 15
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/a2065.c | 2
-rw-r--r--  drivers/net/acenic.c | 2
-rw-r--r--  drivers/net/amd8111e.c | 7
-rw-r--r--  drivers/net/ariadne.c | 2
-rw-r--r--  drivers/net/arm/am79c961a.c | 2
-rw-r--r--  drivers/net/arm/at91_ether.c | 4
-rw-r--r--  drivers/net/arm/ether3.c | 2
-rw-r--r--  drivers/net/arm/ixp4xx_eth.c | 2
-rw-r--r--  drivers/net/arm/ks8695net.c | 4
-rw-r--r--  drivers/net/at1700.c | 6
-rw-r--r--  drivers/net/atarilance.c | 2
-rw-r--r--  drivers/net/atl1e/atl1e_hw.c | 23
-rw-r--r--  drivers/net/atl1e/atl1e_main.c | 153
-rw-r--r--  drivers/net/atl1e/atl1e_param.c | 35
-rw-r--r--  drivers/net/atp.c | 7
-rw-r--r--  drivers/net/au1000_eth.c | 4
-rw-r--r--  drivers/net/b44.c | 4
-rw-r--r--  drivers/net/bcm63xx_enet.c | 4
-rw-r--r--  drivers/net/benet/Kconfig | 4
-rw-r--r--  drivers/net/benet/be.h | 12
-rw-r--r--  drivers/net/benet/be_cmds.c | 32
-rw-r--r--  drivers/net/benet/be_cmds.h | 3
-rw-r--r--  drivers/net/benet/be_ethtool.c | 2
-rw-r--r--  drivers/net/benet/be_hw.h | 119
-rw-r--r--  drivers/net/benet/be_main.c | 392
-rw-r--r--  drivers/net/bfin_mac.c | 4
-rw-r--r--  drivers/net/bmac.c | 8
-rw-r--r--  drivers/net/bnx2.c | 2
-rw-r--r--  drivers/net/bnx2x_main.c | 7
-rw-r--r--  drivers/net/chelsio/common.h | 4
-rw-r--r--  drivers/net/cpmac.c | 2
-rw-r--r--  drivers/net/cris/eth_v10.c | 2
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 3
-rw-r--r--  drivers/net/cxgb3/xgmac.c | 3
-rw-r--r--  drivers/net/davinci_emac.c | 4
-rw-r--r--  drivers/net/de620.c | 2
-rw-r--r--  drivers/net/declance.c | 2
-rw-r--r--  drivers/net/defxx.c | 4
-rw-r--r--  drivers/net/depca.c | 2
-rw-r--r--  drivers/net/dl2k.c | 6
-rw-r--r--  drivers/net/dm9000.c | 2
-rw-r--r--  drivers/net/e100.c | 6
-rw-r--r--  drivers/net/eepro.c | 15
-rw-r--r--  drivers/net/eexpress.c | 6
-rw-r--r--  drivers/net/ehea/ehea_main.c | 6
-rw-r--r--  drivers/net/enc28j60.c | 2
-rw-r--r--  drivers/net/enic/enic_main.c | 4
-rw-r--r--  drivers/net/epic100.c | 6
-rw-r--r--  drivers/net/eth16i.c | 2
-rw-r--r--  drivers/net/ewrk3.c | 2
-rw-r--r--  drivers/net/fealnx.c | 4
-rw-r--r--  drivers/net/fec.c | 80
-rw-r--r--  drivers/net/fec_mpc52xx.c | 2
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 2
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 2
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 2
-rw-r--r--  drivers/net/gianfar.c | 2
-rw-r--r--  drivers/net/hamachi.c | 6
-rw-r--r--  drivers/net/hp100.c | 7
-rw-r--r--  drivers/net/ibm_newemac/core.c | 6
-rw-r--r--  drivers/net/ibmveth.c | 5
-rw-r--r--  drivers/net/igb/e1000_82575.h | 3
-rw-r--r--  drivers/net/igb/e1000_regs.h | 1
-rw-r--r--  drivers/net/igb/igb.h | 3
-rw-r--r--  drivers/net/igb/igb_main.c | 138
-rw-r--r--  drivers/net/igbvf/netdev.c | 6
-rw-r--r--  drivers/net/ioc3-eth.c | 5
-rw-r--r--  drivers/net/ipg.c | 4
-rw-r--r--  drivers/net/isa-skeleton.c | 5
-rw-r--r--  drivers/net/iseries_veth.c | 4
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c | 101
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 111
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 20
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h | 9
-rw-r--r--  drivers/net/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/jme.c | 56
-rw-r--r--  drivers/net/jme.h | 41
-rw-r--r--  drivers/net/korina.c | 6
-rw-r--r--  drivers/net/ks8851.c | 4
-rw-r--r--  drivers/net/ks8851_mll.c | 4
-rw-r--r--  drivers/net/ksz884x.c | 7335
-rw-r--r--  drivers/net/lance.c | 2
-rw-r--r--  drivers/net/lib82596.c | 6
-rw-r--r--  drivers/net/ll_temac_main.c | 6
-rw-r--r--  drivers/net/lp486e.c | 12
-rw-r--r--  drivers/net/macb.c | 33
-rw-r--r--  drivers/net/mace.c | 2
-rw-r--r--  drivers/net/macmace.c | 2
-rw-r--r--  drivers/net/macvtap.c | 35
-rw-r--r--  drivers/net/natsemi.c | 4
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c | 10
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/ni65.c | 2
-rw-r--r--  drivers/net/niu.c | 691
-rw-r--r--  drivers/net/ns83820.c | 2
-rw-r--r--  drivers/net/octeon/octeon_mgmt.c | 6
-rw-r--r--  drivers/net/pci-skeleton.c | 4
-rw-r--r--  drivers/net/pcmcia/3c574_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/3c589_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 6
-rw-r--r--  drivers/net/pcmcia/nmclan_cs.c | 14
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 4
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 8
-rw-r--r--  drivers/net/pcnet32.c | 2
-rw-r--r--  drivers/net/ps3_gelic_net.c | 2
-rw-r--r--  drivers/net/qlcnic/qlcnic_hw.c | 4
-rw-r--r--  drivers/net/qlge/qlge.h | 8
-rw-r--r--  drivers/net/qlge/qlge_dbg.c | 58
-rw-r--r--  drivers/net/qlge/qlge_ethtool.c | 49
-rw-r--r--  drivers/net/qlge/qlge_main.c | 845
-rw-r--r--  drivers/net/qlge/qlge_mpi.c | 183
-rw-r--r--  drivers/net/r6040.c | 11
-rw-r--r--  drivers/net/r8169.c | 140
-rw-r--r--  drivers/net/s2io.c | 8
-rw-r--r--  drivers/net/sc92031.c | 2
-rw-r--r--  drivers/net/sfc/efx.c | 3
-rw-r--r--  drivers/net/sfc/qt202x_phy.c | 2
-rw-r--r--  drivers/net/sgiseeq.c | 2
-rw-r--r--  drivers/net/sis190.c | 4
-rw-r--r--  drivers/net/sis900.c | 4
-rw-r--r--  drivers/net/skfp/skfddi.c | 6
-rw-r--r--  drivers/net/skge.c | 79
-rw-r--r--  drivers/net/sky2.c | 436
-rw-r--r--  drivers/net/sky2.h | 8
-rw-r--r--  drivers/net/smc911x.c | 6
-rw-r--r--  drivers/net/smc9194.c | 4
-rw-r--r--  drivers/net/smc91x.c | 6
-rw-r--r--  drivers/net/smsc911x.c | 4
-rw-r--r--  drivers/net/smsc9420.c | 2
-rw-r--r--  drivers/net/sonic.c | 8
-rw-r--r--  drivers/net/starfire.c | 8
-rw-r--r--  drivers/net/stmmac/dwmac100.c | 6
-rw-r--r--  drivers/net/stmmac/dwmac1000_core.c | 8
-rw-r--r--  drivers/net/sun3_82586.c | 2
-rw-r--r--  drivers/net/sun3lance.c | 2
-rw-r--r--  drivers/net/sunbmac.c | 4
-rw-r--r--  drivers/net/sundance.c | 6
-rw-r--r--  drivers/net/sungem.c | 4
-rw-r--r--  drivers/net/sunhme.c | 8
-rw-r--r--  drivers/net/sunlance.c | 2
-rw-r--r--  drivers/net/sunqe.c | 4
-rw-r--r--  drivers/net/tc35815.c | 6
-rw-r--r--  drivers/net/tehuti.c | 4
-rw-r--r--  drivers/net/tg3.c | 148
-rw-r--r--  drivers/net/tg3.h | 8
-rw-r--r--  drivers/net/tlan.c | 2
-rw-r--r--  drivers/net/tokenring/3c359.c | 2
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 2
-rw-r--r--  drivers/net/tokenring/lanstreamer.c | 2
-rw-r--r--  drivers/net/tokenring/olympic.c | 2
-rw-r--r--  drivers/net/tokenring/tms380tr.c | 2
-rw-r--r--  drivers/net/tsi108_eth.c | 2
-rw-r--r--  drivers/net/tulip/de2104x.c | 8
-rw-r--r--  drivers/net/tulip/de4x5.c | 6
-rw-r--r--  drivers/net/tulip/dmfe.c | 15
-rw-r--r--  drivers/net/tulip/tulip_core.c | 18
-rw-r--r--  drivers/net/tulip/uli526x.c | 12
-rw-r--r--  drivers/net/tulip/winbond-840.c | 7
-rw-r--r--  drivers/net/typhoon.c | 9
-rw-r--r--  drivers/net/ucc_geth.c | 3
-rw-r--r--  drivers/net/usb/asix.c | 12
-rw-r--r--  drivers/net/usb/catc.c | 4
-rw-r--r--  drivers/net/usb/int51x1.c | 2
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/usb/mcs7830.c | 6
-rw-r--r--  drivers/net/usb/pegasus.c | 2
-rw-r--r--  drivers/net/usb/rtl8150.c | 2
-rw-r--r--  drivers/net/usb/smsc95xx.c | 4
-rw-r--r--  drivers/net/via-rhine.c | 5
-rw-r--r--  drivers/net/via-velocity.c | 47
-rw-r--r--  drivers/net/virtio_net.c | 39
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 8
-rw-r--r--  drivers/net/vxge/vxge-main.c | 6
-rw-r--r--  drivers/net/wireless/adm8211.c | 13
-rw-r--r--  drivers/net/wireless/airo.c | 2
-rw-r--r--  drivers/net/wireless/ath/ar9170/ar9170.h | 8
-rw-r--r--  drivers/net/wireless/ath/ar9170/main.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 44
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 11
-rw-r--r--  drivers/net/wireless/ath/regd.c | 5
-rw-r--r--  drivers/net/wireless/b43/b43.h | 1
-rw-r--r--  drivers/net/wireless/b43/dma.c | 17
-rw-r--r--  drivers/net/wireless/b43/dma.h | 5
-rw-r--r--  drivers/net/wireless/b43/main.c | 38
-rw-r--r--  drivers/net/wireless/b43/phy_n.c | 302
-rw-r--r--  drivers/net/wireless/b43/phy_n.h | 4
-rw-r--r--  drivers/net/wireless/b43/pio.c | 17
-rw-r--r--  drivers/net/wireless/b43/pio.h | 5
-rw-r--r--  drivers/net/wireless/b43legacy/dma.c | 20
-rw-r--r--  drivers/net/wireless/b43legacy/dma.h | 10
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 26
-rw-r--r--  drivers/net/wireless/b43legacy/pio.c | 13
-rw-r--r--  drivers/net/wireless/b43legacy/pio.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-1000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c | 21
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-6000.c | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 37
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 49
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 1
-rw-r--r--  drivers/net/wireless/iwmc3200wifi/rx.c | 2
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 5
-rw-r--r--  drivers/net/wireless/libertas/dev.h | 1
-rw-r--r--  drivers/net/wireless/libertas/if_spi.c | 1
-rw-r--r--  drivers/net/wireless/libertas/main.c | 31
-rw-r--r--  drivers/net/wireless/libertas_tf/main.c | 3
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 44
-rw-r--r--  drivers/net/wireless/mwl8k.c | 35
-rw-r--r--  drivers/net/wireless/orinoco/main.c | 4
-rw-r--r--  drivers/net/wireless/p54/main.c | 11
-rw-r--r--  drivers/net/wireless/p54/p54.h | 8
-rw-r--r--  drivers/net/wireless/p54/txrx.c | 2
-rw-r--r--  drivers/net/wireless/ray_cs.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 16
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 1
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187_dev.c | 1
-rw-r--r--  drivers/net/wireless/wl12xx/wl1251_main.c | 3
-rw-r--r--  drivers/net/wireless/zd1201.c | 7
-rw-r--r--  drivers/net/xilinx_emaclite.c | 378
-rw-r--r--  drivers/net/yellowfin.c | 7
-rw-r--r--  drivers/pci/iov.c | 15
-rw-r--r--  drivers/ssb/driver_mipscore.c | 5
-rw-r--r--  drivers/ssb/main.c | 3
-rw-r--r--  drivers/staging/et131x/et131x_netdev.c | 10
-rw-r--r--  drivers/staging/netwave/netwave_cs.c | 8
-rw-r--r--  drivers/staging/slicoss/slicoss.c | 2
-rw-r--r--  drivers/staging/vt6655/device_main.c | 4
-rw-r--r--  drivers/staging/vt6656/main_usb.c | 5
-rw-r--r--  drivers/staging/wavelan/wavelan.c | 8
-rw-r--r--  drivers/staging/wavelan/wavelan_cs.c | 11
-rw-r--r--  drivers/staging/wlags49_h2/wl_netdev.c | 12
-rw-r--r--  drivers/vhost/vhost.c | 10
-rw-r--r--  fs/seq_file.c | 59
-rw-r--r--  include/linux/ethtool.h | 51
-rw-r--r--  include/linux/if_link.h | 30
-rw-r--r--  include/linux/netdevice.h | 144
-rw-r--r--  include/linux/pci.h | 11
-rw-r--r--  include/linux/seq_file.h | 11
-rw-r--r--  include/linux/skbuff.h | 4
-rw-r--r--  include/net/cfg80211.h | 83
-rw-r--r--  include/net/ieee80211_radiotap.h | 4
-rw-r--r--  include/net/ip6_fib.h | 2
-rw-r--r--  include/net/mac80211.h | 49
-rw-r--r--  include/net/sock.h | 7
-rw-r--r--  include/net/xfrm.h | 26
-rw-r--r--  net/appletalk/atalk_proc.c | 30
-rw-r--r--  net/atm/proc.c | 2
-rw-r--r--  net/atm/resources.c | 18
-rw-r--r--  net/ax25/af_ax25.c | 18
-rw-r--r--  net/ax25/ax25_uid.c | 25
-rw-r--r--  net/bluetooth/bnep/netdev.c | 6
-rw-r--r--  net/core/dev.c | 7
-rw-r--r--  net/core/ethtool.c | 383
-rw-r--r--  net/core/rtnetlink.c | 67
-rw-r--r--  net/dccp/ccid.c | 9
-rw-r--r--  net/dccp/proto.c | 2
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 6
-rw-r--r--  net/ipv4/udp.c | 15
-rw-r--r--  net/ipv6/addrconf.c | 56
-rw-r--r--  net/ipv6/ip6_fib.c | 29
-rw-r--r--  net/ipv6/reassembly.c | 13
-rw-r--r--  net/ipv6/udp.c | 15
-rw-r--r--  net/ipx/ipx_proc.c | 90
-rw-r--r--  net/irda/irlan/irlan_common.c | 28
-rw-r--r--  net/irda/irlan/irlan_eth.c | 5
-rw-r--r--  net/key/af_key.c | 24
-rw-r--r--  net/mac80211/agg-tx.c | 8
-rw-r--r--  net/mac80211/cfg.c | 25
-rw-r--r--  net/mac80211/debugfs.c | 33
-rw-r--r--  net/mac80211/driver-ops.h | 42
-rw-r--r--  net/mac80211/driver-trace.h | 73
-rw-r--r--  net/mac80211/ibss.c | 24
-rw-r--r--  net/mac80211/ieee80211_i.h | 19
-rw-r--r--  net/mac80211/mesh_plink.c | 17
-rw-r--r--  net/mac80211/mlme.c | 60
-rw-r--r--  net/mac80211/pm.c | 10
-rw-r--r--  net/mac80211/rate.c | 5
-rw-r--r--  net/mac80211/rate.h | 7
-rw-r--r--  net/mac80211/rx.c | 12
-rw-r--r--  net/mac80211/scan.c | 27
-rw-r--r--  net/mac80211/sta_info.c | 731
-rw-r--r--  net/mac80211/sta_info.h | 36
-rw-r--r--  net/mac80211/status.c | 17
-rw-r--r--  net/mac80211/tx.c | 11
-rw-r--r--  net/mac80211/util.c | 24
-rw-r--r--  net/netrom/af_netrom.c | 21
-rw-r--r--  net/netrom/nr_route.c | 53
-rw-r--r--  net/packet/af_packet.c | 20
-rw-r--r--  net/rose/af_rose.c | 22
-rw-r--r--  net/sunrpc/rpc_pipe.c | 2
-rw-r--r--  net/wireless/radiotap.c | 305
-rw-r--r--  net/x25/af_x25.c | 24
-rw-r--r--  net/x25/x25_proc.c | 114
-rw-r--r--  net/xfrm/xfrm_policy.c | 13
-rw-r--r--  net/xfrm/xfrm_state.c | 8
-rw-r--r--  net/xfrm/xfrm_user.c | 8
328 files changed, 12836 insertions, 3883 deletions
diff --git a/Documentation/DocBook/mac80211.tmpl b/Documentation/DocBook/mac80211.tmpl
index 971d1c0c83e..affb15a344a 100644
--- a/Documentation/DocBook/mac80211.tmpl
+++ b/Documentation/DocBook/mac80211.tmpl
@@ -234,7 +234,6 @@ usage should require reading the full document.
<title>Multiple queues and QoS support</title>
<para>TBD</para>
!Finclude/net/mac80211.h ieee80211_tx_queue_params
-!Finclude/net/mac80211.h ieee80211_tx_queue_stats
</chapter>
<chapter id="AP">
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b132e4a3cf0..a62fdf7a6bf 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -58,8 +58,10 @@ DCCP_SOCKOPT_GET_CUR_MPS is read-only and retrieves the current maximum packet
size (application payload size) in bytes, see RFC 4340, section 14.
DCCP_SOCKOPT_AVAILABLE_CCIDS is also read-only and returns the list of CCIDs
-supported by the endpoint (see include/linux/dccp.h for symbolic constants).
-The caller needs to provide a sufficiently large (> 2) array of type uint8_t.
+supported by the endpoint. The option value is an array of type uint8_t whose
+size is passed as option length. The minimum array size is 4 elements, the
+value returned in the optlen argument always reflects the true number of
+built-in CCIDs.
DCCP_SOCKOPT_CCID is write-only and sets both the TX and RX CCIDs at the same
time, combining the operation of the next two socket options. This option is
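
For illustration, a minimal user-space sketch of querying DCCP_SOCKOPT_AVAILABLE_CCIDS
as described in the dccp.txt hunk above (it assumes a DCCP-enabled kernel; the
16-element buffer size is an arbitrary choice, and SOL_DCCP is defined locally in
case the C library headers do not provide it):

#include <stdio.h>
#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269            /* socket-level value for DCCP options */
#endif

int main(void)
{
        uint8_t ccids[16];              /* must hold at least 4 entries */
        socklen_t len = sizeof(ccids);
        int i, fd;

        fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
        if (fd < 0)
                return 1;

        /* read-only option: fills in the CCIDs built into the kernel */
        if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_AVAILABLE_CCIDS,
                       ccids, &len) < 0)
                return 1;

        /* on return, len reflects the true number of built-in CCIDs */
        for (i = 0; i < (int)len; i++)
                printf("CCID-%u supported\n", ccids[i]);
        return 0;
}
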
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 2f0b86b37cf..87e218f98ef 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -560,7 +560,7 @@ static void iss_net_set_multicast_list(struct net_device *dev)
#if 0
if (dev->flags & IFF_PROMISC)
return;
- else if (dev->mc_count)
+ else if (!netdev_mc_empty(dev))
dev->flags |= IFF_ALLMULTI;
else
dev->flags &= ~IFF_ALLMULTI;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index ab110278018..c04f8fc6fc2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -862,7 +862,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
- netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
+ netdev_mc_count(netdev), !!(netdev->flags & IFF_PROMISC),
!!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
multicast_addr = netdev->mc_list;
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index da6552d32cf..37d8579fc7a 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1138,18 +1138,18 @@ static void wq_set_multicast_list (struct work_struct *work)
} else if ((dev->flags & IFF_ALLMULTI)) {
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
int mci;
struct dev_mc_list *mc;
dprintk("%s: set_mc_list, %d entries\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
for (mci = 0, mc=dev->mc_list;
- mci < dev->mc_count;
+ mci < netdev_mc_count(dev);
mc = mc->next, mci++) {
dvb_set_mc_filter(dev, mc);
}
@@ -1236,7 +1236,6 @@ static void dvb_net_setup(struct net_device *dev)
dev->header_ops = &dvb_header_ops;
dev->netdev_ops = &dvb_netdev_ops;
dev->mtu = 4096;
- dev->mc_count = 0;
dev->flags |= IFF_NOARP;
}
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9257d7ce037..dadb46a8833 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1229,8 +1229,8 @@ static void elp_set_mc_list(struct net_device *dev)
/* send a "load multicast list" command to the board, max 10 addrs/cmd */
/* if num_addrs==0 the list will be cleared */
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * dev->mc_count;
- for (i = 0; i < dev->mc_count; i++) {
+ adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
+ for (i = 0; i < netdev_mc_count(dev); i++) {
memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
dmi = dmi->next;
}
@@ -1244,7 +1244,7 @@ static void elp_set_mc_list(struct net_device *dev)
TIMEOUT_MSG(__LINE__);
}
}
- if (dev->mc_count)
+ if (!netdev_mc_empty(dev))
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
else /* num_addrs == 0 */
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9d85efce591..902435a7646 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1111,12 +1111,14 @@ set_multicast_list(struct net_device *dev)
unsigned long flags;
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
+ int mc_count = netdev_mc_count(dev);
if (el3_debug > 1) {
static int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
- pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ if (old != mc_count) {
+ old = mc_count;
+ pr_debug("%s: Setting Rx mode to %d addresses.\n",
+ dev->name, mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
@@ -1124,7 +1126,7 @@ set_multicast_list(struct net_device *dev)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
}
- else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
}
else
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 27d80ca5e4c..6948d667fc5 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -626,7 +626,7 @@ static int init586(struct net_device *dev)
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -771,7 +771,7 @@ static int init586(struct net_device *dev)
* Multicast setup
*/
- if (dev->mc_count) {
+ if (num_addrs) {
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 36c4191e7bc..ce982698051 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1526,10 +1526,10 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
if ((dev->flags&IFF_PROMISC) ||
(dev->flags&IFF_ALLMULTI) ||
- dev->mc_count > 10)
+ netdev_mc_count(dev) > 10)
/* Enable promiscuous mode */
filt |= 1;
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
unsigned char block[62];
unsigned char *bp;
@@ -1542,16 +1542,17 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
if(!lp->mc_list_valid)
{
block[1]=0;
- block[0]=dev->mc_count;
+ block[0]=netdev_mc_count(dev);
bp=block+2;
- for(i=0;i<dev->mc_count;i++)
+ for(i=0;i<netdev_mc_count(dev);i++)
{
memcpy(bp, dmc->dmi_addr, 6);
bp+=6;
dmc=dmc->next;
}
- if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
+ if(mc32_command_nowait(dev, 2, block,
+ 2+6*netdev_mc_count(dev))==-1)
{
lp->mc_reload_wait = 1;
return;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index b1e5764628c..079d0be3782 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -611,7 +611,7 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ for (i = 0; i < netdev_mc_count(dev); i++){
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 1663bc9e45d..638ce3b2985 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1505,7 +1505,7 @@ static void set_multicast_list(struct net_device *dev)
int config = 0, cnt;
DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count,
+ dev->name, netdev_mc_count(dev),
dev->flags & IFF_PROMISC ? "ON" : "OFF",
dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
@@ -1533,7 +1533,7 @@ static void set_multicast_list(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT)
{
cnt = MAX_MC_CNT;
@@ -1541,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1550,7 +1550,7 @@ static void set_multicast_list(struct net_device *dev)
return;
cmd = &lp->mc_cmd;
cmd->cmd.command = CmdMulticastList;
- cmd->mc_cnt = dev->mc_count * 6;
+ cmd->mc_cnt = netdev_mc_count(dev) * 6;
cp = cmd->mc_addrs;
for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
memcpy(cp, dmi->dmi_addr, 6);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ef662e15c3d..b711d541610 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1380,6 +1380,17 @@ config AC3200
To compile this driver as a module, choose M here. The module
will be called ac3200.
+config KSZ884X_PCI
+ tristate "Micrel KSZ8841/2 PCI"
+ depends on NET_PCI && PCI
+ select MII
+ select CRC32
+ help
+ This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ksz884x.
+
config APRICOT
tristate "Apricot Xen-II on board Ethernet"
depends on NET_PCI && ISA
@@ -1895,7 +1906,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
+ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -1951,6 +1963,7 @@ config ATL2
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
depends on PPC32 || MICROBLAZE
+ select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 95958032cd3..622cfd450d4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index b7ec0368d7e..6a65f660c19 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -619,7 +619,7 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ for (i = 0; i < netdev_mc_count(dev); i++){
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index ec624ab03e8..4ae750ef1e1 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -2845,7 +2845,7 @@ static void ace_set_multicast_list(struct net_device *dev)
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
- if ((dev->mc_count) && !(ap->mcast_all)) {
+ if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index bb27b27d967..bdffdfb4c88 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1387,7 +1387,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
}
else
writel( PROM, lp->mmio + CMD2);
- if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
lp->mc_list = dev->mc_list;
@@ -1395,7 +1396,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
return;
}
- if( dev->mc_count == 0 ){
+ if (netdev_mc_empty(dev)) {
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
lp->mc_list = NULL;
@@ -1409,7 +1410,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
lp->options |= OPTION_MULTICAST_ENABLE;
lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
+ for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < netdev_mc_count(dev);
i++, mc_ptr = mc_ptr->next) {
bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index e2c202493fa..08d8be47dae 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -819,7 +819,7 @@ static void set_multicast_list(struct net_device *dev)
lance->RDP = PROM; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer filtering. */
memset(multicast_table, (num_addrs == 0) ? 0 : -1,
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 164b37e85ee..1c3c1f94268 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -680,7 +680,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
#endif
};
-static int __init am79c961_probe(struct platform_device *pdev)
+static int __devinit am79c961_probe(struct platform_device *pdev)
{
struct resource *res;
struct net_device *dev;
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index c8bc60a7040..17d85d98987 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -563,7 +563,7 @@ static void at91ether_sethashtable(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, curr = curr->next) {
if (!curr) break; /* unexpected end of list */
bitnr = hash_get_index(curr->dmi_addr);
@@ -592,7 +592,7 @@ static void at91ether_set_multicast_list(struct net_device *dev)
at91_emac_write(AT91_EMAC_HSH, -1);
at91_emac_write(AT91_EMAC_HSL, -1);
cfg |= AT91_EMAC_MTI;
- } else if (dev->mc_count > 0) { /* Enable specific multicasts */
+ } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
at91ether_sethashtable(dev);
cfg |= AT91_EMAC_MTI;
} else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1f7a69c929a..d9de9bce239 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -463,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index c3dfbdd2cdc..1a5f78b160f 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -737,7 +737,7 @@ static void eth_set_mcast_list(struct net_device *dev)
struct port *port = netdev_priv(dev);
struct dev_mc_list *mclist = dev->mc_list;
u8 diffs[ETH_ALEN], *addr;
- int cnt = dev->mc_count, i;
+ int cnt = netdev_mc_count(dev), i;
if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index be256b34cea..1dc181a9fbc 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1207,7 +1207,7 @@ ks8695_set_multicast(struct net_device *ndev)
if (ndev->flags & IFF_ALLMULTI) {
/* enable all multicast mode */
ctrl |= DRXC_RM;
- } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
+ } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
/* more specific multicast addresses than can be
* handled in hardware
*/
@@ -1216,7 +1216,7 @@ ks8695_set_multicast(struct net_device *ndev)
/* enable specific multicasts */
ctrl &= ~DRXC_RM;
ks8695_init_partial_multicast(ksp, ndev->mc_list,
- ndev->mc_count);
+ netdev_mc_count(ndev));
}
ks8695_writereg(ksp, KS8695_DRXC, ctrl);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index b14f4799d5d..fe60cd02c86 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -839,12 +839,12 @@ set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
@@ -852,7 +852,7 @@ set_rx_mode(struct net_device *dev)
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index cc9ed864391..280cfff48b4 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1097,7 +1097,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 4a770062011..76cc043def8 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -394,7 +394,6 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
int atl1e_phy_commit(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 phy_data;
@@ -415,12 +414,12 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
}
if (0 != (val & (MDIO_START | MDIO_BUSY))) {
- dev_err(&pdev->dev,
- "pcie linkdown at least for 25ms\n");
+ netdev_err(adapter->netdev,
+ "pcie linkdown at least for 25ms\n");
return ret_val;
}
- dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
+ netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i);
}
return 0;
}
@@ -428,7 +427,6 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int atl1e_phy_init(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
s32 ret_val;
u16 phy_val;
@@ -492,20 +490,22 @@ int atl1e_phy_init(struct atl1e_hw *hw)
/*Enable PHY LinkChange Interrupt */
ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
if (ret_val) {
- dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
+ netdev_err(adapter->netdev,
+ "Error enable PHY linkChange Interrupt\n");
return ret_val;
}
/* setup AutoNeg parameters */
ret_val = atl1e_phy_setup_autoneg_adv(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
+ netdev_err(adapter->netdev,
+ "Error Setting up Auto-Negotiation\n");
return ret_val;
}
/* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
- dev_dbg(&pdev->dev, "Restarting Auto-Neg");
+ netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n");
ret_val = atl1e_phy_commit(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Resetting the phy");
+ netdev_err(adapter->netdev, "Error resetting the phy\n");
return ret_val;
}
@@ -559,9 +559,8 @@ int atl1e_reset_hw(struct atl1e_hw *hw)
}
if (timeout >= AT_HW_MAX_IDLE_DELAY) {
- dev_err(&pdev->dev,
- "MAC state machine cann't be idle since"
- " disabled for 10ms second\n");
+ netdev_err(adapter->netdev,
+ "MAC state machine can't be idle since disabled for 10ms second\n");
return AT_ERR_TIMEOUT;
}
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index d59f8e89c65..7d8de10ba62 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -164,11 +164,10 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
int err = 0;
u16 speed, duplex, phy_data;
- /* MII_BMSR must read twise */
+ /* MII_BMSR must read twice */
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
if ((phy_data & BMSR_LSTATUS) == 0) {
@@ -195,12 +194,11 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl1e_setup_mac_ctrl(adapter);
- dev_info(&pdev->dev,
- "%s: %s NIC Link is Up<%d Mbps %s>\n",
- atl1e_driver_name, netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex");
+ netdev_info(netdev,
+ "NIC Link is Up <%d Mbps %s Duplex>\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half");
}
if (!netif_carrier_ok(netdev)) {
@@ -230,7 +228,6 @@ static void atl1e_link_chg_task(struct work_struct *work)
static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u16 phy_data = 0;
u16 link_up = 0;
@@ -243,8 +240,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
if (!link_up) {
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
- dev_info(&pdev->dev, "%s: %s NIC Link is Down\n",
- atl1e_driver_name, netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
adapter->link_speed = SPEED_0;
netif_stop_queue(netdev);
}
@@ -321,10 +317,9 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
u32 mac_ctrl_data = 0;
- dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_irq_disable(adapter);
@@ -345,9 +340,7 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
-
- dev_dbg(&pdev->dev, "atl1e_restore_vlan !");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
/*
@@ -391,7 +384,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ netdev_warn(adapter->netdev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
@@ -438,7 +431,6 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int retval = 0;
@@ -466,8 +458,8 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
goto out;
}
- dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x",
- data->reg_num, data->val_in);
+ netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
+ data->reg_num, data->val_in);
if (atl1e_write_phy_reg(&adapter->hw,
data->reg_num, data->val_in)) {
retval = -EIO;
@@ -602,7 +594,7 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->dmaw_dly_cnt = 4;
if (atl1e_alloc_queues(adapter)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -800,8 +792,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_size, &adapter->ring_dma);
if (adapter->ring_vir_addr == NULL) {
- dev_err(&pdev->dev, "pci_alloc_consistent failed, "
- "size = D%d", size);
+ netdev_err(adapter->netdev,
+ "pci_alloc_consistent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -817,7 +809,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- dev_err(&pdev->dev, "kzalloc failed , size = D%d", size);
+ netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
+ size);
err = -ENOMEM;
goto failed;
}
@@ -852,8 +845,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
}
if (unlikely(offset > adapter->ring_size)) {
- dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n",
- offset, adapter->ring_size);
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
err = -1;
goto failed;
}
@@ -1077,7 +1070,6 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
static int atl1e_configure(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
u32 intr_status_data = 0;
@@ -1130,8 +1122,8 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
intr_status_data = AT_READ_REG(hw, REG_ISR);
if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
- dev_err(&pdev->dev, "atl1e_configure failed,"
- "PCIE phy link down\n");
+ netdev_err(adapter->netdev,
+ "atl1e_configure failed, PCIE phy link down\n");
return -1;
}
@@ -1262,7 +1254,6 @@ static irqreturn_t atl1e_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct atl1e_hw *hw = &adapter->hw;
int max_ints = AT_MAX_INT_WORK;
int handled = IRQ_NONE;
@@ -1285,8 +1276,8 @@ static irqreturn_t atl1e_intr(int irq, void *data)
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
if (status & ISR_PHY_LINKDOWN) {
- dev_err(&pdev->dev,
- "pcie phy linkdown %x\n", status);
+ netdev_err(adapter->netdev,
+ "pcie phy linkdown %x\n", status);
if (netif_running(adapter->netdev)) {
/* reset MAC */
atl1e_irq_reset(adapter);
@@ -1297,9 +1288,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
/* check if DMA read/write error */
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
- dev_err(&pdev->dev,
- "PCIE DMA RW error (status = 0x%x)\n",
- status);
+ netdev_err(adapter->netdev,
+ "PCIE DMA RW error (status = 0x%x)\n",
+ status);
atl1e_irq_reset(adapter);
schedule_work(&adapter->reset_task);
break;
@@ -1382,7 +1373,6 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
- struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
&adapter->rx_ring;
@@ -1404,11 +1394,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page->read_offset);
/* check sequence number */
if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
- dev_err(&pdev->dev,
- "rx sequence number"
- " error (rx=%d) (expect=%d)\n",
- prrs->seq_num,
- rx_page_desc[que].rx_nxseq);
+ netdev_err(netdev,
+ "rx sequence number error (rx=%d) (expect=%d)\n",
+ prrs->seq_num,
+ rx_page_desc[que].rx_nxseq);
rx_page_desc[que].rx_nxseq++;
/* just for debug use */
AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
@@ -1424,9 +1413,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
/* hardware error, discard this packet*/
- dev_err(&pdev->dev,
- "rx packet desc error %x\n",
- *((u32 *)prrs + 1));
+ netdev_err(netdev,
+ "rx packet desc error %x\n",
+ *((u32 *)prrs + 1));
goto skip_pkt;
}
}
@@ -1435,8 +1424,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL) {
- dev_warn(&pdev->dev, "%s: Memory squeeze,"
- "deferring packet.\n", netdev->name);
+ netdev_warn(netdev,
+ "Memory squeeze, deferring packet\n");
goto skip_pkt;
}
skb->dev = netdev;
@@ -1450,9 +1439,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
u16 vlan_tag = (prrs->vtag >> 4) |
((prrs->vtag & 7) << 13) |
((prrs->vtag & 8) << 9);
- dev_dbg(&pdev->dev,
- "RXD VLAN TAG<RRD>=0x%04x\n",
- prrs->vtag);
+ netdev_dbg(netdev,
+ "RXD VLAN TAG<RRD>=0x%04x\n",
+ prrs->vtag);
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
vlan_tag);
} else {
@@ -1500,7 +1489,6 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
{
struct atl1e_adapter *adapter =
container_of(napi, struct atl1e_adapter, napi);
- struct pci_dev *pdev = adapter->pdev;
u32 imr_data;
int work_done = 0;
@@ -1519,8 +1507,8 @@ quit_polling:
/* test debug */
if (test_bit(__AT_DOWN, &adapter->flags)) {
atomic_dec(&adapter->irq_sem);
- dev_err(&pdev->dev,
- "atl1e_clean is called when AT_DOWN\n");
+ netdev_err(adapter->netdev,
+ "atl1e_clean is called when AT_DOWN\n");
}
/* reenable RX intr */
/*atl1e_irq_enable(adapter); */
@@ -1618,7 +1606,6 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
- struct pci_dev *pdev = adapter->pdev;
u8 hdr_len;
u32 real_len;
unsigned short offload_type;
@@ -1642,8 +1629,8 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
- dev_warn(&pdev->dev,
- "IPV4 tso with zero data??\n");
+ netdev_warn(adapter->netdev,
+ "IPV4 tso with zero data??\n");
goto check_sum;
} else {
ip_hdr(skb)->check = 0;
@@ -1672,8 +1659,8 @@ check_sum:
cso = skb_transport_offset(skb);
if (unlikely(cso & 0x1)) {
- dev_err(&adapter->pdev->dev,
- "pay load offset should not ant event number\n");
+ netdev_err(adapter->netdev,
+ "payload offset should not ant event number\n");
return -1;
} else {
css = cso + skb->csum_offset;
@@ -1886,8 +1873,8 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = false;
} else
netdev->irq = pdev->irq;
@@ -1898,13 +1885,13 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
netdev->name, netdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
return err;
}
- dev_dbg(&pdev->dev, "atl1e_request_irq OK\n");
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
return err;
}
@@ -2078,7 +2065,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
- dev_dbg(&pdev->dev, "set phy register failed\n");
+ netdev_dbg(adapter->netdev, "set phy register failed\n");
goto wol_dis;
}
@@ -2100,17 +2087,14 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
}
if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
- dev_dbg(&pdev->dev,
- "%s: Link may change"
- "when suspend\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "Link may change when suspend\n");
}
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only link up can wake up */
if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
- dev_dbg(&pdev->dev, "%s: read write phy "
- "register failed.\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "read write phy register failed\n");
goto wol_dis;
}
}
@@ -2131,9 +2115,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
- dev_dbg(&pdev->dev,
- "%s: suspend MAC=0x%x\n",
- atl1e_driver_name, mac_ctrl_data);
+ netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
+ mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
@@ -2183,8 +2166,8 @@ static int atl1e_resume(struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "ATL1e: Cannot enable PCI"
- " device from suspend\n");
+ netdev_err(adapter->netdev,
+ "Cannot enable PCI device from suspend\n");
return err;
}
@@ -2315,7 +2298,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
err = atl1e_init_netdev(netdev, pdev);
if (err) {
- dev_err(&pdev->dev, "init netdevice failed\n");
+ netdev_err(netdev, "init netdevice failed\n");
goto err_init_netdev;
}
adapter = netdev_priv(netdev);
@@ -2326,7 +2309,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
if (!adapter->hw.hw_addr) {
err = -EIO;
- dev_err(&pdev->dev, "cannot map device registers\n");
+ netdev_err(netdev, "cannot map device registers\n");
goto err_ioremap;
}
netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
@@ -2356,7 +2339,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
/* setup the private structure */
err = atl1e_sw_init(adapter);
if (err) {
- dev_err(&pdev->dev, "net device private data init failed\n");
+ netdev_err(netdev, "net device private data init failed\n");
goto err_sw_init;
}
@@ -2372,19 +2355,19 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
if (atl1e_read_mac_addr(&adapter->hw) != 0) {
err = -EIO;
- dev_err(&pdev->dev, "get mac address failed\n");
+ netdev_err(netdev, "get mac address failed\n");
goto err_eeprom;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr);
+ netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "register netdevice failed\n");
+ netdev_err(netdev, "register netdevice failed\n");
goto err_register;
}
@@ -2485,8 +2468,8 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev,
- "ATL1e: Cannot re-enable PCI device after reset.\n");
+ netdev_err(adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -2514,8 +2497,8 @@ static void atl1e_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (atl1e_up(adapter)) {
- dev_err(&pdev->dev,
- "ATL1e: can't bring device back up after reset\n");
+ netdev_err(adapter->netdev,
+ "can't bring device back up after reset\n");
return;
}
}
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c
index b3be59fd3fb..0ce60b6e7ef 100644
--- a/drivers/net/atl1e/atl1e_param.c
+++ b/drivers/net/atl1e/atl1e_param.c
@@ -116,7 +116,7 @@ struct atl1e_option {
} arg;
};
-static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev)
+static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -127,16 +127,19 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- dev_info(&pdev->dev, "%s Enabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- dev_info(&pdev->dev, "%s Disabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value);
+ netdev_info(adapter->netdev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -148,8 +151,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- dev_info(&pdev->dev, "%s\n",
- ent->str);
+ netdev_info(adapter->netdev,
+ "%s\n", ent->str);
return 0;
}
}
@@ -159,8 +162,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
BUG();
}
- dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -176,11 +179,13 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
*/
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
+
if (bd >= ATL1E_MAX_NIC) {
- dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
- dev_notice(&pdev->dev, "Using defaults for all values\n");
+ netdev_notice(adapter->netdev,
+ "no configuration for board #%i\n", bd);
+ netdev_notice(adapter->netdev,
+ "Using defaults for all values\n");
}
{ /* Transmit Ring Size */
@@ -196,7 +201,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_tx_desc_cnt > bd) {
val = tx_desc_cnt[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->tx_ring.count = (u16) val & 0xFFFC;
} else
adapter->tx_ring.count = (u16)opt.def;
@@ -215,7 +220,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_rx_mem_size > bd) {
val = rx_mem_size[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->rx_ring.page_size = (u32)val * 1024;
} else {
adapter->rx_ring.page_size = (u32)opt.def * 1024;
@@ -235,7 +240,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.imt = (u16) val;
} else
adapter->hw.imt = (u16)(opt.def);
@@ -254,7 +259,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_media_type > bd) {
val = media_type[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.media_type = (u16) val;
} else
adapter->hw.media_type = (u16)(opt.def);
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 2f8261c9614..a841feb5df2 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -861,7 +861,7 @@ static void set_rx_mode_8002(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
+ if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
lp->addr_mode = CMR2h_PROMISC;
else
lp->addr_mode = CMR2h_Normal;
@@ -877,7 +877,8 @@ static void set_rx_mode_8012(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
new_mode = CMR2h_PROMISC;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
@@ -885,7 +886,7 @@ static void set_rx_mode_8012(struct net_device *dev)
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next)
{
int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6bac04603a8..9337d023919 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1013,7 +1013,7 @@ static void au1000_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > MULTICAST_FILTER_LIMIT) {
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
@@ -1023,7 +1023,7 @@ static void au1000_multicast_list(struct net_device *dev)
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
(long *)mc_filter);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 44b66be3813..9091c6574b1 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1691,7 +1691,7 @@ static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
struct dev_mc_list *mclist;
int i, num_ents;
- num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
+ num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
mclist = dev->mc_list;
for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
__b44_cam_write(bp, mclist->dmi_addr, i + 1);
@@ -1716,7 +1716,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
__b44_set_mac_addr(bp);
if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > B44_MCAST_TABLE_SIZE))
+ (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0bd47d32ec4..0927ffa0d75 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -619,7 +619,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
/* only 3 perfect match registers left, first one is used for
* own mac address */
- if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
val |= ENET_RXCFG_ALLMCAST_MASK;
else
val &= ~ENET_RXCFG_ALLMCAST_MASK;
@@ -632,7 +632,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
}
for (i = 0, mc_list = dev->mc_list;
- (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
+ (mc_list != NULL) && (i < netdev_mc_count(dev)) && (i < 3);
i++, mc_list = mc_list->next) {
u8 *dmi_addr;
u32 tmp;
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
index fdb6e81a437..1a41a49bb61 100644
--- a/drivers/net/benet/Kconfig
+++ b/drivers/net/benet/Kconfig
@@ -1,6 +1,6 @@
config BE2NET
- tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI && INET
help
This driver implements the NIC functionality for ServerEngines'
- 10Gbps network adapter - BladeEngine 2.
+ 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 5bc74590c73..5038c16bfe9 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,22 +38,20 @@
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
-#define DRV_DESC BE_NAME "Driver"
+#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700
-#define OC_DEVICE_ID2 0x701
-#define OC_DEVICE_ID3 0x710
+#define OC_DEVICE_ID2 0x710
static inline char *nic_name(struct pci_dev *pdev)
{
switch (pdev->device) {
case OC_DEVICE_ID1:
- case OC_DEVICE_ID2:
return OC_NAME;
- case OC_DEVICE_ID3:
+ case OC_DEVICE_ID2:
return OC_NAME1;
case BE_DEVICE_ID2:
return BE3_NAME;
@@ -252,7 +250,8 @@ struct be_adapter {
bool rx_post_starved; /* Zero rx frags have been posted to BE */
struct vlan_group *vlan_grp;
- u16 num_vlans;
+ u16 vlans_added;
+ u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
struct be_dma_mem mc_cmd_mem;
@@ -266,6 +265,7 @@ struct be_adapter {
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
+ bool eeh_err;
bool link_up;
u32 port_num;
bool promiscuous;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 3227b11131c..3397ee327e1 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -167,7 +167,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ ready = ioread32(db);
+ if (ready == 0xffffffff) {
+ dev_err(&adapter->pdev->dev,
+ "pci slot disconnected\n");
+ return -1;
+ }
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
@@ -198,6 +205,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
val |= MPU_MAILBOX_DB_HI_MASK;
/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -396,6 +408,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = (u8 *)wrb_from_mbox(adapter);
@@ -768,6 +783,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -856,6 +874,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
struct be_cmd_req_if_destroy *req;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -1374,7 +1395,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req = cmd->va;
+ struct be_cmd_write_flashrom *req;
struct be_sge *sge;
int status;
@@ -1408,7 +1429,8 @@ err:
return status;
}
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -1429,9 +1451,9 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
- req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
- req->params.offset = 0x3FFFC;
+ req->params.offset = offset;
req->params.data_buf_size = 0x4;
status = be_mcc_notify_wait(adapter);
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c622a968c37..7297b5a4765 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -931,7 +931,8 @@ extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
-extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc);
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
extern int be_cmd_fw_init(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 09d8899b2de..dcc7f37b542 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -614,7 +614,7 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!status) {
resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
- memcpy(data, resp->seeprom_data, eeprom->len);
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
}
pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
eeprom_cmd.dma);
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index e2b3beffd49..bb2ae6f924d 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,63 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/* Flashrom related descriptors */
+#define IMAGE_TYPE_FIRMWARE 160
+#define IMAGE_TYPE_BOOTCODE 224
+#define IMAGE_TYPE_OPTIONROM 32
+
+#define NUM_FLASHDIR_ENTRIES 32
+
+#define IMG_TYPE_ISCSI_ACTIVE 0
+#define IMG_TYPE_REDBOOT 1
+#define IMG_TYPE_BIOS 2
+#define IMG_TYPE_PXE_BIOS 3
+#define IMG_TYPE_FCOE_BIOS 8
+#define IMG_TYPE_ISCSI_BACKUP 9
+#define IMG_TYPE_FCOE_FW_ACTIVE 10
+#define IMG_TYPE_FCOE_FW_BACKUP 11
+#define IMG_TYPE_NCSI_BITFILE 13
+#define IMG_TYPE_NCSI_8051 14
+
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+
+#define FLASH_NCSI_MAGIC (0x16032009)
+#define FLASH_NCSI_DISABLED (0)
+#define FLASH_NCSI_ENABLED (1)
+
+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
+
+/* Offsets for components on Flash. */
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
+#define FLASH_PXE_BIOS_START_g2 (7864320)
+#define FLASH_FCoE_BIOS_START_g2 (524288)
+#define FLASH_REDBOOT_START_g2 (0)
+
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
+#define FLASH_PXE_BIOS_START_g3 (13107200)
+#define FLASH_FCoE_BIOS_START_g3 (13631488)
+#define FLASH_REDBOOT_START_g3 (262144)
+
+
+
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
@@ -107,6 +164,7 @@
#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
#define EQ_ENTRY_RES_ID_SHIFT 16
+
struct be_eq_entry {
u32 evt;
};
@@ -221,41 +279,6 @@ struct be_eth_rx_compl {
u32 dw[4];
};
-/* Flashrom related descriptors */
-#define IMAGE_TYPE_FIRMWARE 160
-#define IMAGE_TYPE_BOOTCODE 224
-#define IMAGE_TYPE_OPTIONROM 32
-
-#define NUM_FLASHDIR_ENTRIES 32
-
-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
-#define FLASHROM_TYPE_REDBOOT 1
-#define FLASHROM_TYPE_BIOS 2
-#define FLASHROM_TYPE_PXE_BIOS 3
-#define FLASHROM_TYPE_FCOE_BIOS 8
-#define FLASHROM_TYPE_ISCSI_BACKUP 9
-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
-
-#define FLASHROM_OPER_FLASH 1
-#define FLASHROM_OPER_SAVE 2
-#define FLASHROM_OPER_REPORT 4
-
-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE (262144) /* Max redboot image sz */
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
-#define FLASH_iSCSI_BIOS_START (7340032)
-#define FLASH_PXE_BIOS_START (7864320)
-#define FLASH_FCoE_BIOS_START (524288)
-#define FLASH_REDBOOT_START (32768)
-#define FLASH_REDBOOT_ISM_START (0)
-
struct controller_id {
u32 vendor;
u32 device;
@@ -263,7 +286,20 @@ struct controller_id {
u32 subdevice;
};
-struct flash_file_hdr {
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+struct flash_file_hdr_g2 {
u8 sign[32];
u32 cksum;
u32 antidote;
@@ -275,6 +311,17 @@ struct flash_file_hdr {
u8 build[24];
};
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 rsvd[32];
+};
+
struct flash_section_hdr {
u32 format_rev;
u32 cksum;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 626b76c0ebc..cbfaa3feb7c 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -34,7 +34,6 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -69,6 +68,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
u32 reg = ioread32(addr);
u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (adapter->eeh_err)
+ return;
+
if (!enabled && enable)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
else if (enabled && !enable)
@@ -100,6 +102,10 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clear_int)
@@ -113,6 +119,10 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
@@ -474,10 +484,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
"MTU must be between %d and %d bytes\n",
- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
@@ -487,17 +499,16 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
}
/*
- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
- * set the BE in promiscuous VLAN mode.
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
*/
static int be_vid_config(struct be_adapter *adapter)
{
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
- int status;
+ int status = 0;
- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
+ if (adapter->vlans_added <= adapter->max_vlans) {
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (adapter->vlan_tag[i]) {
@@ -531,21 +542,21 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans++;
adapter->vlan_tag[vid] = 1;
-
- be_vid_config(adapter);
+ adapter->vlans_added++;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans--;
adapter->vlan_tag[vid] = 0;
-
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
- be_vid_config(adapter);
+ adapter->vlans_added--;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
@@ -565,14 +576,15 @@ static void be_set_multicast_list(struct net_device *netdev)
}
/* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
&adapter->mc_cmd_mem);
goto done;
}
be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
- netdev->mc_count, &adapter->mc_cmd_mem);
+ netdev_mc_count(netdev), &adapter->mc_cmd_mem);
done:
return;
}
@@ -634,9 +646,11 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user)
+ if (rx_page_info->last_page_user) {
pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ rx_page_info->last_page_user = false;
+ }
atomic_dec(&rxq->used);
return rx_page_info;
@@ -704,7 +718,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->data_len = curr_frag_len - hdr_len;
skb->tail += hdr_len;
}
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
@@ -737,7 +751,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -782,7 +796,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->dev = adapter->netdev;
if (vlanf) {
- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
+ if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
@@ -862,7 +876,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
vid = be16_to_cpu(vid);
- if (!adapter->vlan_grp || adapter->num_vlans == 0)
+ if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
@@ -1798,15 +1812,19 @@ char flash_cookie[2][16] = {"*** SE FLAS",
"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p)
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
{
u32 crc_offset;
u8 flashed_crc[4];
int status;
- crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
- + sizeof(struct flash_file_hdr) - 32*1024;
+
+ crc_offset = hdr_size + img_start + image_size - 4;
+
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc);
+
+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
+ (img_start + image_size - 4));
if (status) {
dev_err(&adapter->pdev->dev,
"could not get crc from flash, not flashing redboot\n");
@@ -1818,102 +1836,124 @@ static bool be_flash_redboot(struct be_adapter *adapter,
return false;
else
return true;
-
}
-static int be_flash_image(struct be_adapter *adapter,
+static int be_flash_data(struct be_adapter *adapter,
const struct firmware *fw,
- struct be_dma_mem *flash_cmd, u32 flash_type)
+ struct be_dma_mem *flash_cmd, int num_of_images)
+
{
- int status;
- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
+ int status = 0, i, filehdr_size = 0;
+ u32 total_bytes = 0, flash_op;
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
-
- switch (flash_type) {
- case FLASHROM_TYPE_ISCSI_ACTIVE:
- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_ISCSI_BACKUP:
- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_BACKUP:
- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_BIOS:
- image_offset = FLASH_iSCSI_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_BIOS:
- image_offset = FLASH_FCoE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_PXE_BIOS:
- image_offset = FLASH_PXE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_REDBOOT:
- if (!be_flash_redboot(adapter, fw->data))
- return 0;
- image_offset = FLASH_REDBOOT_ISM_START;
- image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
- break;
- default:
- return 0;
+ struct flash_comp *pflashcomp;
+
+ struct flash_comp gen3_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3}
+ };
+ struct flash_comp gen2_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2}
+ };
+
+ if (adapter->generation == BE_GEN3) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
}
-
- p += sizeof(struct flash_file_hdr) + image_offset;
- if (p + image_size > fw->data + fw->size)
+ for (i = 0; i < 8; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
+ (!be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size,
+ filehdr_size)))
+ continue;
+ p = fw->data;
+ p += filehdr_size + pflashcomp[i].offset
+ + (num_of_images * sizeof(struct image_hdr));
+ if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
-
- total_bytes = image_size;
-
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
-
- if (!total_bytes)
- flash_op = FLASHROM_OPER_FLASH;
- else
- flash_op = FLASHROM_OPER_SAVE;
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- flash_type, flash_op, num_bytes);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed. type/op %d/%d\n",
- flash_type, flash_op);
- return -1;
+ total_bytes = pflashcomp[i].size;
+ while (total_bytes) {
+ if (total_bytes > 32*1024)
+ num_bytes = 32*1024;
+ else
+ num_bytes = total_bytes;
+ total_bytes -= num_bytes;
+
+ if (!total_bytes)
+ flash_op = FLASHROM_OPER_FLASH;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ memcpy(req->params.data_buf, p, num_bytes);
+ p += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd,
+ pflashcomp[i].optype, flash_op, num_bytes);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return -1;
+ }
+ yield();
}
- yield();
}
-
return 0;
}
+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ return 0;
+ if (fhdr->build[0] == '3')
+ return BE_GEN3;
+ else if (fhdr->build[0] == '2')
+ return BE_GEN2;
+ else
+ return 0;
+}
+
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
const struct firmware *fw;
- struct flash_file_hdr *fhdr;
- struct flash_section_info *fsec = NULL;
+ struct flash_file_hdr_g2 *fhdr;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
- int status;
+ int status, i = 0;
const u8 *p;
- bool entry_found = false;
- int flash_type;
char fw_ver[FW_VER_LEN];
char fw_cfg;
@@ -1931,34 +1971,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
p = fw->data;
- fhdr = (struct flash_file_hdr *) p;
- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
- dev_err(&adapter->pdev->dev,
- "Firmware(%s) load error (signature did not match)\n",
- fw_file);
- status = -1;
- goto fw_exit;
- }
-
+ fhdr = (struct flash_file_hdr_g2 *) p;
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
- p += sizeof(struct flash_file_hdr);
- while (p < (fw->data + fw->size)) {
- fsec = (struct flash_section_info *)p;
- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
- entry_found = true;
- break;
- }
- p += 32;
- }
-
- if (!entry_found) {
- status = -1;
- dev_err(&adapter->pdev->dev,
- "Flash cookie not found in firmware image\n");
- goto fw_exit;
- }
-
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
&flash_cmd.dma);
@@ -1969,12 +1984,26 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
}
- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
- status = be_flash_image(adapter, fw, &flash_cmd,
- flash_type);
- if (status)
- break;
+ if ((adapter->generation == BE_GEN3) &&
+ (get_ufigen_type(fhdr) == BE_GEN3)) {
+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
+ for (i = 0; i < fhdr3->num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *) (fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (img_hdr_ptr->imageid == 1) {
+ status = be_flash_data(adapter, fw,
+ &flash_cmd, fhdr3->num_imgs);
+ }
+
+ }
+ } else if ((adapter->generation == BE_GEN2) &&
+ (get_ufigen_type(fhdr) == BE_GEN2)) {
+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ status = -1;
}
pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
@@ -2136,6 +2165,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
+ pci_save_state(adapter->pdev);
return 0;
free_mbox:
@@ -2222,6 +2252,11 @@ static int be_get_config(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ if (adapter->cap & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
return 0;
}
@@ -2394,13 +2429,102 @@ static int be_resume(struct pci_dev *pdev)
return 0;
}
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+ return;
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
static struct pci_driver be_driver = {
.name = DRV_NAME,
.id_table = be_dev_ids,
.probe = be_probe,
.remove = be_remove,
.suspend = be_suspend,
- .resume = be_resume
+ .resume = be_resume,
+ .err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b23bc4f56c..ef7f77113e2 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -819,7 +819,7 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
emac_hashhi = emac_hashlo = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -862,7 +862,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* set up multicast hash table */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= HM;
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9b587c34419..189fa69c209 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -973,7 +973,7 @@ static void bmac_set_multicast(struct net_device *dev)
{
struct dev_mc_list *dmi;
struct bmac_data *bp = netdev_priv(dev);
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
int i;
@@ -982,7 +982,7 @@ static void bmac_set_multicast(struct net_device *dev)
XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1021,7 +1021,7 @@ static void bmac_set_multicast(struct net_device *dev)
unsigned short rx_cfg;
u32 crc;
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
bmwrite(dev, BHASH0, 0xffff);
bmwrite(dev, BHASH1, 0xffff);
bmwrite(dev, BHASH2, 0xffff);
@@ -1039,7 +1039,7 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- for(i = 0; i < dev->mc_count; i++) {
+ for(i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 5917b941aca..c7f5515ddaa 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3560,7 +3560,7 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index ffc7381969a..6d8559052ee 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -11471,7 +11471,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
@@ -11482,7 +11483,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
bnx2x_sp(bp, mcast_config);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
config->config_table[i].
@@ -11554,7 +11555,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
+ mclist && (i < netdev_mc_count(dev));
i++, mclist = mclist->next) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index f6462b54f82..bb159d9603b 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -97,13 +97,13 @@ struct t1_rx_mode {
#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
-#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
+#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
{
u8 *addr = NULL;
- if (rm->idx++ < rm->dev->mc_count) {
+ if (rm->idx++ < t1_rx_mode_mc_cnt(rm)) {
addr = rm->list->dmi_addr;
rm->list = rm->list->next;
}
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 8d0be26f94e..c9c537be4ab 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -340,7 +340,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- for (i = 0, iter = dev->mc_list; i < dev->mc_count;
+ for (i = 0, iter = dev->mc_list; i < netdev_mc_count(dev);
i++, iter = iter->next) {
bit = 0;
tmp = iter->dmi_addr[0];
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index a24be34a3f7..c9309eadebc 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1564,7 +1564,7 @@ static void
set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int num_addr = dev->mc_count;
+ int num_addr = netdev_mc_count(dev);
unsigned long int lo_bits;
unsigned long int hi_bits;
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe06557..3ab9f51918a 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1262,7 +1262,8 @@ void t3_link_changed(struct adapter *adapter, int port_id)
lc->fc = fc;
}
- t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+ t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+ speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 0109ee4f2f9..0c08de5d09f 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -353,6 +353,9 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
* packet size register includes header, but not FCS.
*/
mtu += 14;
+ if (mtu > 1536)
+ mtu += 4;
+
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index faffad40998..d1e03b5984c 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -956,11 +956,11 @@ static void emac_dev_mcast_set(struct net_device *ndev)
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
- if (ndev->mc_count > 0) {
+ if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mc_ptr;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 45794f6cb0f..a0a6830b5e6 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -464,7 +464,7 @@ static int de620_close(struct net_device *dev)
static void de620_set_multicast_list(struct net_device *dev)
{
- if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index be9590253aa..76e0de6a426 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -960,7 +960,7 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 98da085445e..5adb1e03956 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2227,7 +2227,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
* perfect filtering will be used.
*/
- if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
+ if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
@@ -2235,7 +2235,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
else
{
bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
- bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
+ bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
}
/* Copy addresses to multicast address table, then update adapter CAM */
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 0c1f491d20b..314bc96689f 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1287,7 +1287,7 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs = dmi->dmi_addr;
dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 2a8b6a7c0b8..dea40953ed1 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1128,17 +1128,17 @@ set_multicast (struct net_device *dev)
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > multicast_filter_limit)) {
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *mclist;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i=0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist=mclist->next)
{
int bit, index = 0;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b3773006568..da0985a7a87 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -725,7 +725,7 @@ dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
struct dev_mc_list *mcptr = dev->mc_list;
- int mc_cnt = dev->mc_count;
+ int mc_cnt = netdev_mc_count(dev);
int i, oft;
u32 hash_val;
u16 hash_table[4];
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 5c7a155e849..e8c0e823a06 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1538,7 +1538,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct dev_mc_list *list = netdev->mc_list;
- u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
+ u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
@@ -1552,7 +1552,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
struct nic *nic = netdev_priv(netdev);
DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev->mc_count, netdev->flags);
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1560,7 +1560,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
nic->flags &= ~promiscuous;
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
+ netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
nic->flags |= multicast_all;
else
nic->flags &= ~multicast_all;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 94c59498cdb..488bc13cc7e 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1288,8 +1288,9 @@ set_multicast_list(struct net_device *dev)
short ioaddr = dev->base_addr;
unsigned short mode;
struct dev_mc_list *dmi=dev->mc_list;
+ int mc_count = mc_count;
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1299,7 +1300,7 @@ set_multicast_list(struct net_device *dev)
eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
}
- else if (dev->mc_count==0 )
+ else if (mc_count == 0)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1329,9 +1330,9 @@ set_multicast_list(struct net_device *dev)
outw(MC_SETUP, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
- outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+ outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- for (i = 0; i < dev->mc_count; i++)
+ for (i = 0; i < mc_count; i++)
{
eaddrs=(unsigned short *)dmi->dmi_addr;
dmi=dmi->next;
@@ -1348,7 +1349,7 @@ set_multicast_list(struct net_device *dev)
outb(MC_SETUP, ioaddr);
/* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+ i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
if (lp->tx_start != lp->tx_end)
{
@@ -1380,8 +1381,8 @@ set_multicast_list(struct net_device *dev)
break;
} else if ((i & 0x0f) == 0x03) { /* MC-Done */
printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, dev->mc_count,
- dev->mc_count > 1 ? "es":"");
+ dev->name, mc_count,
+ mc_count > 1 ? "es":"");
break;
}
}
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 6fbfc8eee63..d804ff18eda 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1578,7 +1578,7 @@ static void eexp_setup_filter(struct net_device *dev)
{
struct dev_mc_list *dmi;
unsigned short ioaddr = dev->base_addr;
- int count = dev->mc_count;
+ int count = netdev_mc_count(dev);
int i;
if (count > 8) {
printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
@@ -1627,9 +1627,9 @@ eexp_set_multicast(struct net_device *dev)
}
if (!(dev->flags & IFF_PROMISC)) {
eexp_setup_filter(dev);
- if (lp->old_mc_count != dev->mc_count) {
+ if (lp->old_mc_count != netdev_mc_count(dev)) {
kick = 1;
- lp->old_mc_count = dev->mc_count;
+ lp->old_mc_count = netdev_mc_count(dev);
}
}
if (kick) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 7b62336e673..99e4f8360d2 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1981,7 +1981,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
ehea_allmulti(dev, 0);
- if (dev->mc_count) {
+ if (!netdev_mc_empty(dev)) {
ret = ehea_drop_multicast_list(dev);
if (ret) {
/* Dropping the current multicast list failed.
@@ -1990,14 +1990,14 @@ static void ehea_set_multicast_list(struct net_device *dev)
ehea_allmulti(dev, 1);
}
- if (dev->mc_count > port->adapter->max_mc_mac) {
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
ehea_info("Mcast registration limit reached (0x%llx). "
"Use ALLMULTI!",
port->adapter->max_mc_mac);
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ for (i = 0, k_mcl_entry = dev->mc_list; i < netdev_mc_count(dev); i++,
k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 66813c91a72..3ee32e58c7e 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1413,7 +1413,7 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (netif_msg_link(priv))
dev_info(&dev->dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
- } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
dev_info(&dev->dev, "%smulticast mode\n",
(dev->flags & IFF_ALLMULTI) ? "all-" : "");
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index c81bc4b1816..94749ebaaea 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -827,11 +827,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+ unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int mc_count = netdev->mc_count;
unsigned int i, j;
if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 1f8b11449fa..31a3adb6556 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1390,20 +1390,20 @@ static void set_rx_mode(struct net_device *dev)
outl(0x002C, ioaddr + RxCtrl);
/* Unconditionally log net taps. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
/* There is apparently a chip bug, so the multicast filter
is never enabled. */
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outl(0x000C, ioaddr + RxCtrl);
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit_nr =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 71bfeec33a0..d3abeee3f11 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1359,7 +1359,7 @@ static void eth16i_multicast(struct net_device *dev)
{
int ioaddr = dev->base_addr;
- if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{
outb(3, ioaddr + RECEIVE_MODE_REG);
} else {
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index dd4ba01fd92..96817a872f4 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1213,7 +1213,7 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) { /* for each address in the list */
addrs = dmi->dmi_addr;
dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index e6a98129d78..f95b5ff0587 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1786,7 +1786,7 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
@@ -1796,7 +1796,7 @@ static void __set_rx_mode(struct net_device *dev)
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit;
bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 16a1d58419d..d9d14c83f51 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1128,6 +1128,26 @@ static phy_info_t phy_info_dp83848= {
},
};
+static phy_info_t phy_info_lan8700 = {
+ 0x0007C0C,
+ "LAN8700",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
/* ------------------------------------------------------------------------- */
static phy_info_t const * const phy_info[] = {
@@ -1137,6 +1157,7 @@ static phy_info_t const * const phy_info[] = {
&phy_info_am79c874,
&phy_info_ks8721bl,
&phy_info_dp83848,
+ &phy_info_lan8700,
NULL
};
@@ -1585,7 +1606,7 @@ static void set_multicast_list(struct net_device *dev)
dmi = dev->mc_list;
- for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+ for (j = 0; j < netdev_mc_count(dev); j++, dmi = dmi->next) {
/* Only support group multicast for now */
if (!(dmi->dmi_addr[0] & 1))
continue;
@@ -1658,6 +1679,7 @@ static int fec_enet_init(struct net_device *dev, int index)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
+ struct bufdesc *bdp;
int i;
/* Allocate memory for buffer descriptors. */
@@ -1710,6 +1732,34 @@ static int fec_enet_init(struct net_device *dev, int index)
/* Set MII speed to 2.5 MHz */
fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
/ 2500000) / 2) & 0x3F) << 1;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
fec_restart(dev, 0);
/* Queue up command to detect the PHY and initialize the
@@ -1730,7 +1780,6 @@ static void
fec_restart(struct net_device *dev, int duplex)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct bufdesc *bdp;
int i;
/* Whack a reset. We should wait for this. */
@@ -1768,33 +1817,6 @@ fec_restart(struct net_device *dev, int duplex)
}
}
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
/* Enable MII mode */
if (duplex) {
/* MII enable / FD enable */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 848e8407ea8..10903b75802 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -581,7 +581,7 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
u32 gaddr2 = 0x00000000;
dmi = dev->mc_list;
- for (i=0; i<dev->mc_count; i++) {
+ for (i=0; i<netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 22e5a847a58..482f27d5f7d 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -218,7 +218,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ca7bcb8ab3a..ddf13ef8ac8 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -220,7 +220,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
fep->fec.hthi = 0xffffffffU;
fep->fec.htlo = 0xffffffffU;
}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 008cdd9cc53..141dbc91e5e 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -213,7 +213,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
W16(ep, sen_gaddr1, 0xffff);
W16(ep, sen_gaddr2, 0xffff);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 8bd3c9f1753..c9be090485d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2863,7 +2863,7 @@ static void gfar_set_multi(struct net_device *dev)
em_num = 0;
}
- if (dev->mc_count == 0)
+ if (netdev_mc_empty(dev))
return;
/* Parse the list, and set the appropriate bits */
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index dd72c5025e6..c70b147b4fe 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1854,13 +1854,13 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct dev_mc_list *mclist;
int i;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 0c2f2e8b1c4..debac1bc679 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -2090,7 +2090,7 @@ static void hp100_set_multicast_list(struct net_device *dev)
lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
memset(&lp->hash_bytes, 0xff, 8);
- } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
@@ -2104,9 +2104,10 @@ static void hp100_set_multicast_list(struct net_device *dev)
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
- printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count);
+ printk("hp100: %s: computing hash filter - mc_count = %i\n",
+ dev->name, netdev_mc_count(dev));
#endif
- for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) {
+ for (i = 0, dmi = dev->mc_list; i < netdev_mc_count(dev); i++, dmi = dmi->next) {
addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index fb5e019169e..b75d27e82a3 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -391,7 +391,7 @@ static void emac_hash_mc(struct emac_instance *dev)
struct dev_mc_list *dmi;
int i;
- DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
@@ -425,9 +425,9 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC)
r |= EMAC_RMR_PME;
else if (ndev->flags & IFF_ALLMULTI ||
- (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
r |= EMAC_RMR_PMME;
- else if (ndev->mc_count > 0)
+ else if (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
return r;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a86693906ac..41b9c0efcbd 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1062,7 +1062,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
unsigned long lpar_rc;
- if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
@@ -1083,7 +1084,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
+ for(i = 0; i < netdev_mc_count(netdev); ++i, mclist = mclist->next) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d51c9927c81..bb53083ec61 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -219,6 +219,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index dd4e6ffd29f..abb7333a1fb 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -310,6 +310,7 @@
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b1c1eb88893..83ea11701f4 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -75,11 +75,14 @@ struct vf_data_storage {
u16 vlans_enabled;
u32 flags;
unsigned long last_nack;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 83cd0d7417d..4fe7b0ba631 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -133,6 +133,12 @@ static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -1352,6 +1358,10 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_register = igb_vlan_rx_register,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
@@ -2479,7 +2489,8 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
wr32(E1000_RLPML, max_frame_size);
}
-static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -2492,8 +2503,11 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
return;
vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
- E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (aupe)
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
/* clear all bits that might not be set */
vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
@@ -2564,7 +2578,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_SRRCTL(reg_idx), srrctl);
/* set filtering for VMDQ pools */
- igb_set_vmolr(adapter, reg_idx & 0x7);
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
/* enable receive descriptor fetching */
rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2848,14 +2862,14 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
u32 vmolr = 0;
int i;
- if (!netdev->mc_count) {
+ if (netdev_mc_empty(netdev)) {
/* nothing to program, so clear mc list */
igb_update_mc_addr_list(hw, NULL, 0);
igb_restore_vf_multicasts(adapter);
return 0;
}
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
@@ -2865,7 +2879,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
/* The shared function expects a packed array of only addresses. */
mc_ptr = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
@@ -2874,7 +2888,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
- return netdev->mc_count;
+ return netdev_mc_count(netdev);
}
/**
@@ -4490,10 +4504,57 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
reg |= size;
wr32(E1000_VMOLR(vf), reg);
}
- return 0;
}
}
- return -1;
+ return 0;
+}
+
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+ wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+ wr32(E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+ if (err)
+ goto out;
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+out:
+ return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -4506,15 +4567,21 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear all flags */
- adapter->vf_data[vf].flags = 0;
+ /* clear flags */
+ adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
adapter->vf_data[vf].last_nack = jiffies;
/* reset offloads to defaults */
- igb_set_vmolr(adapter, vf);
+ igb_set_vmolr(adapter, vf, true);
/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -4528,7 +4595,8 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
/* generate a new mac address as we were hotplug removed/added */
- random_ether_addr(vf_mac);
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+ random_ether_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -4641,7 +4709,10 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
break;
case E1000_VF_SET_VLAN:
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ retval = -1;
+ else
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -6003,6 +6074,43 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
return 0;
}
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.\n");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
+ return 0;
+}
+
static void igb_vmm_control(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
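The new ndo_set_vf_vlan hook is how a PF-enforced VLAN reaches the hardware: the 12-bit VID and the 3-bit QoS priority are packed into one word and written to VMVIR together with the "always insert default VLAN" flag, so the VF cannot override it. From userspace this path is normally reached through iproute2, e.g. ip link set <pf> vf 0 vlan 100 qos 3, on tool versions that support the VF options. A worked sketch of the encoding, assuming VLAN_PRIO_SHIFT (13) from <linux/if_vlan.h>:

/* Sketch only: the value igb_ndo_set_vf_vlan() hands to igb_set_vmvir()
 * for vlan=100, qos=3 on a given VF.
 *
 *   vid                       100         -> bits 11:0  (0x0064)
 *   qos << VLAN_PRIO_SHIFT    3 << 13     -> 0x6000 (802.1p priority)
 *   E1000_VMVIR_VLANA_DEFAULT 0x40000000  -> always insert this VLAN
 */
u32 vmvir = 100 | (3 << VLAN_PRIO_SHIFT) | E1000_VMVIR_VLANA_DEFAULT;
		/* == 0x40006064 */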
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 23ce07d3de0..6029c400f2b 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1403,8 +1403,8 @@ static void igbvf_set_multi(struct net_device *netdev)
u8 *mta_list = NULL;
int i;
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list) {
dev_err(&adapter->pdev->dev,
"failed to allocate multicast filter list\n");
@@ -1415,7 +1415,7 @@ static void igbvf_set_multi(struct net_device *netdev)
/* prepare a packed array of only addresses. */
mc_ptr = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
if (!mc_ptr)
break;
memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 81a4c5d3073..0bd5fef22d4 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1681,14 +1681,15 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
(void) ioc3_r_emcr();
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
multicast packets anyway, so skip computing all the
hashes and just accept all packets. */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addr = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 49f35e2ed19..dbdebd5efe8 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -585,11 +585,11 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
receivemode = IPG_RM_RECEIVEALLFRAMES;
} else if ((dev->flags & IFF_ALLMULTI) ||
((dev->flags & IFF_MULTICAST) &&
- (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
+ (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
/* NIC to be configured to receive all multicast
* frames. */
receivemode |= IPG_RM_RECEIVEMULTICAST;
- } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
+ } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
/* NIC to be configured to receive selected
* multicast addresses. */
receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 04d0502726c..bb4a3cda6e4 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -655,14 +655,15 @@ set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
outw(MULTICAST|PROMISC, ioaddr);
}
- else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
+ else if ((dev->flags&IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS)
{
/* Disable promiscuous mode, use normal mode. */
hardware_set_filter(NULL);
outw(MULTICAST, ioaddr);
}
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
/* Walk the address list, and load the filter */
hardware_set_filter(dev->mc_list);
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 16c91910d6c..ff015e15f5d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -958,7 +958,7 @@ static void veth_set_multicast_list(struct net_device *dev)
write_lock_irqsave(&port->mcast_gate, flags);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > VETH_MAX_MCAST)) {
+ (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
struct dev_mc_list *dmi = dev->mc_list;
@@ -969,7 +969,7 @@ static void veth_set_multicast_list(struct net_device *dev)
/* Update table */
port->num_mcast = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
u8 *addr = dmi->dmi_addr;
u64 xaddr = 0;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c56ea69762c..6a7b2ccef72 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1077,7 +1077,7 @@ ixgb_set_multi(struct net_device *netdev)
rctl |= IXGB_RCTL_VFE;
}
- if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
@@ -1092,7 +1092,7 @@ ixgb_set_multi(struct net_device *netdev)
memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
- ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
+ ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
}
@@ -1616,7 +1616,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
return;
if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index bffbe0d52d3..19e94ee155a 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -453,6 +453,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue);
extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index d4ed6adb797..1f30e163bd9 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1439,6 +1439,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+ /* Initialize the drop queue to Rx queue 127 */
+ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@@ -1680,8 +1683,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
* @src_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4)
{
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1723,8 +1726,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
* @dst_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4)
{
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1802,7 +1805,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
* @vm_pool: the Virtual Machine pool to load
**/
s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 vm_pool)
+ u8 vm_pool)
{
input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
@@ -1826,8 +1829,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
* @input: input stream to search
* @vlan: the VLAN id to load
**/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 *vlan)
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
{
*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2083,23 +2085,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
+ * @input_masks: bitwise masks for relevant fields
+ * @soft_id: software index into the silicon hash tables for filter storage
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- u16 soft_id,
- u8 queue)
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4, dst_ipv4;
+ u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
+ u8 fdirm = 0;
/* Get our input values */
ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2154,7 +2159,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2163,7 +2167,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+ if (src_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+
+ if (dst_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ default:
+ /* this already would have failed above */
+ break;
+ }
+
+ /* Program the last mask register, FDIRM */
+ if (input_masks->vlan_id_mask || !vlan_id)
+ /* Mask both VLAN and VLANP - bits 0 and 1 */
+ fdirm |= 0x3;
+
+ if (input_masks->data_mask || !flex_bytes)
+ /* Flex bytes need masking, so mask the whole thing - bit 4 */
+ fdirm |= 0x10;
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ fdirm |= 0x24;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
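The mask programming above follows a single convention for every field: a value of zero in the filter input means "do not match on this field", so the corresponding mask register is written with all ones; otherwise the caller-supplied mask from struct ixgbe_atr_input_masks is programmed as-is. A minimal helper expressing that convention (fdir_field_mask() is illustrative, not a driver symbol):

/* Illustrative only: zero field value => full wildcard mask,
 * otherwise use the mask supplied through ixgbe_atr_input_masks. */
static inline u32 fdir_field_mask(u32 field_value, u32 user_mask)
{
	return field_value ? user_mask : 0xffffffff;
}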
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 07a9410c08d..0d234346a4e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -979,6 +979,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
+ case ETH_SS_NTUPLE_FILTERS:
+ return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+ ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
default:
return -EOPNOTSUPP;
}
@@ -2150,23 +2153,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
ethtool_op_set_flags(netdev, data);
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
- return 0;
-
/* if state changes we need to update adapter->flags and reset */
if ((!!(data & ETH_FLAG_LRO)) !=
(!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ need_reset = true;
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+ (!(data & ETH_FLAG_NTUPLE))) {
+ /* turn off Flow Director perfect, set hash and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+ (data & ETH_FLAG_NTUPLE)) {
+ /* turn off Flow Director hash, enable perfect and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ } else {
+ /* no state change */
+ }
+
+ if (need_reset) {
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
}
+
return 0;
+}
+
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+ struct ethtool_rx_ntuple *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+ struct ixgbe_atr_input input_struct;
+ struct ixgbe_atr_input_masks input_masks;
+ int target_queue;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Tx queues.
+ */
+ if ((fs.action >= adapter->num_tx_queues) ||
+ (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ return -EINVAL;
+
+ memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+ input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+ input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+ input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+ input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+ input_masks.vlan_id_mask = fs.vlan_tag_mask;
+ /* only use the lowest 2 bytes for flex bytes */
+ input_masks.data_mask = (fs.data_mask & 0xffff);
+
+ switch (fs.flow_type) {
+ case TCP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ break;
+ case UDP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ break;
+ default:
+ return -1;
+ }
+ /* Mask bits from the inputs based on user-supplied mask */
+ ixgbe_atr_set_src_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+ ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+ /* 82599 expects these to be byte-swapped for perfect filtering */
+ ixgbe_atr_set_src_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+ ixgbe_atr_set_dst_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+ /* VLAN and Flex bytes are either completely masked or not */
+ if (!fs.vlan_tag_mask)
+ ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+ if (!input_masks.data_mask)
+ /* make sure we only use the first 2 bytes of user data */
+ ixgbe_atr_set_flex_byte_82599(&input_struct,
+ (fs.data & 0xffff));
+
+ /* determine if we need to drop or route the packet */
+ if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ target_queue = MAX_RX_QUEUES - 1;
+ else
+ target_queue = fs.action;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+ &input_masks, 0, target_queue);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2204,6 +2308,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ixgbe_set_flags,
+ .set_rx_ntuple = ixgbe_set_rx_ntuple,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2a3c8315e35..0792f151de9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -51,7 +51,7 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
@@ -2571,7 +2571,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
@@ -3253,6 +3253,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
+ /* clear n-tuple filters that are cached */
+ ethtool_ntuple_flush(netdev);
+
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
@@ -4187,6 +4190,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -4214,10 +4218,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ if (dev->features & NETIF_F_NTUPLE) {
+ /* Flow Director perfect filter enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ } else {
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ }
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->atr_sample_rate = 20;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 0db67c19b2c..2be90746659 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2129,6 +2129,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
+struct ixgbe_atr_input_masks {
+ u32 src_ip_mask;
+ u32 dst_ip_mask;
+ u16 src_port_mask;
+ u16 dst_port_mask;
+ u16 vlan_id_mask;
+ u16 data_mask;
+};
+
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index b9f10d05049..235b5fd4b8d 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1527,7 +1527,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
int addr_count = 0;
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
if (hw->mac.ops.update_mc_addr_list)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 26eed49d320..558b6a0b15f 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -288,7 +288,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
wmb();
if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
- msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+ netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}
static void
@@ -483,13 +483,13 @@ jme_check_link(struct net_device *netdev, int testonly)
strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
"MDI-X" :
"MDI");
- msg_link(jme, "Link is up at %s.\n", linkmsg);
+ netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
netif_carrier_on(netdev);
} else {
if (testonly)
goto out;
- msg_link(jme, "Link is down.\n");
+ netif_info(jme, link, jme->dev, "Link is down.\n");
jme->phylink = 0;
netif_carrier_off(netdev);
}
@@ -883,20 +883,20 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
== RXWBFLAG_TCPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "TCP Checksum error\n");
+ netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
== RXWBFLAG_UDPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "UDP Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
== RXWBFLAG_IPV4)) {
- msg_rx_err(jme, "IPv4 Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
return false;
}
@@ -1186,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
while (!atomic_dec_and_test(&jme->link_changing)) {
atomic_inc(&jme->link_changing);
- msg_intr(jme, "Get link change lock failed.\n");
+ netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
while (atomic_read(&jme->link_changing) != 1)
- msg_intr(jme, "Waiting link change lock.\n");
+ netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
}
if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1305,7 +1305,7 @@ jme_rx_empty_tasklet(unsigned long arg)
if (unlikely(!netif_carrier_ok(jme->dev)))
return;
- msg_rx_status(jme, "RX Queue Full!\n");
+ netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
@@ -1325,7 +1325,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
- msg_tx_done(jme, "TX Queue Waked.\n");
+ netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
netif_wake_queue(jme->dev);
}
@@ -1835,7 +1835,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
*flags |= TXFLAG_UDPCS;
break;
default:
- msg_tx_err(jme, "Error upper layer protocol.\n");
+ netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
break;
}
}
@@ -1910,12 +1910,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
smp_wmb();
if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Paused.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
smp_wmb();
if (atomic_read(&txring->nr_free)
>= (jme->tx_wake_threshold)) {
netif_wake_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Fast Waked.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
}
}
@@ -1923,7 +1923,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
}
}
@@ -1946,7 +1946,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(idx < 0)) {
netif_stop_queue(netdev);
- msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+ netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -2013,7 +2013,7 @@ jme_set_multi(struct net_device *netdev)
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
for (i = 0, mclist = netdev->mc_list;
- mclist && i < netdev->mc_count;
+ mclist && i < netdev_mc_count(netdev);
++i, mclist = mclist->next) {
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
@@ -2473,7 +2473,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2489,7 +2489,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2509,7 +2509,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2526,7 +2526,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2876,14 +2876,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
- "JMC250 Gigabit Ethernet" :
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
- "JMC260 Fast Ethernet" : "Unknown",
- (jme->fpgaver != 0) ? " (FPGA)" : "",
- (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
+ "JMC250 Gigabit Ethernet" :
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
+ "JMC260 Fast Ethernet" : "Unknown",
+ (jme->fpgaver != 0) ? " (FPGA)" : "",
+ (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+ jme->rev, netdev->dev_addr);
return 0;
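All of the jme message changes are the same substitution: the driver-private msg_*() wrappers are replaced by the generic netif_<level>() helpers from <linux/netdevice.h>, which perform the same netif_msg_<type>() level check internally and prefix the device name for you. A sketch of the before/after expansion, using the rx_err case as the example:

/* Old (driver-local wrapper, removed from jme.h below):
 *	msg_rx_err(jme, "TCP Checksum error\n");
 * expanded roughly to
 *	if (netif_msg_rx_err(jme))
 *		printk(KERN_ERR "%s: TCP Checksum error\n", jme->dev->name);
 *
 * New (generic helper, same gating on the msg_enable bits):
 */
netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");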
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 251abed3817..c19db9146a2 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -45,43 +45,16 @@
printk(KERN_ERR PFX fmt, ## args)
#ifdef TX_DEBUG
-#define tx_dbg(priv, fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
+#define tx_dbg(priv, fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
#else
-#define tx_dbg(priv, fmt, args...)
+#define tx_dbg(priv, fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \
+} while (0)
#endif
-#define jme_msg(msglvl, type, priv, fmt, args...) \
- if (netif_msg_##type(priv)) \
- printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
-
-#define msg_probe(priv, fmt, args...) \
- jme_msg(KERN_INFO, probe, priv, fmt, ## args)
-
-#define msg_link(priv, fmt, args...) \
- jme_msg(KERN_INFO, link, priv, fmt, ## args)
-
-#define msg_intr(priv, fmt, args...) \
- jme_msg(KERN_INFO, intr, priv, fmt, ## args)
-
-#define msg_rx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
-
-#define msg_rx_status(priv, fmt, args...) \
- jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
-
-#define msg_tx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
-
-#define msg_tx_done(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
-
-#define msg_tx_queued(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
-
-#define msg_hw(priv, fmt, args...) \
- jme_msg(KERN_ERR, hw, priv, fmt, ## args)
-
/*
* Extra PCI Configuration space interface
*/
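The tx_dbg() change is worth calling out separately: instead of expanding to nothing when TX_DEBUG is off, the macro now expands to a do { if (0) printk(...); } while (0) body, so the format string and arguments are still type-checked by the compiler and then discarded by dead-code elimination in non-debug builds. The same idiom in isolation, with hypothetical MY_DEBUG / my_dbg() names:

/* Illustrative only: a compiled-out debug macro that still checks its
 * arguments.  MY_DEBUG and my_dbg() are hypothetical names. */
#ifdef MY_DEBUG
#define my_dbg(fmt, args...) \
	printk(KERN_DEBUG fmt, ##args)
#else
#define my_dbg(fmt, args...) \
do { \
	if (0) \
		printk(KERN_DEBUG fmt, ##args); \
} while (0)
#endif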
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 25e2af6997e..af0c764130e 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -490,19 +490,19 @@ static void korina_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
recognise |= ETH_ARC_PRO;
- else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
+ else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
/* All multicast and broadcast */
recognise |= ETH_ARC_AM;
/* Build the hash table */
- if (dev->mc_count > 4) {
+ if (netdev_mc_count(dev) > 4) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 6d3ac65bc35..9845ab1e557 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -965,14 +965,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
- } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
+ } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
struct dev_mc_list *mcptr = dev->mc_list;
u32 crc;
int i;
/* accept some multicast */
- for (i = dev->mc_count; i > 0; i--) {
+ for (i = netdev_mc_count(dev); i > 0; i--) {
crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
crc >>= (32 - 6); /* get top six bits */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c0ceebccaa4..ffffb388970 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -1193,8 +1193,8 @@ static void ks_set_rx_mode(struct net_device *netdev)
else
ks_set_promis(ks, false);
- if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
- if (netdev->mc_count <= MAX_MCAST_LST) {
+ if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
+ if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
if (!(*ptr->dmi_addr & 1))
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
new file mode 100644
index 00000000000..6f187c7e61f
--- /dev/null
+++ b/drivers/net/ksz884x.c
@@ -0,0 +1,7335 @@
+/**
+ * drivers/net/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
+ *
+ * Copyright (c) 2009-2010 Micrel, Inc.
+ * Tristram Ha <Tristram.Ha@micrel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+
+
+/* DMA Registers */
+
+#define KS_DMA_TX_CTRL 0x0000
+#define DMA_TX_ENABLE 0x00000001
+#define DMA_TX_CRC_ENABLE 0x00000002
+#define DMA_TX_PAD_ENABLE 0x00000004
+#define DMA_TX_LOOPBACK 0x00000100
+#define DMA_TX_FLOW_ENABLE 0x00000200
+#define DMA_TX_CSUM_IP 0x00010000
+#define DMA_TX_CSUM_TCP 0x00020000
+#define DMA_TX_CSUM_UDP 0x00040000
+#define DMA_TX_BURST_SIZE 0x3F000000
+
+#define KS_DMA_RX_CTRL 0x0004
+#define DMA_RX_ENABLE 0x00000001
+#define KS884X_DMA_RX_MULTICAST 0x00000002
+#define DMA_RX_PROMISCUOUS 0x00000004
+#define DMA_RX_ERROR 0x00000008
+#define DMA_RX_UNICAST 0x00000010
+#define DMA_RX_ALL_MULTICAST 0x00000020
+#define DMA_RX_BROADCAST 0x00000040
+#define DMA_RX_FLOW_ENABLE 0x00000200
+#define DMA_RX_CSUM_IP 0x00010000
+#define DMA_RX_CSUM_TCP 0x00020000
+#define DMA_RX_CSUM_UDP 0x00040000
+#define DMA_RX_BURST_SIZE 0x3F000000
+
+#define DMA_BURST_SHIFT 24
+#define DMA_BURST_DEFAULT 8
+
+#define KS_DMA_TX_START 0x0008
+#define KS_DMA_RX_START 0x000C
+#define DMA_START 0x00000001
+
+#define KS_DMA_TX_ADDR 0x0010
+#define KS_DMA_RX_ADDR 0x0014
+
+#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
+#define DMA_ADDR_LIST_SHIFT 2
+
+/* MTR0 */
+#define KS884X_MULTICAST_0_OFFSET 0x0020
+#define KS884X_MULTICAST_1_OFFSET 0x0021
+#define KS884X_MULTICAST_2_OFFSET 0x0022
+#define KS884x_MULTICAST_3_OFFSET 0x0023
+/* MTR1 */
+#define KS884X_MULTICAST_4_OFFSET 0x0024
+#define KS884X_MULTICAST_5_OFFSET 0x0025
+#define KS884X_MULTICAST_6_OFFSET 0x0026
+#define KS884X_MULTICAST_7_OFFSET 0x0027
+
+/* Interrupt Registers */
+
+/* INTEN */
+#define KS884X_INTERRUPTS_ENABLE 0x0028
+/* INTST */
+#define KS884X_INTERRUPTS_STATUS 0x002C
+
+#define KS884X_INT_RX_STOPPED 0x02000000
+#define KS884X_INT_TX_STOPPED 0x04000000
+#define KS884X_INT_RX_OVERRUN 0x08000000
+#define KS884X_INT_TX_EMPTY 0x10000000
+#define KS884X_INT_RX 0x20000000
+#define KS884X_INT_TX 0x40000000
+#define KS884X_INT_PHY 0x80000000
+
+#define KS884X_INT_RX_MASK \
+ (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
+#define KS884X_INT_TX_MASK \
+ (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
+#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
+
+/* MAC Additional Station Address */
+
+/* MAAL0 */
+#define KS_ADD_ADDR_0_LO 0x0080
+/* MAAH0 */
+#define KS_ADD_ADDR_0_HI 0x0084
+/* MAAL1 */
+#define KS_ADD_ADDR_1_LO 0x0088
+/* MAAH1 */
+#define KS_ADD_ADDR_1_HI 0x008C
+/* MAAL2 */
+#define KS_ADD_ADDR_2_LO 0x0090
+/* MAAH2 */
+#define KS_ADD_ADDR_2_HI 0x0094
+/* MAAL3 */
+#define KS_ADD_ADDR_3_LO 0x0098
+/* MAAH3 */
+#define KS_ADD_ADDR_3_HI 0x009C
+/* MAAL4 */
+#define KS_ADD_ADDR_4_LO 0x00A0
+/* MAAH4 */
+#define KS_ADD_ADDR_4_HI 0x00A4
+/* MAAL5 */
+#define KS_ADD_ADDR_5_LO 0x00A8
+/* MAAH5 */
+#define KS_ADD_ADDR_5_HI 0x00AC
+/* MAAL6 */
+#define KS_ADD_ADDR_6_LO 0x00B0
+/* MAAH6 */
+#define KS_ADD_ADDR_6_HI 0x00B4
+/* MAAL7 */
+#define KS_ADD_ADDR_7_LO 0x00B8
+/* MAAH7 */
+#define KS_ADD_ADDR_7_HI 0x00BC
+/* MAAL8 */
+#define KS_ADD_ADDR_8_LO 0x00C0
+/* MAAH8 */
+#define KS_ADD_ADDR_8_HI 0x00C4
+/* MAAL9 */
+#define KS_ADD_ADDR_9_LO 0x00C8
+/* MAAH9 */
+#define KS_ADD_ADDR_9_HI 0x00CC
+/* MAAL10 */
+#define KS_ADD_ADDR_A_LO 0x00D0
+/* MAAH10 */
+#define KS_ADD_ADDR_A_HI 0x00D4
+/* MAAL11 */
+#define KS_ADD_ADDR_B_LO 0x00D8
+/* MAAH11 */
+#define KS_ADD_ADDR_B_HI 0x00DC
+/* MAAL12 */
+#define KS_ADD_ADDR_C_LO 0x00E0
+/* MAAH12 */
+#define KS_ADD_ADDR_C_HI 0x00E4
+/* MAAL13 */
+#define KS_ADD_ADDR_D_LO 0x00E8
+/* MAAH13 */
+#define KS_ADD_ADDR_D_HI 0x00EC
+/* MAAL14 */
+#define KS_ADD_ADDR_E_LO 0x00F0
+/* MAAH14 */
+#define KS_ADD_ADDR_E_HI 0x00F4
+/* MAAL15 */
+#define KS_ADD_ADDR_F_LO 0x00F8
+/* MAAH15 */
+#define KS_ADD_ADDR_F_HI 0x00FC
+
+#define ADD_ADDR_HI_MASK 0x0000FFFF
+#define ADD_ADDR_ENABLE 0x80000000
+#define ADD_ADDR_INCR 8
+
+/* Miscellaneous Registers */
+
+/* MARL */
+#define KS884X_ADDR_0_OFFSET 0x0200
+#define KS884X_ADDR_1_OFFSET 0x0201
+/* MARM */
+#define KS884X_ADDR_2_OFFSET 0x0202
+#define KS884X_ADDR_3_OFFSET 0x0203
+/* MARH */
+#define KS884X_ADDR_4_OFFSET 0x0204
+#define KS884X_ADDR_5_OFFSET 0x0205
+
+/* OBCR */
+#define KS884X_BUS_CTRL_OFFSET 0x0210
+
+#define BUS_SPEED_125_MHZ 0x0000
+#define BUS_SPEED_62_5_MHZ 0x0001
+#define BUS_SPEED_41_66_MHZ 0x0002
+#define BUS_SPEED_25_MHZ 0x0003
+
+/* EEPCR */
+#define KS884X_EEPROM_CTRL_OFFSET 0x0212
+
+#define EEPROM_CHIP_SELECT 0x0001
+#define EEPROM_SERIAL_CLOCK 0x0002
+#define EEPROM_DATA_OUT 0x0004
+#define EEPROM_DATA_IN 0x0008
+#define EEPROM_ACCESS_ENABLE 0x0010
+
+/* MBIR */
+#define KS884X_MEM_INFO_OFFSET 0x0214
+
+#define RX_MEM_TEST_FAILED 0x0008
+#define RX_MEM_TEST_FINISHED 0x0010
+#define TX_MEM_TEST_FAILED 0x0800
+#define TX_MEM_TEST_FINISHED 0x1000
+
+/* GCR */
+#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
+#define GLOBAL_SOFTWARE_RESET 0x0001
+
+#define KS8841_POWER_MANAGE_OFFSET 0x0218
+
+/* WFCR */
+#define KS8841_WOL_CTRL_OFFSET 0x021A
+#define KS8841_WOL_MAGIC_ENABLE 0x0080
+#define KS8841_WOL_FRAME3_ENABLE 0x0008
+#define KS8841_WOL_FRAME2_ENABLE 0x0004
+#define KS8841_WOL_FRAME1_ENABLE 0x0002
+#define KS8841_WOL_FRAME0_ENABLE 0x0001
+
+/* WF0 */
+#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
+#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
+#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
+
+/* IACR */
+#define KS884X_IACR_P 0x04A0
+#define KS884X_IACR_OFFSET KS884X_IACR_P
+
+/* IADR1 */
+#define KS884X_IADR1_P 0x04A2
+#define KS884X_IADR2_P 0x04A4
+#define KS884X_IADR3_P 0x04A6
+#define KS884X_IADR4_P 0x04A8
+#define KS884X_IADR5_P 0x04AA
+
+#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
+#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
+
+#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
+#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
+#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
+#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
+#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
+#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
+#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
+#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
+#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
+
+/* P1MBCR */
+#define KS884X_P1MBCR_P 0x04D0
+#define KS884X_P1MBSR_P 0x04D2
+#define KS884X_PHY1ILR_P 0x04D4
+#define KS884X_PHY1IHR_P 0x04D6
+#define KS884X_P1ANAR_P 0x04D8
+#define KS884X_P1ANLPR_P 0x04DA
+
+/* P2MBCR */
+#define KS884X_P2MBCR_P 0x04E0
+#define KS884X_P2MBSR_P 0x04E2
+#define KS884X_PHY2ILR_P 0x04E4
+#define KS884X_PHY2IHR_P 0x04E6
+#define KS884X_P2ANAR_P 0x04E8
+#define KS884X_P2ANLPR_P 0x04EA
+
+#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
+#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
+
+#define KS884X_PHY_CTRL_OFFSET 0x00
+
+/* Mode Control Register */
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET 0x8000
+#define PHY_LOOPBACK 0x4000
+#define PHY_SPEED_100MBIT 0x2000
+#define PHY_AUTO_NEG_ENABLE 0x1000
+#define PHY_POWER_DOWN 0x0800
+#define PHY_MII_DISABLE 0x0400
+#define PHY_AUTO_NEG_RESTART 0x0200
+#define PHY_FULL_DUPLEX 0x0100
+#define PHY_COLLISION_TEST 0x0080
+#define PHY_HP_MDIX 0x0020
+#define PHY_FORCE_MDIX 0x0010
+#define PHY_AUTO_MDIX_DISABLE 0x0008
+#define PHY_REMOTE_FAULT_DISABLE 0x0004
+#define PHY_TRANSMIT_DISABLE 0x0002
+#define PHY_LED_DISABLE 0x0001
+
+#define KS884X_PHY_STATUS_OFFSET 0x02
+
+/* Mode Status Register */
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE 0x8000
+#define PHY_100BTX_FD_CAPABLE 0x4000
+#define PHY_100BTX_CAPABLE 0x2000
+#define PHY_10BT_FD_CAPABLE 0x1000
+#define PHY_10BT_CAPABLE 0x0800
+#define PHY_MII_SUPPRESS_CAPABLE 0x0040
+#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
+#define PHY_REMOTE_FAULT 0x0010
+#define PHY_AUTO_NEG_CAPABLE 0x0008
+#define PHY_LINK_STATUS 0x0004
+#define PHY_JABBER_DETECT 0x0002
+#define PHY_EXTENDED_CAPABILITY 0x0001
+
+#define KS884X_PHY_ID_1_OFFSET 0x04
+#define KS884X_PHY_ID_2_OFFSET 0x06
+
+/* PHY Identifier Registers */
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
+
+/* Auto-Negotiation Advertisement Register */
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
+#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
+/* Not supported. */
+#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
+#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
+#define PHY_AUTO_NEG_100BT4 0x0200
+#define PHY_AUTO_NEG_100BTX_FD 0x0100
+#define PHY_AUTO_NEG_100BTX 0x0080
+#define PHY_AUTO_NEG_10BT_FD 0x0040
+#define PHY_AUTO_NEG_10BT 0x0020
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
+
+#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
+
+/* Auto-Negotiation Link Partner Ability Register */
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE 0x8000
+#define PHY_REMOTE_ACKNOWLEDGE 0x4000
+#define PHY_REMOTE_REMOTE_FAULT 0x2000
+#define PHY_REMOTE_SYM_PAUSE 0x0400
+#define PHY_REMOTE_100BTX_FD 0x0100
+#define PHY_REMOTE_100BTX 0x0080
+#define PHY_REMOTE_10BT_FD 0x0040
+#define PHY_REMOTE_10BT 0x0020
+
+/* P1VCT */
+#define KS884X_P1VCT_P 0x04F0
+#define KS884X_P1PHYCTRL_P 0x04F2
+
+/* P2VCT */
+#define KS884X_P2VCT_P 0x04F4
+#define KS884X_P2PHYCTRL_P 0x04F6
+
+#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
+#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
+
+#define KS884X_PHY_LINK_MD_OFFSET 0x00
+
+#define PHY_START_CABLE_DIAG 0x8000
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT 0x1000
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
+
+#define PHY_STAT_REVERSED_POLARITY 0x0020
+#define PHY_STAT_MDIX 0x0010
+#define PHY_FORCE_LINK 0x0008
+#define PHY_POWER_SAVING_DISABLE 0x0004
+#define PHY_REMOTE_LOOPBACK 0x0002
+
+/* SIDER */
+#define KS884X_SIDER_P 0x0400
+#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
+#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
+
+#define REG_FAMILY_ID 0x88
+
+#define REG_CHIP_ID_41 0x8810
+#define REG_CHIP_ID_42 0x8800
+
+#define KS884X_CHIP_ID_MASK_41 0xFF10
+#define KS884X_CHIP_ID_MASK 0xFFF0
+#define KS884X_CHIP_ID_SHIFT 4
+#define KS884X_REVISION_MASK 0x000E
+#define KS884X_REVISION_SHIFT 1
+#define KS8842_START 0x0001
+
+#define CHIP_IP_41_M 0x8810
+#define CHIP_IP_42_M 0x8800
+#define CHIP_IP_61_M 0x8890
+#define CHIP_IP_62_M 0x8880
+
+#define CHIP_IP_41_P 0x8850
+#define CHIP_IP_42_P 0x8840
+#define CHIP_IP_61_P 0x88D0
+#define CHIP_IP_62_P 0x88C0
+
+/* SGCR1 */
+#define KS8842_SGCR1_P 0x0402
+#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
+
+#define SWITCH_PASS_ALL 0x8000
+#define SWITCH_TX_FLOW_CTRL 0x2000
+#define SWITCH_RX_FLOW_CTRL 0x1000
+#define SWITCH_CHECK_LENGTH 0x0800
+#define SWITCH_AGING_ENABLE 0x0400
+#define SWITCH_FAST_AGING 0x0200
+#define SWITCH_AGGR_BACKOFF 0x0100
+#define SWITCH_PASS_PAUSE 0x0008
+#define SWITCH_LINK_AUTO_AGING 0x0001
+
+/* SGCR2 */
+#define KS8842_SGCR2_P 0x0404
+#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
+
+#define SWITCH_VLAN_ENABLE 0x8000
+#define SWITCH_IGMP_SNOOP 0x4000
+#define IPV6_MLD_SNOOP_ENABLE 0x2000
+#define IPV6_MLD_SNOOP_OPTION 0x1000
+#define PRIORITY_SCHEME_SELECT 0x0800
+#define SWITCH_MIRROR_RX_TX 0x0100
+#define UNICAST_VLAN_BOUNDARY 0x0080
+#define MULTICAST_STORM_DISABLE 0x0040
+#define SWITCH_BACK_PRESSURE 0x0020
+#define FAIR_FLOW_CTRL 0x0010
+#define NO_EXC_COLLISION_DROP 0x0008
+#define SWITCH_HUGE_PACKET 0x0004
+#define SWITCH_LEGAL_PACKET 0x0002
+#define SWITCH_BUF_RESERVE 0x0001
+
+/* SGCR3 */
+#define KS8842_SGCR3_P 0x0406
+#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
+
+#define BROADCAST_STORM_RATE_LO 0xFF00
+#define SWITCH_REPEATER 0x0080
+#define SWITCH_HALF_DUPLEX 0x0040
+#define SWITCH_FLOW_CTRL 0x0020
+#define SWITCH_10_MBIT 0x0010
+#define SWITCH_REPLACE_NULL_VID 0x0008
+#define BROADCAST_STORM_RATE_HI 0x0007
+
+#define BROADCAST_STORM_RATE 0x07FF
+
+/* SGCR4 */
+#define KS8842_SGCR4_P 0x0408
+
+/* SGCR5 */
+#define KS8842_SGCR5_P 0x040A
+#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
+
+#define LED_MODE 0x8200
+#define LED_SPEED_DUPLEX_ACT 0x0000
+#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
+#define LED_DUPLEX_10_100 0x0200
+
+/* SGCR6 */
+#define KS8842_SGCR6_P 0x0410
+#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
+
+#define KS8842_PRIORITY_MASK 3
+#define KS8842_PRIORITY_SHIFT 2
+
+/* SGCR7 */
+#define KS8842_SGCR7_P 0x0412
+#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
+
+#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
+#define SWITCH_UNK_DEF_PORT_3 0x0004
+#define SWITCH_UNK_DEF_PORT_2 0x0002
+#define SWITCH_UNK_DEF_PORT_1 0x0001
+
+/* MACAR1 */
+#define KS8842_MACAR1_P 0x0470
+#define KS8842_MACAR2_P 0x0472
+#define KS8842_MACAR3_P 0x0474
+#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
+#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
+#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
+#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
+#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
+#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
+
+/* TOSR1 */
+#define KS8842_TOSR1_P 0x0480
+#define KS8842_TOSR2_P 0x0482
+#define KS8842_TOSR3_P 0x0484
+#define KS8842_TOSR4_P 0x0486
+#define KS8842_TOSR5_P 0x0488
+#define KS8842_TOSR6_P 0x048A
+#define KS8842_TOSR7_P 0x0490
+#define KS8842_TOSR8_P 0x0492
+#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
+#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
+#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
+#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
+#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
+#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
+
+#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
+#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
+
+/* P1CR1 */
+#define KS8842_P1CR1_P 0x0500
+#define KS8842_P1CR2_P 0x0502
+#define KS8842_P1VIDR_P 0x0504
+#define KS8842_P1CR3_P 0x0506
+#define KS8842_P1IRCR_P 0x0508
+#define KS8842_P1ERCR_P 0x050A
+#define KS884X_P1SCSLMD_P 0x0510
+#define KS884X_P1CR4_P 0x0512
+#define KS884X_P1SR_P 0x0514
+
+/* P2CR1 */
+#define KS8842_P2CR1_P 0x0520
+#define KS8842_P2CR2_P 0x0522
+#define KS8842_P2VIDR_P 0x0524
+#define KS8842_P2CR3_P 0x0526
+#define KS8842_P2IRCR_P 0x0528
+#define KS8842_P2ERCR_P 0x052A
+#define KS884X_P2SCSLMD_P 0x0530
+#define KS884X_P2CR4_P 0x0532
+#define KS884X_P2SR_P 0x0534
+
+/* P3CR1 */
+#define KS8842_P3CR1_P 0x0540
+#define KS8842_P3CR2_P 0x0542
+#define KS8842_P3VIDR_P 0x0544
+#define KS8842_P3CR3_P 0x0546
+#define KS8842_P3IRCR_P 0x0548
+#define KS8842_P3ERCR_P 0x054A
+
+#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
+#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
+#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
+
+#define PORT_CTRL_ADDR(port, addr) \
+ (addr = KS8842_PORT_1_CTRL_1 + (port) * \
+ (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
+
+#define KS8842_PORT_CTRL_1_OFFSET 0x00
+
+#define PORT_BROADCAST_STORM 0x0080
+#define PORT_DIFFSERV_ENABLE 0x0040
+#define PORT_802_1P_ENABLE 0x0020
+#define PORT_BASED_PRIORITY_MASK 0x0018
+#define PORT_BASED_PRIORITY_BASE 0x0003
+#define PORT_BASED_PRIORITY_SHIFT 3
+#define PORT_BASED_PRIORITY_0 0x0000
+#define PORT_BASED_PRIORITY_1 0x0008
+#define PORT_BASED_PRIORITY_2 0x0010
+#define PORT_BASED_PRIORITY_3 0x0018
+#define PORT_INSERT_TAG 0x0004
+#define PORT_REMOVE_TAG 0x0002
+#define PORT_PRIO_QUEUE_ENABLE 0x0001
+
+#define KS8842_PORT_CTRL_2_OFFSET 0x02
+
+#define PORT_INGRESS_VLAN_FILTER 0x4000
+#define PORT_DISCARD_NON_VID 0x2000
+#define PORT_FORCE_FLOW_CTRL 0x1000
+#define PORT_BACK_PRESSURE 0x0800
+#define PORT_TX_ENABLE 0x0400
+#define PORT_RX_ENABLE 0x0200
+#define PORT_LEARN_DISABLE 0x0100
+#define PORT_MIRROR_SNIFFER 0x0080
+#define PORT_MIRROR_RX 0x0040
+#define PORT_MIRROR_TX 0x0020
+#define PORT_USER_PRIORITY_CEILING 0x0008
+#define PORT_VLAN_MEMBERSHIP 0x0007
+
+#define KS8842_PORT_CTRL_VID_OFFSET 0x04
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define KS8842_PORT_CTRL_3_OFFSET 0x06
+
+#define PORT_INGRESS_LIMIT_MODE 0x000C
+#define PORT_INGRESS_ALL 0x0000
+#define PORT_INGRESS_UNICAST 0x0004
+#define PORT_INGRESS_MULTICAST 0x0008
+#define PORT_INGRESS_BROADCAST 0x000C
+#define PORT_COUNT_IFG 0x0002
+#define PORT_COUNT_PREAMBLE 0x0001
+
+#define KS8842_PORT_IN_RATE_OFFSET 0x08
+#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
+
+#define PORT_PRIORITY_RATE 0x0F
+#define PORT_PRIORITY_RATE_SHIFT 4
+
+#define KS884X_PORT_LINK_MD 0x10
+
+#define PORT_CABLE_10M_SHORT 0x8000
+#define PORT_CABLE_DIAG_RESULT 0x6000
+#define PORT_CABLE_STAT_NORMAL 0x0000
+#define PORT_CABLE_STAT_OPEN 0x2000
+#define PORT_CABLE_STAT_SHORT 0x4000
+#define PORT_CABLE_STAT_FAILED 0x6000
+#define PORT_START_CABLE_DIAG 0x1000
+#define PORT_FORCE_LINK 0x0800
+#define PORT_POWER_SAVING_DISABLE 0x0400
+#define PORT_PHY_REMOTE_LOOPBACK 0x0200
+#define PORT_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PORT_CTRL_4_OFFSET 0x12
+
+#define PORT_LED_OFF 0x8000
+#define PORT_TX_DISABLE 0x4000
+#define PORT_AUTO_NEG_RESTART 0x2000
+#define PORT_REMOTE_FAULT_DISABLE 0x1000
+#define PORT_POWER_DOWN 0x0800
+#define PORT_AUTO_MDIX_DISABLE 0x0400
+#define PORT_FORCE_MDIX 0x0200
+#define PORT_LOOPBACK 0x0100
+#define PORT_AUTO_NEG_ENABLE 0x0080
+#define PORT_FORCE_100_MBIT 0x0040
+#define PORT_FORCE_FULL_DUPLEX 0x0020
+#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
+#define PORT_AUTO_NEG_100BTX_FD 0x0008
+#define PORT_AUTO_NEG_100BTX 0x0004
+#define PORT_AUTO_NEG_10BT_FD 0x0002
+#define PORT_AUTO_NEG_10BT 0x0001
+
+#define KS884X_PORT_STATUS_OFFSET 0x14
+
+#define PORT_HP_MDIX 0x8000
+#define PORT_REVERSED_POLARITY 0x2000
+#define PORT_RX_FLOW_CTRL 0x0800
+#define PORT_TX_FLOW_CTRL 0x1000
+#define PORT_STATUS_SPEED_100MBIT 0x0400
+#define PORT_STATUS_FULL_DUPLEX 0x0200
+#define PORT_REMOTE_FAULT 0x0100
+#define PORT_MDIX_STATUS 0x0080
+#define PORT_AUTO_NEG_COMPLETE 0x0040
+#define PORT_STATUS_LINK_GOOD 0x0020
+#define PORT_REMOTE_SYM_PAUSE 0x0010
+#define PORT_REMOTE_100BTX_FD 0x0008
+#define PORT_REMOTE_100BTX 0x0004
+#define PORT_REMOTE_10BT_FD 0x0002
+#define PORT_REMOTE_10BT 0x0001
+
+/*
+#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
+#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
+#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
+#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
+#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
+*/
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
+#define STATIC_MAC_TABLE_VALID 0x00080000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
+#define STATIC_MAC_TABLE_USE_FID 0x00200000
+#define STATIC_MAC_TABLE_FID 0x03C00000
+
+#define STATIC_MAC_FWD_PORTS_SHIFT 16
+#define STATIC_MAC_FID_SHIFT 22
+
+/*
+#define VLAN_TABLE_VID 00-00000000-00000FFF
+#define VLAN_TABLE_FID 00-00000000-0000F000
+#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
+#define VLAN_TABLE_VALID 00-00000000-00080000
+*/
+
+#define VLAN_TABLE_VID 0x00000FFF
+#define VLAN_TABLE_FID 0x0000F000
+#define VLAN_TABLE_MEMBERSHIP 0x00070000
+#define VLAN_TABLE_VALID 0x00080000
+
+#define VLAN_TABLE_FID_SHIFT 12
+#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
+
+/*
+#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
+#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
+#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
+*/
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x000F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
+#define DYNAMIC_MAC_TABLE_RESERVED 0x78
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_FID_SHIFT 16
+#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
+#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
+#define DYNAMIC_MAC_ENTRIES_SHIFT 24
+#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
+
+/*
+#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+#define MIB_COUNTER_VALID 00-00000000-40000000
+#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
+*/
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+#define MIB_COUNTER_VALID 0x40000000
+#define MIB_COUNTER_OVERFLOW 0x80000000
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define KS_MIB_PACKET_DROPPED_TX_0 0x100
+#define KS_MIB_PACKET_DROPPED_TX_1 0x101
+#define KS_MIB_PACKET_DROPPED_TX 0x102
+#define KS_MIB_PACKET_DROPPED_RX_0 0x103
+#define KS_MIB_PACKET_DROPPED_RX_1 0x104
+#define KS_MIB_PACKET_DROPPED_RX 0x105
+
+/* Change default LED mode. */
+#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
+
+#define MAC_ADDR_LEN 6
+#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+
+#define MAX_ETHERNET_BODY_SIZE 1500
+#define ETHERNET_HEADER_SIZE 14
+
+#define MAX_ETHERNET_PACKET_SIZE \
+ (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
+
+#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
+#define MAX_RX_BUF_SIZE (1912 + 4)
+
+#define ADDITIONAL_ENTRIES 16
+#define MAX_MULTICAST_LIST 32
+
+#define HW_MULTICAST_SIZE 8
+
+#define HW_TO_DEV_PORT(port) (port - 1)
+
+enum {
+ media_connected,
+ media_disconnected
+};
+
+enum {
+ OID_COUNTER_UNKOWN,
+
+ OID_COUNTER_FIRST,
+
+ /* total transmit errors */
+ OID_COUNTER_XMIT_ERROR,
+
+ /* total receive errors */
+ OID_COUNTER_RCV_ERROR,
+
+ OID_COUNTER_LAST
+};
+
+/*
+ * Hardware descriptor definitions
+ */
+
+#define DESC_ALIGNMENT 16
+#define BUFFER_ALIGNMENT 8
+
+#define NUM_OF_RX_DESC 64
+#define NUM_OF_TX_DESC 64
+
+#define KS_DESC_RX_FRAME_LEN 0x000007FF
+#define KS_DESC_RX_FRAME_TYPE 0x00008000
+#define KS_DESC_RX_ERROR_CRC 0x00010000
+#define KS_DESC_RX_ERROR_RUNT 0x00020000
+#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
+#define KS_DESC_RX_ERROR_PHY 0x00080000
+#define KS884X_DESC_RX_PORT_MASK 0x00300000
+#define KS_DESC_RX_MULTICAST 0x01000000
+#define KS_DESC_RX_ERROR 0x02000000
+#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
+#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
+#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
+#define KS_DESC_RX_LAST 0x20000000
+#define KS_DESC_RX_FIRST 0x40000000
+#define KS_DESC_RX_ERROR_COND \
+ (KS_DESC_RX_ERROR_CRC | \
+ KS_DESC_RX_ERROR_RUNT | \
+ KS_DESC_RX_ERROR_PHY | \
+ KS_DESC_RX_ERROR_TOO_LONG)
+
+#define KS_DESC_HW_OWNED 0x80000000
+
+#define KS_DESC_BUF_SIZE 0x000007FF
+#define KS884X_DESC_TX_PORT_MASK 0x00300000
+#define KS_DESC_END_OF_RING 0x02000000
+#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
+#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
+#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
+#define KS_DESC_TX_LAST 0x20000000
+#define KS_DESC_TX_FIRST 0x40000000
+#define KS_DESC_TX_INTERRUPT 0x80000000
+
+#define KS_DESC_PORT_SHIFT 20
+
+#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
+
+#define KS_DESC_TX_MASK \
+ (KS_DESC_TX_INTERRUPT | \
+ KS_DESC_TX_FIRST | \
+ KS_DESC_TX_LAST | \
+ KS_DESC_TX_CSUM_GEN_IP | \
+ KS_DESC_TX_CSUM_GEN_TCP | \
+ KS_DESC_TX_CSUM_GEN_UDP | \
+ KS_DESC_BUF_SIZE)
+
+struct ksz_desc_rx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 first_desc:1;
+ u32 last_desc:1;
+ u32 csum_err_ip:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_udp:1;
+ u32 error:1;
+ u32 multicast:1;
+ u32 src_port:4;
+ u32 err_phy:1;
+ u32 err_too_long:1;
+ u32 err_runt:1;
+ u32 err_crc:1;
+ u32 frame_type:1;
+ u32 reserved1:4;
+ u32 frame_len:11;
+#else
+ u32 frame_len:11;
+ u32 reserved1:4;
+ u32 frame_type:1;
+ u32 err_crc:1;
+ u32 err_runt:1;
+ u32 err_too_long:1;
+ u32 err_phy:1;
+ u32 src_port:4;
+ u32 multicast:1;
+ u32 error:1;
+ u32 csum_err_udp:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_ip:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_tx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 reserved1:31;
+#else
+ u32 reserved1:31;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_rx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved4:6;
+ u32 end_of_ring:1;
+ u32 reserved3:14;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:14;
+ u32 end_of_ring:1;
+ u32 reserved4:6;
+#endif
+};
+
+struct ksz_desc_tx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 intr:1;
+ u32 first_seg:1;
+ u32 last_seg:1;
+ u32 csum_gen_ip:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_udp:1;
+ u32 end_of_ring:1;
+ u32 reserved4:1;
+ u32 dest_port:4;
+ u32 reserved3:9;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:9;
+ u32 dest_port:4;
+ u32 reserved4:1;
+ u32 end_of_ring:1;
+ u32 csum_gen_udp:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_ip:1;
+ u32 last_seg:1;
+ u32 first_seg:1;
+ u32 intr:1;
+#endif
+};
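+
+/*
+ * The bitfield layouts above mirror the KS_DESC_* mask definitions, e.g.
+ * hw_owned corresponds to KS_DESC_HW_OWNED (0x80000000) and buf_size to
+ * KS_DESC_BUF_SIZE (0x000007FF), with the field order reversed on
+ * big-endian machines.
+ */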
+
+union desc_stat {
+ struct ksz_desc_rx_stat rx;
+ struct ksz_desc_tx_stat tx;
+ u32 data;
+};
+
+union desc_buf {
+ struct ksz_desc_rx_buf rx;
+ struct ksz_desc_tx_buf tx;
+ u32 data;
+};
+
+/**
+ * struct ksz_hw_desc - Hardware descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @addr: Physical address of memory buffer.
+ * @next: Pointer to next hardware descriptor.
+ */
+struct ksz_hw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 addr;
+ u32 next;
+};
+
+/**
+ * struct ksz_sw_desc - Software descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @buf_size: Current buffer size value in the hardware descriptor.
+ */
+struct ksz_sw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 buf_size;
+};
+
+/**
+ * struct ksz_dma_buf - OS dependent DMA buffer data structure
+ * @skb: Associated socket buffer.
+ * @dma: Associated physical DMA address.
+ * @len: Actual length used.
+ */
+struct ksz_dma_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ int len;
+};
+
+/**
+ * struct ksz_desc - Descriptor structure
+ * @phw: Hardware descriptor pointer to uncached physical memory.
+ * @sw: Cached memory to hold hardware descriptor values for
+ * manipulation.
+ * @dma_buf: Operating system dependent data structure to hold physical
+ * memory buffer allocation information.
+ */
+struct ksz_desc {
+ struct ksz_hw_desc *phw;
+ struct ksz_sw_desc sw;
+ struct ksz_dma_buf dma_buf;
+};
+
+#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
+
+/**
+ * struct ksz_desc_info - Descriptor information data structure
+ * @ring: First descriptor in the ring.
+ * @cur: Current descriptor being manipulated.
+ * @ring_virt: First hardware descriptor in the ring.
+ * @ring_phys: The physical address of the first descriptor of the ring.
+ * @size: Size of hardware descriptor.
+ * @alloc: Number of descriptors allocated.
+ * @avail: Number of descriptors available for use.
+ * @last: Index for last descriptor released to hardware.
+ * @next: Index for next descriptor available for use.
+ * @mask: Mask for index wrapping.
+ */
+struct ksz_desc_info {
+ struct ksz_desc *ring;
+ struct ksz_desc *cur;
+ struct ksz_hw_desc *ring_virt;
+ u32 ring_phys;
+ int size;
+ int alloc;
+ int avail;
+ int last;
+ int next;
+ int mask;
+};
+
+/*
+ * KSZ8842 switch definitions
+ */
+
+enum {
+ TABLE_STATIC_MAC = 0,
+ TABLE_VLAN,
+ TABLE_DYNAMIC_MAC,
+ TABLE_MIB
+};
+
+#define LEARNED_MAC_TABLE_ENTRIES 1024
+#define STATIC_MAC_TABLE_ENTRIES 8
+
+/**
+ * struct ksz_mac_table - Static MAC table data structure
+ * @mac_addr: MAC address to filter.
+ * @vid: VID value.
+ * @fid: FID value.
+ * @ports: Port membership.
+ * @override: Override setting.
+ * @use_fid: FID use setting.
+ * @valid: Valid setting indicating the entry is being used.
+ */
+struct ksz_mac_table {
+ u8 mac_addr[MAC_ADDR_LEN];
+ u16 vid;
+ u8 fid;
+ u8 ports;
+ u8 override:1;
+ u8 use_fid:1;
+ u8 valid:1;
+};
+
+#define VLAN_TABLE_ENTRIES 16
+
+/**
+ * struct ksz_vlan_table - VLAN table data structure
+ * @vid: VID value.
+ * @fid: FID value.
+ * @member: Port membership.
+ */
+struct ksz_vlan_table {
+ u16 vid;
+ u8 fid;
+ u8 member;
+};
+
+#define DIFFSERV_ENTRIES 64
+#define PRIO_802_1P_ENTRIES 8
+#define PRIO_QUEUES 4
+
+#define SWITCH_PORT_NUM 2
+#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
+#define HOST_MASK (1 << SWITCH_PORT_NUM)
+#define PORT_MASK 7
+
+#define MAIN_PORT 0
+#define OTHER_PORT 1
+#define HOST_PORT SWITCH_PORT_NUM
+
+#define PORT_COUNTER_NUM 0x20
+#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
+
+#define MIB_COUNTER_RX_LO_PRIORITY 0x00
+#define MIB_COUNTER_RX_HI_PRIORITY 0x01
+#define MIB_COUNTER_RX_UNDERSIZE 0x02
+#define MIB_COUNTER_RX_FRAGMENT 0x03
+#define MIB_COUNTER_RX_OVERSIZE 0x04
+#define MIB_COUNTER_RX_JABBER 0x05
+#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
+#define MIB_COUNTER_RX_CRC_ERR 0x07
+#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
+#define MIB_COUNTER_RX_CTRL_8808 0x09
+#define MIB_COUNTER_RX_PAUSE 0x0A
+#define MIB_COUNTER_RX_BROADCAST 0x0B
+#define MIB_COUNTER_RX_MULTICAST 0x0C
+#define MIB_COUNTER_RX_UNICAST 0x0D
+#define MIB_COUNTER_RX_OCTET_64 0x0E
+#define MIB_COUNTER_RX_OCTET_65_127 0x0F
+#define MIB_COUNTER_RX_OCTET_128_255 0x10
+#define MIB_COUNTER_RX_OCTET_256_511 0x11
+#define MIB_COUNTER_RX_OCTET_512_1023 0x12
+#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
+#define MIB_COUNTER_TX_LO_PRIORITY 0x14
+#define MIB_COUNTER_TX_HI_PRIORITY 0x15
+#define MIB_COUNTER_TX_LATE_COLLISION 0x16
+#define MIB_COUNTER_TX_PAUSE 0x17
+#define MIB_COUNTER_TX_BROADCAST 0x18
+#define MIB_COUNTER_TX_MULTICAST 0x19
+#define MIB_COUNTER_TX_UNICAST 0x1A
+#define MIB_COUNTER_TX_DEFERRED 0x1B
+#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
+#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
+#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
+#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
+
+#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
+#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
+
+/**
+ * struct ksz_port_mib - Port MIB data structure
+ * @cnt_ptr: Current pointer to MIB counter index.
+ * @link_down: Indication the link has just gone down.
+ * @state: Connection status of the port.
+ * @mib_start: The starting counter index. Some ports do not start at 0.
+ * @counter: 64-bit MIB counter value.
+ * @dropped: Temporary buffer to remember last read packet dropped values.
+ *
+ * MIB counters need to be read periodically so that they do not overflow
+ * and report incorrect values. A balance is needed between satisfying this
+ * requirement and not wasting too much CPU time.
+ *
+ * It is pointless to read MIB counters when the port is disconnected. The
+ * @state provides the connection status so that MIB counters are read only
+ * when the port is connected. The @link_down indicates the port is just
+ * disconnected so that all MIB counters are read one last time to update the
+ * information.
+ */
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u8 link_down;
+ u8 state;
+ u8 mib_start;
+
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+ u32 dropped[2];
+};
+
+/**
+ * struct ksz_port_cfg - Port configuration data structure
+ * @vid: VID value.
+ * @member: Port membership.
+ * @port_prio: Port priority.
+ * @rx_rate: Receive priority rate.
+ * @tx_rate: Transmit priority rate.
+ * @stp_state: Current Spanning Tree Protocol state.
+ */
+struct ksz_port_cfg {
+ u16 vid;
+ u8 member;
+ u8 port_prio;
+ u32 rx_rate[PRIO_QUEUES];
+ u32 tx_rate[PRIO_QUEUES];
+ int stp_state;
+};
+
+/**
+ * struct ksz_switch - KSZ8842 switch data structure
+ * @mac_table: MAC table entries information.
+ * @vlan_table: VLAN table entries information.
+ * @port_cfg: Port configuration information.
+ * @diffserv: DiffServ priority settings. Indexed by the 6-bit ToS
+ * (bit7 ~ bit2) field.
+ * @p_802_1p: 802.1p priority settings. Indexed by the 3-bit 802.1p
+ * tag priority field.
+ * @br_addr: Bridge address. Used for STP.
+ * @other_addr: Other MAC address. Used for multiple network device mode.
+ * @broad_per: Broadcast storm percentage.
+ * @member: Current port membership. Used for STP.
+ */
+struct ksz_switch {
+ struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
+ struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
+ struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
+
+ u8 diffserv[DIFFSERV_ENTRIES];
+ u8 p_802_1p[PRIO_802_1P_ENTRIES];
+
+ u8 br_addr[MAC_ADDR_LEN];
+ u8 other_addr[MAC_ADDR_LEN];
+
+ u8 broad_per;
+ u8 member;
+};
+
+#define TX_RATE_UNIT 10000
+
+/**
+ * struct ksz_port_info - Port information data structure
+ * @state: Connection status of the port.
+ * @tx_rate: Transmit rate; divide by 10000 to get the rate in Mbit.
+ * @duplex: Duplex mode.
+ * @advertised: Advertised auto-negotiation setting. Used to determine link.
+ * @partner: Auto-negotiation partner setting. Used to determine link.
+ * @port_id: Port index to access actual hardware register.
+ * @pdev: Pointer to OS dependent network device.
+ */
+struct ksz_port_info {
+ uint state;
+ uint tx_rate;
+ u8 duplex;
+ u8 advertised;
+ u8 partner;
+ u8 port_id;
+ void *pdev;
+};
+
+#define MAX_TX_HELD_SIZE 52000
+
+/* Hardware features and bug fixes. */
+#define LINK_INT_WORKING (1 << 0)
+#define SMALL_PACKET_TX_BUG (1 << 1)
+#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
+#define IPV6_CSUM_GEN_HACK (1 << 3)
+#define RX_HUGE_FRAME (1 << 4)
+#define STP_SUPPORT (1 << 8)
+
+/* Software overrides. */
+#define PAUSE_FLOW_CTRL (1 << 0)
+#define FAST_AGING (1 << 1)
+
+/**
+ * struct ksz_hw - KSZ884X hardware data structure
+ * @io: Virtual address assigned.
+ * @ksz_switch: Pointer to KSZ8842 switch.
+ * @port_info: Port information.
+ * @port_mib: Port MIB information.
+ * @dev_count: Number of network devices this hardware supports.
+ * @dst_ports: Destination ports in switch for transmission.
+ * @id: Hardware ID. Used for display only.
+ * @mib_cnt: Number of MIB counters this hardware has.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @tx_cfg: Cached transmit control settings.
+ * @rx_cfg: Cached receive control settings.
+ * @intr_mask: Current interrupt mask.
+ * @intr_set: Current interrupt set.
+ * @intr_blocked: Interrupt blocked.
+ * @rx_desc_info: Receive descriptor information.
+ * @tx_desc_info: Transmit descriptor information.
+ * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
+ * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
+ * @tx_size: Transmit data size. Used for TX optimization.
+ * The maximum is defined by MAX_TX_HELD_SIZE.
+ * @perm_addr: Permanent MAC address.
+ * @override_addr: Overridden MAC address.
+ * @address: Additional MAC address entries.
+ * @addr_list_size: Additional MAC address list size.
+ * @mac_override: Indication of MAC address overridden.
+ * @promiscuous: Counter to keep track of promiscuous mode set.
+ * @all_multi: Counter to keep track of all multicast mode set.
+ * @multi_list: Multicast address entries.
+ * @multi_bits: Cached multicast hash table settings.
+ * @multi_list_size: Multicast address list size.
+ * @enabled: Indication of hardware enabled.
+ * @rx_stop: Indication of receive process stop.
+ * @features: Hardware features to enable.
+ * @overrides: Hardware features to override.
+ * @parent: Pointer to parent, network device private structure.
+ */
+struct ksz_hw {
+ void __iomem *io;
+
+ struct ksz_switch *ksz_switch;
+ struct ksz_port_info port_info[SWITCH_PORT_NUM];
+ struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
+ int dev_count;
+ int dst_ports;
+ int id;
+ int mib_cnt;
+ int mib_port_cnt;
+
+ u32 tx_cfg;
+ u32 rx_cfg;
+ u32 intr_mask;
+ u32 intr_set;
+ uint intr_blocked;
+
+ struct ksz_desc_info rx_desc_info;
+ struct ksz_desc_info tx_desc_info;
+
+ int tx_int_cnt;
+ int tx_int_mask;
+ int tx_size;
+
+ u8 perm_addr[MAC_ADDR_LEN];
+ u8 override_addr[MAC_ADDR_LEN];
+ u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 addr_list_size;
+ u8 mac_override;
+ u8 promiscuous;
+ u8 all_multi;
+ u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_bits[HW_MULTICAST_SIZE];
+ u8 multi_list_size;
+
+ u8 enabled;
+ u8 rx_stop;
+ u8 reserved2[1];
+
+ uint features;
+ uint overrides;
+
+ void *parent;
+};
+
+enum {
+ PHY_NO_FLOW_CTRL,
+ PHY_FLOW_CTRL,
+ PHY_TX_ONLY,
+ PHY_RX_ONLY
+};
+
+/**
+ * struct ksz_port - Virtual port data structure
+ * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
+ * duplex, and 0 for auto, which normally results in full
+ * duplex.
+ * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
+ * 0 for auto, which normally results in 100 Mbit.
+ * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
+ * force.
+ * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
+ * control, and PHY_FLOW_CTRL for flow control.
+ * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
+ * Mbit PHY.
+ * @first_port: Index of first port this port supports.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @port_cnt: Number of ports this port supports.
+ * @counter: Port statistics counter.
+ * @hw: Pointer to hardware structure.
+ * @linked: Pointer to port information linked to this port.
+ */
+struct ksz_port {
+ u8 duplex;
+ u8 speed;
+ u8 force_link;
+ u8 flow_ctrl;
+
+ int first_port;
+ int mib_port_cnt;
+ int port_cnt;
+ u64 counter[OID_COUNTER_LAST];
+
+ struct ksz_hw *hw;
+ struct ksz_port_info *linked;
+};
+
+/**
+ * struct ksz_timer_info - Timer information data structure
+ * @timer: Kernel timer.
+ * @cnt: Running timer counter.
+ * @max: Number of times to run timer; -1 for infinity.
+ * @period: Timer period in jiffies.
+ */
+struct ksz_timer_info {
+ struct timer_list timer;
+ int cnt;
+ int max;
+ int period;
+};
+
+/**
+ * struct ksz_shared_mem - OS dependent shared memory data structure
+ * @dma_addr: Physical DMA address allocated.
+ * @alloc_size: Allocation size.
+ * @phys: Actual physical address used.
+ * @alloc_virt: Virtual address allocated.
+ * @virt: Actual virtual address used.
+ */
+struct ksz_shared_mem {
+ dma_addr_t dma_addr;
+ uint alloc_size;
+ uint phys;
+ u8 *alloc_virt;
+ u8 *virt;
+};
+
+/**
+ * struct ksz_counter_info - OS dependent counter information data structure
+ * @counter: Wait queue to wakeup after counters are read.
+ * @time: Next time in jiffies to read counter.
+ * @read: Indication of counters read in full or not.
+ */
+struct ksz_counter_info {
+ wait_queue_head_t counter;
+ unsigned long time;
+ int read;
+};
+
+/**
+ * struct dev_info - Network device information data structure
+ * @dev: Pointer to network device.
+ * @pdev: Pointer to PCI device.
+ * @hw: Hardware structure.
+ * @desc_pool: Physical memory used for descriptor pool.
+ * @hwlock: Spinlock protecting hardware register access.
+ * @lock: Mutex protecting device access.
+ * @dev_rcv: Receive process function used.
+ * @last_skb: Socket buffer allocated for descriptor rx fragments.
+ * @skb_index: Buffer index for receiving fragments.
+ * @skb_len: Buffer length for receiving fragments.
+ * @mib_read: Workqueue to read MIB counters.
+ * @mib_timer_info: Timer to read MIB counters.
+ * @counter: Used for MIB reading.
+ * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
+ * the maximum is MAX_RX_BUF_SIZE.
+ * @opened: Counter to keep track of device open.
+ * @rx_tasklet: Receive processing tasklet.
+ * @tx_tasklet: Transmit processing tasklet.
+ * @wol_enable: Wake-on-LAN enable set by ethtool.
+ * @wol_support: Wake-on-LAN support used by ethtool.
+ * @pme_wait: Used for KSZ8841 power management.
+ */
+struct dev_info {
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ struct ksz_hw hw;
+ struct ksz_shared_mem desc_pool;
+
+ spinlock_t hwlock;
+ struct mutex lock;
+
+ int (*dev_rcv)(struct dev_info *);
+
+ struct sk_buff *last_skb;
+ int skb_index;
+ int skb_len;
+
+ struct work_struct mib_read;
+ struct ksz_timer_info mib_timer_info;
+ struct ksz_counter_info counter[TOTAL_PORT_NUM];
+
+ int mtu;
+ int opened;
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ int wol_enable;
+ int wol_support;
+ unsigned long pme_wait;
+};
+
+/**
+ * struct dev_priv - Network device private data structure
+ * @adapter: Adapter device information.
+ * @port: Port information.
+ * @monitor_timer_info: Timer to monitor ports.
+ * @stats: Network statistics.
+ * @proc_sem: Semaphore for proc accessing.
+ * @id: Device ID.
+ * @mii_if: MII interface information.
+ * @advertising: Temporary variable to store advertised settings.
+ * @msg_enable: The message flags controlling driver output.
+ * @media_state: The connection status of the device.
+ * @multicast: The all multicast state of the device.
+ * @promiscuous: The promiscuous state of the device.
+ */
+struct dev_priv {
+ struct dev_info *adapter;
+ struct ksz_port port;
+ struct ksz_timer_info monitor_timer_info;
+ struct net_device_stats stats;
+
+ struct semaphore proc_sem;
+ int id;
+
+ struct mii_if_info mii_if;
+ u32 advertising;
+
+ u32 msg_enable;
+ int media_state;
+ int multicast;
+ int promiscuous;
+};
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
+
+#define DRV_NAME "KSZ884X PCI"
+#define DEVICE_NAME "KSZ884x PCI"
+#define DRV_VERSION "1.0.0"
+#define DRV_RELDATE "Feb 8, 2010"
+
+static char version[] __devinitdata =
+ "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
+
+static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
+
+/*
+ * Interrupt processing primary routines
+ */
+
+static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
+{
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
+}
+
+static inline void hw_dis_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = hw->intr_mask;
+ writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
+{
+ hw->intr_set = interrupt;
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_ena_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = 0;
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
+{
+ hw->intr_mask &= ~(bit);
+}
+
+static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr & ~interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw_dis_intr_bit(hw, interrupt);
+}
+
+/**
+ * hw_turn_on_intr - turn on specified interrupts
+ * @hw: The hardware instance.
+ * @bit: The interrupt bits to be on.
+ *
+ * This routine turns on the specified interrupts in the interrupt mask so that
+ * those interrupts will be enabled.
+ */
+static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
+{
+ hw->intr_mask |= bit;
+
+ if (!hw->intr_blocked)
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr | interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
+{
+ *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
+ *status = *status & hw->intr_set;
+}
+
+static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
+{
+ if (interrupt)
+ hw_ena_intr(hw);
+}
+
+/**
+ * hw_block_intr - block hardware interrupts
+ * @hw: The hardware instance.
+ *
+ * This function blocks all interrupts of the hardware and returns the current
+ * interrupt enable mask so that interrupts can be restored later.
+ *
+ * Return the current interrupt enable mask.
+ */
+static uint hw_block_intr(struct ksz_hw *hw)
+{
+ uint interrupt = 0;
+
+ if (!hw->intr_blocked) {
+ hw_dis_intr(hw);
+ interrupt = hw->intr_blocked;
+ }
+ return interrupt;
+}
+
+/*
+ * Hardware descriptor routines
+ */
+
+static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
+{
+ status.rx.hw_owned = 0;
+ desc->phw->ctrl.data = cpu_to_le32(status.data);
+}
+
+static inline void release_desc(struct ksz_desc *desc)
+{
+ desc->sw.ctrl.tx.hw_owned = 1;
+ if (desc->sw.buf_size != desc->sw.buf.data) {
+ desc->sw.buf_size = desc->sw.buf.data;
+ desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
+ }
+ desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
+}
+
+static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->last];
+ info->last++;
+ info->last &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
+}
+
+static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_rx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.rx.buf_size = len;
+}
+
+static inline void get_tx_pkt(struct ksz_desc_info *info,
+ struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->next];
+ info->next++;
+ info->next &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
+}
+
+static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_tx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.tx.buf_size = len;
+}
+
+/* Switch functions */
+
+#define TABLE_READ 0x10
+#define TABLE_SEL_SHIFT 2
+
+#define HW_DELAY(hw, reg) \
+ do { \
+ u16 dummy; \
+ dummy = readw(hw->io + reg); \
+ } while (0)
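+
+/*
+ * The dummy read in HW_DELAY() flushes the preceding register write, giving
+ * the switch time to start the indirect access before the data registers
+ * are read back.
+ */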
+
+/**
+ * sw_r_table - read 4 bytes of data from switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data: Buffer to store the read data.
+ *
+ * This routine reads 4 bytes of data from the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_table_64 - write 8 bytes of data to the switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data_hi: The high part of data to be written (bit63 ~ bit32).
+ * @data_lo: The low part of data to be written (bit31 ~ bit0).
+ *
+ * This routine writes 8 bytes of data to the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of written data.
+ */
+static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
+ u32 data_lo)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
+ writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_sta_mac_table - write to the static MAC table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @mac_addr: The MAC address.
+ * @ports: The port members.
+ * @override: The flag to override the port receive/transmit settings.
+ * @valid: The flag to indicate entry is valid.
+ * @use_fid: The flag to indicate the FID is valid.
+ * @fid: The FID value.
+ *
+ * This routine writes an entry of the static MAC table of the switch. It
+ * calls sw_w_table_64() to write the data.
+ */
+static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
+ u8 ports, int override, int valid, int use_fid, u8 fid)
+{
+ u32 data_hi;
+ u32 data_lo;
+
+ data_lo = ((u32) mac_addr[2] << 24) |
+ ((u32) mac_addr[3] << 16) |
+ ((u32) mac_addr[4] << 8) | mac_addr[5];
+ data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
+ data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
+
+ if (override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
+ }
+ if (valid)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+
+ sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
+}
+
+/**
+ * sw_r_vlan_table - read from the VLAN table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @vid: Buffer to store the VID.
+ * @fid: Buffer to store the FID.
+ * @member: Buffer to store the port membership.
+ *
+ * This function reads an entry of the VLAN table of the switch. It calls
+ * sw_r_table() to get the data.
+ *
+ * Return 0 if the entry is valid; otherwise -1.
+ */
+static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
+ u8 *member)
+{
+ u32 data;
+
+ sw_r_table(hw, TABLE_VLAN, addr, &data);
+ if (data & VLAN_TABLE_VALID) {
+ *vid = (u16)(data & VLAN_TABLE_VID);
+ *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
+ *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
+ VLAN_TABLE_MEMBERSHIP_SHIFT);
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * port_r_mib_cnt - read MIB counter
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the counter.
+ * @cnt: Buffer to store the counter.
+ *
+ * This routine reads a MIB counter of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
+{
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int timeout;
+
+ ctrl_addr = addr + PORT_COUNTER_NUM * port;
+
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ for (timeout = 100; timeout > 0; timeout--) {
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ if (data & MIB_COUNTER_VALID) {
+ if (data & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * port_r_mib_pkt - read dropped packet counts
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @last: Buffer holding the last read dropped packet counts, used to detect
+ * counter wrap-around.
+ * @cnt: Buffer to store the receive and transmit dropped packet counts.
+ *
+ * This routine reads the dropped packet counts of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
+{
+ u32 cur;
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int index;
+
+ index = KS_MIB_PACKET_DROPPED_RX_0 + port;
+ do {
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr = (u16) index;
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
+ << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+
+ data &= MIB_PACKET_DROPPED;
+ cur = *last;
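+ /*
+ * The hardware dropped-packet count is only 16 bits wide
+ * (MIB_PACKET_DROPPED), so handle wrap-around: e.g. a change from
+ * 0xFFF0 to 0x0005 is accounted as 0x15 dropped packets.
+ */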
+ if (data != cur) {
+ *last = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+ ++last;
+ ++cnt;
+ index -= KS_MIB_PACKET_DROPPED_TX -
+ KS_MIB_PACKET_DROPPED_TX_0 + 1;
+ } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
+}
+
+/**
+ * port_r_cnt - read MIB counters periodically
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to read the counters of the port periodically to avoid
+ * counter overflow. The hardware should be acquired first before calling this
+ * routine.
+ *
+ * Return non-zero when not all counters are read.
+ */
+static int port_r_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ while (mib->cnt_ptr < PORT_COUNTER_NUM) {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ mib->cnt_ptr = 0;
+ return 0;
+}
+
+/**
+ * port_init_cnt - initialize MIB counter values
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to initialize all counters to zero if the hardware
+ * cannot do it after reset.
+ */
+static void port_init_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ mib->cnt_ptr = 0;
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ do {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ } while (mib->cnt_ptr < PORT_COUNTER_NUM);
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ mib->cnt_ptr = 0;
+}
+
+/*
+ * Port functions
+ */
+
+/**
+ * port_chk - check port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the port register are set
+ * or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * port_cfg - set port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This routine sets or resets the specified bits of the port register.
+ */
+static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
+ int set)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_chk_shift - check port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ *
+ * This function checks whether the specified port is set in the register or
+ * not.
+ *
+ * Return 0 if the port is not set.
+ */
+static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
+{
+ u16 data;
+ u16 bit = 1 << port;
+
+ data = readw(hw->io + addr);
+ data >>= shift;
+ return (data & bit) == bit;
+}
+
+/**
+ * port_cfg_shift - set port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ * @set: The flag indicating whether the port is to be set or not.
+ *
+ * This routine sets or resets the specified port in the register.
+ */
+static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
+ int set)
+{
+ u16 data;
+ u16 bits = 1 << port;
+
+ data = readw(hw->io + addr);
+ bits <<= shift;
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_r8 - read byte from port register
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a byte from the port register.
+ */
+static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readb(hw->io + addr);
+}
+
+/**
+ * port_r16 - read word from port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a word from the port register.
+ */
+static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readw(hw->io + addr);
+}
+
+/**
+ * port_w16 - write word to port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Data to write.
+ *
+ * This routine writes a word to the port register.
+ */
+static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * sw_chk - check switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the switch register are
+ * set or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * sw_cfg - set switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This function sets or resets the specified bits of the switch register.
+ */
+static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/* Bandwidth */
+
+static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
+}
+
+static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
+}
+
+/* Driver sets switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROTECTION_RATE 10
+
+/* 148,800 frames * 67 ms / 100 */
+#define BROADCAST_STORM_VALUE 9969
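+
+/*
+ * At the 10% BROADCAST_STORM_PROTECTION_RATE, sw_cfg_broad_storm() below
+ * programs 9969 * 10 / 100 = 996 frames per 67 ms interval.
+ */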
+
+/**
+ * sw_cfg_broad_storm - configure broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ */
+static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ u16 data;
+ u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
+
+ if (value > BROADCAST_STORM_RATE)
+ value = BROADCAST_STORM_RATE;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
+ data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+}
+
+/**
+ * sw_get_broad_storm - get broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Buffer to store the broadcast storm threshold percentage.
+ *
+ * This routine retrieves the broadcast storm threshold of the switch.
+ */
+static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
+{
+ int num;
+ u16 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ num = (data & BROADCAST_STORM_RATE_HI);
+ num <<= 8;
+ num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
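+ /* Round to the nearest percent when converting back from the raw value. */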
+ num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
+ *percent = (u8) num;
+}
+
+/**
+ * sw_dis_broad_storm - disable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the broadcast storm limit function of the switch.
+ */
+static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
+{
+ port_cfg_broad_storm(hw, port, 0);
+}
+
+/**
+ * sw_ena_broad_storm - enable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine enables the broadcast storm limit function of the switch.
+ */
+static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
+{
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ port_cfg_broad_storm(hw, port, 1);
+}
+
+/**
+ * sw_init_broad_storm - initialize broadcast storm
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the broadcast storm limit function of the switch.
+ */
+static void sw_init_broad_storm(struct ksz_hw *hw)
+{
+ int port;
+
+ hw->ksz_switch->broad_per = 1;
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ for (port = 0; port < TOTAL_PORT_NUM; port++)
+ sw_dis_broad_storm(hw, port);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
+}
+
+/**
+ * hw_cfg_broad_storm - configure broadcast storm
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ * It is called by user functions. The hardware should be acquired first.
+ */
+static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ if (percent > 100)
+ percent = 100;
+
+ sw_cfg_broad_storm(hw, percent);
+ sw_get_broad_storm(hw, &percent);
+ hw->ksz_switch->broad_per = percent;
+}
+
+/**
+ * sw_dis_prio_rate - disable switch priority rate
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the priority rate function of the switch.
+ */
+static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_IN_RATE_OFFSET;
+ writel(0, hw->io + addr);
+}
+
+/**
+ * sw_init_prio_rate - initialize switch priority rate
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the priority rate function of the switch.
+ */
+static void sw_init_prio_rate(struct ksz_hw *hw)
+{
+ int port;
+ int prio;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ for (prio = 0; prio < PRIO_QUEUES; prio++) {
+ sw->port_cfg[port].rx_rate[prio] =
+ sw->port_cfg[port].tx_rate[prio] = 0;
+ }
+ sw_dis_prio_rate(hw, port);
+ }
+}
+
+/* Communication */
+
+static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
+}
+
+static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
+}
+
+static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
+}
+
+static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
+}
+
+/* Spanning Tree */
+
+static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
+}
+
+static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
+}
+
+static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
+}
+
+static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
+}
+
+static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
+{
+ if (!(hw->overrides & FAST_AGING)) {
+ sw_cfg_fast_aging(hw, 1);
+ mdelay(1);
+ sw_cfg_fast_aging(hw, 0);
+ }
+}
+
+/* VLAN */
+
+static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
+}
+
+static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
+}
+
+static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
+}
+
+static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
+}
+
+static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
+}
+
+static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
+}
+
+static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
+}
+
+static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
+}
+
+/* Mirroring */
+
+static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
+}
+
+static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
+}
+
+static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
+}
+
+static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
+}
+
+static void sw_init_mirror(struct ksz_hw *hw)
+{
+ int port;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_cfg_mirror_sniffer(hw, port, 0);
+ port_cfg_mirror_rx(hw, port, 0);
+ port_cfg_mirror_tx(hw, port, 0);
+ }
+ sw_cfg_mirror_rx_tx(hw, 0);
+}
+
+static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE, set);
+}
+
+static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
+{
+ return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE);
+}
+
+static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
+}
+
+static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
+{
+ return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
+}
+
+/* Priority */
+
+static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
+}
+
+static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
+}
+
+static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
+}
+
+static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
+}
+
+static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
+}
+
+static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
+}
+
+static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
+}
+
+static inline int port_chk_prio(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
+}
+
+/**
+ * sw_dis_diffserv - disable switch DiffServ priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the DiffServ priority function of the switch.
+ */
+static void sw_dis_diffserv(struct ksz_hw *hw, int port)
+{
+ port_cfg_diffserv(hw, port, 0);
+}
+
+/**
+ * sw_dis_802_1p - disable switch 802.1p priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the 802.1p priority function of the switch.
+ */
+static void sw_dis_802_1p(struct ksz_hw *hw, int port)
+{
+ port_cfg_802_1p(hw, port, 0);
+}
+
+/**
+ * sw_cfg_replace_null_vid - enable switch null VID replacement
+ * @hw: The hardware instance.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables or disables the null VID replacement function of the
+ * switch.
+ */
+static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
+}
+
+/**
+ * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables the 802.1p priority re-mapping function of the switch.
+ * That allows the 802.1p priority field to be replaced with the port's
+ * default tag priority value if the ingress packet's 802.1p priority is
+ * higher than the port's default tag priority.
+ */
+static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_replace_vid(hw, port, set);
+}
+
+/**
+ * sw_cfg_port_based - configure switch port based priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @prio: The priority to set.
+ *
+ * This routine configures the port based priority of the switch.
+ */
+static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
+{
+ u16 data;
+
+ if (prio > PORT_BASED_PRIORITY_BASE)
+ prio = PORT_BASED_PRIORITY_BASE;
+
+ hw->ksz_switch->port_cfg[port].port_prio = prio;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
+ data &= ~PORT_BASED_PRIORITY_MASK;
+ data |= prio << PORT_BASED_PRIORITY_SHIFT;
+ port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
+}
+
+/**
+ * sw_dis_multi_queue - disable transmit multiple queues
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the transmit multiple queue selection of the switch
+ * port so that only a single transmit queue is used on the port.
+ */
+static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
+{
+ port_cfg_prio(hw, port, 0);
+}
+
+/**
+ * sw_init_prio - initialize switch priority
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the switch QoS priority functions.
+ */
+static void sw_init_prio(struct ksz_hw *hw)
+{
+ int port;
+ int tos;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /*
+ * Initialize the 802.1p tag priority values assigned to the different
+ * priority queues.
+ */
+ sw->p_802_1p[0] = 0;
+ sw->p_802_1p[1] = 0;
+ sw->p_802_1p[2] = 1;
+ sw->p_802_1p[3] = 1;
+ sw->p_802_1p[4] = 2;
+ sw->p_802_1p[5] = 2;
+ sw->p_802_1p[6] = 3;
+ sw->p_802_1p[7] = 3;
+
+ /*
+ * Initialize all the DiffServ priority values to be assigned to priority
+ * queue 0.
+ */
+ for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
+ sw->diffserv[tos] = 0;
+
+ /* All QoS functions disabled. */
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ sw_dis_multi_queue(hw, port);
+ sw_dis_diffserv(hw, port);
+ sw_dis_802_1p(hw, port);
+ sw_cfg_replace_vid(hw, port, 0);
+
+ sw->port_cfg[port].port_prio = 0;
+ sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
+ }
+ sw_cfg_replace_null_vid(hw, 0);
+}
+
+/**
+ * port_get_def_vid - get port default VID.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @vid: Buffer to store the VID.
+ *
+ * This routine retrieves the default VID of the port.
+ */
+static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_VID_OFFSET;
+ *vid = readw(hw->io + addr);
+}
+
+/**
+ * sw_init_vlan - initialize switch VLAN
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the VLAN function of the switch.
+ */
+static void sw_init_vlan(struct ksz_hw *hw)
+{
+ int port;
+ int entry;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* Read 16 VLAN entries from device's VLAN table. */
+ for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
+ sw_r_vlan_table(hw, entry,
+ &sw->vlan_table[entry].vid,
+ &sw->vlan_table[entry].fid,
+ &sw->vlan_table[entry].member);
+ }
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
+ sw->port_cfg[port].member = PORT_MASK;
+ }
+}
+
+/**
+ * sw_cfg_port_base_vlan - configure port-based VLAN membership
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @member: The port-based VLAN membership.
+ *
+ * This routine configures the port-based VLAN membership of the port.
+ */
+static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
+{
+ u32 addr;
+ u8 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_2_OFFSET;
+
+ data = readb(hw->io + addr);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & PORT_MASK);
+ writeb(data, hw->io + addr);
+
+ hw->ksz_switch->port_cfg[port].member = member;
+}
+
+/**
+ * sw_get_addr - get the switch MAC address.
+ * @hw: The hardware instance.
+ * @mac_addr: Buffer to store the MAC address.
+ *
+ * This function retrieves the MAC address of the switch.
+ */
+static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_addr - configure switch MAC address
+ * @hw: The hardware instance.
+ * @mac_addr: The MAC address.
+ *
+ * This function configures the MAC address of the switch.
+ */
+static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_global_ctrl - set switch global control
+ * @hw: The hardware instance.
+ *
+ * This routine sets the global control of the switch function.
+ */
+static void sw_set_global_ctrl(struct ksz_hw *hw)
+{
+ u16 data;
+
+ /* Enable switch MII flow control. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data |= SWITCH_FLOW_CTRL;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ data |= SWITCH_AGGR_BACKOFF;
+
+ /* Enable automatic fast aging when a link change is detected. */
+ data |= SWITCH_AGING_ENABLE;
+ data |= SWITCH_LINK_AUTO_AGING;
+
+ if (hw->overrides & FAST_AGING)
+ data |= SWITCH_FAST_AGING;
+ else
+ data &= ~SWITCH_FAST_AGING;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+
+ /* Enable no excessive collision drop. */
+ data |= NO_EXC_COLLISION_DROP;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+}
+
+enum {
+ STP_STATE_DISABLED = 0,
+ STP_STATE_LISTENING,
+ STP_STATE_LEARNING,
+ STP_STATE_FORWARDING,
+ STP_STATE_BLOCKED,
+ STP_STATE_SIMPLE
+};
+
+/**
+ * port_set_stp_state - configure port spanning tree state
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @state: The spanning tree state.
+ *
+ * This routine configures the spanning tree state of the port.
+ */
+static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
+{
+ u16 data;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
+ switch (state) {
+ case STP_STATE_DISABLED:
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LISTENING:
+/*
+ * No need to turn on transmit because of port direct mode.
+ * Turning on receive is required if the static MAC table is not set up.
+ */
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LEARNING:
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_BLOCKED:
+/*
+ * Need to set up the static MAC table with override to keep receiving BPDU
+ * messages. See sw_init_stp routine.
+ */
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_SIMPLE:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ }
+ port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
+ hw->ksz_switch->port_cfg[port].stp_state = state;
+}
+
+#define STP_ENTRY 0
+#define BROADCAST_ENTRY 1
+#define BRIDGE_ADDR_ENTRY 2
+#define IPV6_ADDR_ENTRY 3
+
+/**
+ * sw_clr_sta_mac_table - clear static MAC table
+ * @hw: The hardware instance.
+ *
+ * This routine clears the static MAC table.
+ */
+static void sw_clr_sta_mac_table(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, 0,
+ entry->use_fid, entry->fid);
+ }
+}
+
+/**
+ * sw_init_stp - initialize switch spanning tree support
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the spanning tree support of the switch.
+ */
+static void sw_init_stp(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+
+ entry = &hw->ksz_switch->mac_table[STP_ENTRY];
+ entry->mac_addr[0] = 0x01;
+ entry->mac_addr[1] = 0x80;
+ entry->mac_addr[2] = 0xC2;
+ entry->mac_addr[3] = 0x00;
+ entry->mac_addr[4] = 0x00;
+ entry->mac_addr[5] = 0x00;
+ entry->ports = HOST_MASK;
+ entry->override = 1;
+ entry->valid = 1;
+ sw_w_sta_mac_table(hw, STP_ENTRY,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+}
+
+/**
+ * sw_block_addr - block certain packets from the host port
+ * @hw: The hardware instance.
+ *
+ * This routine blocks certain packets from reaching the host port.
+ */
+static void sw_block_addr(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ entry->valid = 0;
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+ }
+}
+
+#define PHY_LINK_SUPPORT \
+ (PHY_AUTO_NEG_ASYM_PAUSE | \
+ PHY_AUTO_NEG_SYM_PAUSE | \
+ PHY_AUTO_NEG_100BT4 | \
+ PHY_AUTO_NEG_100BTX_FD | \
+ PHY_AUTO_NEG_100BTX | \
+ PHY_AUTO_NEG_10BT_FD | \
+ PHY_AUTO_NEG_10BT)
+
+static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
+}
+
+static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
+}
+
+static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+/**
+ * hw_r_phy - read data from PHY register
+ * @hw: The hardware instance.
+ * @port: Port to read.
+ * @reg: PHY register to read.
+ * @val: Buffer to store the read data.
+ *
+ * This routine reads data from the PHY register.
+ */
+static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ *val = readw(hw->io + phy);
+}
+
+/**
+ * hw_w_phy - write data to PHY register
+ * @hw: The hardware instance.
+ * @port: Port to write.
+ * @reg: PHY register to write.
+ * @val: Word data to write.
+ *
+ * This routine writes data to the PHY register.
+ */
+static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ writew(val, hw->io + phy);
+}
+
+/*
+ * EEPROM access functions
+ */
+
+#define AT93C_CODE 0
+#define AT93C_WR_OFF 0x00
+#define AT93C_WR_ALL 0x10
+#define AT93C_ER_ALL 0x20
+#define AT93C_WR_ON 0x30
+
+#define AT93C_WRITE 1
+#define AT93C_READ 2
+#define AT93C_ERASE 3
+
+#define EEPROM_DELAY 4
+
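+/*
+ * The AT93C46 EEPROM is accessed by bit-banging GPIO bits in the EEPROM
+ * control register: each command consists of a start bit, a 2-bit opcode and
+ * a 6-bit address, followed by a 16-bit data word shifted in or out most
+ * significant bit first.
+ */
+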
+static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data &= ~gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data |= gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ return (u8)(data & gpio);
+}
+
+static void eeprom_clk(struct ksz_hw *hw)
+{
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+}
+
+static u16 spi_r(struct ksz_hw *hw)
+{
+ int i;
+ u16 temp = 0;
+
+ for (i = 15; i >= 0; i--) {
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+
+ temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
+
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ }
+ return temp;
+}
+
+static void spi_w(struct ksz_hw *hw, u16 data)
+{
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
+{
+ int i;
+
+ /* Initial start bit */
+ raise_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+
+ /* AT93C operation */
+ for (i = 1; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+
+ /* Address location */
+ for (i = 5; i >= 0; i--) {
+ (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+#define EEPROM_DATA_RESERVED 0
+#define EEPROM_DATA_MAC_ADDR_0 1
+#define EEPROM_DATA_MAC_ADDR_1 2
+#define EEPROM_DATA_MAC_ADDR_2 3
+#define EEPROM_DATA_SUBSYS_ID 4
+#define EEPROM_DATA_SUBSYS_VEN_ID 5
+#define EEPROM_DATA_PM_CAP 6
+
+/* User defined EEPROM data */
+#define EEPROM_DATA_OTHER_MAC_ADDR 9
+
+/**
+ * eeprom_read - read from AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ *
+ * This function reads a word from the AT93C46 EEPROM.
+ *
+ * Return the data value.
+ */
+static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
+{
+ u16 data;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ spi_reg(hw, AT93C_READ, reg);
+ data = spi_r(hw);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ return data;
+}
+
+/**
+ * eeprom_write - write to AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ * @data: The data value.
+ *
+ * This procedure writes a word to the AT93C46 EEPROM.
+ */
+static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
+{
+ int timeout;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ /* Enable write. */
+ spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Erase the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_ERASE, reg);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Write the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_WRITE, reg);
+ spi_w(hw, data);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Disable write. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+}
+
+/*
+ * Link detection routines
+ */
+
+static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
+{
+ ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
+ switch (port->flow_ctrl) {
+ case PHY_FLOW_CTRL:
+ ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
+ break;
+ /* Not supported. */
+ case PHY_TX_ONLY:
+ case PHY_RX_ONLY:
+ default:
+ break;
+ }
+ return ctrl;
+}
+
+static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
+{
+ u32 rx_cfg;
+ u32 tx_cfg;
+
+ rx_cfg = hw->rx_cfg;
+ tx_cfg = hw->tx_cfg;
+ if (rx)
+ hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
+ else
+ hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
+ if (tx)
+ hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
+ else
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled) {
+ if (rx_cfg != hw->rx_cfg)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ if (tx_cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
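+/*
+ * Resolve transmit and receive flow control from the pause bits advertised by
+ * both ends of the link, following the usual IEEE 802.3 auto-negotiation
+ * rules: symmetric pause on both sides enables flow control in both
+ * directions, while asymmetric pause enables it in one direction only.
+ */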
+static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
+ u16 local, u16 remote)
+{
+ int rx;
+ int tx;
+
+ if (hw->overrides & PAUSE_FLOW_CTRL)
+ return;
+
+ rx = tx = 0;
+ if (port->force_link)
+ rx = tx = 1;
+ if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ rx = tx = 1;
+ } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
+ (local & PHY_AUTO_NEG_PAUSE) ==
+ PHY_AUTO_NEG_ASYM_PAUSE) {
+ tx = 1;
+ }
+ } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
+ if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ rx = 1;
+ }
+ if (!hw->ksz_switch)
+ set_flow_ctrl(hw, rx, tx);
+}
+
+static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
+ struct ksz_port_info *info, u16 link_status)
+{
+ if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
+ !(hw->overrides & PAUSE_FLOW_CTRL)) {
+ u32 cfg = hw->tx_cfg;
+
+ /* Disable flow control in half-duplex mode. */
+ if (1 == info->duplex)
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled && cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+/**
+ * port_get_link_speed - get current link status
+ * @port: The port instance.
+ *
+ * This routine reads PHY registers to determine the current link status of the
+ * switch ports.
+ */
+static void port_get_link_speed(struct ksz_port *port)
+{
+ uint interrupt;
+ struct ksz_port_info *info;
+ struct ksz_port_info *linked = NULL;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 status;
+ u8 local;
+ u8 remote;
+ int i;
+ int p;
+ int change = 0;
+
+ interrupt = hw_block_intr(hw);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ /*
+ * Link status is changing all the time even when there is no
+ * cable connection!
+ */
+ remote = status & (PORT_AUTO_NEG_COMPLETE |
+ PORT_STATUS_LINK_GOOD);
+ local = (u8) data;
+
+ /* No change to status. */
+ if (local == info->advertised && remote == info->partner)
+ continue;
+
+ info->advertised = local;
+ info->partner = remote;
+ if (status & PORT_STATUS_LINK_GOOD) {
+
+ /* Remember the first linked port. */
+ if (!linked)
+ linked = info;
+
+ info->tx_rate = 10 * TX_RATE_UNIT;
+ if (status & PORT_STATUS_SPEED_100MBIT)
+ info->tx_rate = 100 * TX_RATE_UNIT;
+
+ info->duplex = 1;
+ if (status & PORT_STATUS_FULL_DUPLEX)
+ info->duplex = 2;
+
+ if (media_connected != info->state) {
+ hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
+ &data);
+ hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
+ &status);
+ determine_flow_ctrl(hw, port, data, status);
+ if (hw->ksz_switch) {
+ port_cfg_back_pressure(hw, p,
+ (1 == info->duplex));
+ }
+ change |= 1 << i;
+ port_cfg_change(hw, port, info, status);
+ }
+ info->state = media_connected;
+ } else {
+ if (media_disconnected != info->state) {
+ change |= 1 << i;
+
+ /* Indicate that the link just went down. */
+ hw->port_mib[p].link_down = 1;
+ }
+ info->state = media_disconnected;
+ }
+ hw->port_mib[p].state = (u8) info->state;
+ }
+
+ if (linked && media_disconnected == port->linked->state)
+ port->linked = linked;
+
+ hw_restore_intr(hw, interrupt);
+}
+
+#define PHY_RESET_TIMEOUT 10
+
+/**
+ * port_set_link_speed - set port speed
+ * @port: The port instance.
+ *
+ * This routine sets the link speed of the switch ports.
+ */
+static void port_set_link_speed(struct ksz_port *port)
+{
+ struct ksz_port_info *info;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 cfg;
+ u8 status;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ cfg = 0;
+ if (status & PORT_STATUS_LINK_GOOD)
+ cfg = data;
+
+ data |= PORT_AUTO_NEG_ENABLE;
+ data = advertised_flow_ctrl(port, data);
+
+ data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
+
+ /* Check if manual configuration is specified by the user. */
+ if (port->speed || port->duplex) {
+ if (10 == port->speed)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX);
+ else if (100 == port->speed)
+ data &= ~(PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (1 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_10BT_FD);
+ else if (2 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT);
+ }
+ if (data != cfg) {
+ data |= PORT_AUTO_NEG_RESTART;
+ port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
+ }
+ }
+}
+
+/**
+ * port_force_link_speed - force port speed
+ * @port: The port instance.
+ *
+ * This routine forces the link speed of the switch ports.
+ */
+static void port_force_link_speed(struct ksz_port *port)
+{
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ int i;
+ int phy;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
+ hw_r_phy_ctrl(hw, phy, &data);
+
+ data &= ~PHY_AUTO_NEG_ENABLE;
+
+ if (10 == port->speed)
+ data &= ~PHY_SPEED_100MBIT;
+ else if (100 == port->speed)
+ data |= PHY_SPEED_100MBIT;
+ if (1 == port->duplex)
+ data &= ~PHY_FULL_DUPLEX;
+ else if (2 == port->duplex)
+ data |= PHY_FULL_DUPLEX;
+ hw_w_phy_ctrl(hw, phy, data);
+ }
+}
+
+static void port_set_power_saving(struct ksz_port *port, int enable)
+{
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
+ port_cfg(hw, p,
+ KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
+}
+
+/*
+ * KSZ8841 power management functions
+ */
+
+/**
+ * hw_chk_wol_pme_status - check PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This function is used to check whether the PMEN pin is asserted.
+ *
+ * Return 1 if the PMEN pin is asserted; otherwise, 0.
+ */
+static int hw_chk_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return 0;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
+}
+
+/**
+ * hw_clr_wol_pme_status - clear PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This routine is used to clear PME_Status to deassert PMEN pin.
+ */
+static void hw_clr_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+
+ /* Clear PME_Status to deassert PMEN pin. */
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data |= PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol_pme - enable or disable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable Wake-on-LAN.
+ */
+static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data &= ~PCI_PM_CTRL_STATE_MASK;
+ if (set)
+ data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
+ else
+ data &= ~PCI_PM_CTRL_PME_ENABLE;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol - configure Wake-on-LAN features
+ * @hw: The hardware instance.
+ * @frame: The pattern frame bit.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable certain Wake-on-LAN features.
+ */
+static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
+ if (set)
+ data |= frame;
+ else
+ data &= ~frame;
+ writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
+}
+
+/**
+ * hw_set_wol_frame - program Wake-on-LAN pattern
+ * @hw: The hardware instance.
+ * @i: The frame index.
+ * @mask_size: The size of the mask.
+ * @mask: Mask to ignore certain bytes in the pattern.
+ * @frame_size: The size of the frame.
+ * @pattern: The frame data.
+ *
+ * This routine is used to program a Wake-on-LAN pattern.
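+ *
+ * Each mask byte covers eight consecutive pattern bytes; only the bytes whose
+ * mask bit is set are copied into the CRC that the hardware uses to match
+ * incoming frames, so don't-care fields in the pattern can be skipped.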
+ */
+static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
+ u8 *mask, uint frame_size, u8 *pattern)
+{
+ int bits;
+ int from;
+ int len;
+ int to;
+ u32 crc;
+ u8 data[64];
+ u8 val = 0;
+
+ if (frame_size > mask_size * 8)
+ frame_size = mask_size * 8;
+ if (frame_size > 64)
+ frame_size = 64;
+
+ i *= 0x10;
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
+
+ bits = len = from = to = 0;
+ do {
+ if (bits) {
+ if ((val & 1))
+ data[to++] = pattern[from];
+ val >>= 1;
+ ++from;
+ --bits;
+ } else {
+ val = mask[len];
+ writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
+ + len);
+ ++len;
+ if (val)
+ bits = 8;
+ else
+ from += 8;
+ }
+ } while (from < (int) frame_size);
+ if (val) {
+ bits = mask[len - 1];
+ val <<= (from % 8);
+ bits &= ~val;
+ writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
+ 1);
+ }
+ crc = ether_crc(to, data);
+ writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
+}
+
+/**
+ * hw_add_wol_arp - add ARP pattern
+ * @hw: The hardware instance.
+ * @ip_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to add an ARP pattern for waking up the host.
+ */
+static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
+{
+ u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ u8 pattern[42] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x06,
+ 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[38], ip_addr, 4);
+ hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
+}
+
+/**
+ * hw_add_wol_bcast - add broadcast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a broadcast pattern for waking up the host.
+ */
+static void hw_add_wol_bcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+}
+
+/**
+ * hw_add_wol_mcast - add multicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a multicast pattern for waking up the host.
+ *
+ * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
+ * by IPv6 ping command. Note that multicast packets are filtered through the
+ * multicast hash table, so not all multicast packets can wake up the host.
+ */
+static void hw_add_wol_mcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[3], &hw->override_addr[3], 3);
+ hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
+}
+
+/**
+ * hw_add_wol_ucast - add unicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a unicast pattern to wake up the host.
+ *
+ * It is assumed the unicast packet is directed to the device, as the hardware
+ * can only receive such packets in the normal case.
+ */
+static void hw_add_wol_ucast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+
+ hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+}
+
+/**
+ * hw_enable_wol - enable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @wol_enable: The Wake-on-LAN settings.
+ * @net_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to enable Wake-on-LAN depending on driver settings.
+ */
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
+{
+ hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
+ hw_add_wol_ucast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
+ hw_add_wol_mcast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
+ hw_add_wol_arp(hw, net_addr);
+}
+
+/**
+ * hw_init - check whether the driver is correct for the hardware
+ * @hw: The hardware instance.
+ *
+ * This function checks that the hardware is correct for this driver and sets
+ * the hardware up for proper initialization.
+ *
+ * Return the number of ports, or 0 if the hardware is not supported.
+ */
+static int hw_init(struct ksz_hw *hw)
+{
+ int rc = 0;
+ u16 data;
+ u16 revision;
+
+ /* Set bus speed to 125MHz. */
+ writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
+
+ /* Check KSZ884x chip ID. */
+ data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
+
+ revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
+ data &= KS884X_CHIP_ID_MASK_41;
+ if (REG_CHIP_ID_41 == data)
+ rc = 1;
+ else if (REG_CHIP_ID_42 == data)
+ rc = 2;
+ else
+ return 0;
+
+ /* Setup hardware features or bug workarounds. */
+ if (revision <= 1) {
+ hw->features |= SMALL_PACKET_TX_BUG;
+ if (1 == rc)
+ hw->features |= HALF_DUPLEX_SIGNAL_BUG;
+ }
+ hw->features |= IPV6_CSUM_GEN_HACK;
+ return rc;
+}
+
+/**
+ * hw_reset - reset the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine resets the hardware.
+ */
+static void hw_reset(struct ksz_hw *hw)
+{
+ writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+
+ /* Wait for device to reset. */
+ mdelay(10);
+
+ /* Write 0 to clear device reset. */
+ writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+}
+
+/**
+ * hw_setup - setup the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware for proper operation.
+ */
+static void hw_setup(struct ksz_hw *hw)
+{
+#if SET_DEFAULT_LED
+ u16 data;
+
+ /* Change default LED mode. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+ data &= ~LED_MODE;
+ data |= SET_DEFAULT_LED;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+#endif
+
+ /* Setup transmit control. */
+ hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
+
+ /* Setup receive control. */
+ hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
+ hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
+
+ /*
+ * Hardware cannot handle UDP packets in IP fragments, so UDP receive
+ * checksum checking is left disabled.
+ */
+ hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->all_multi)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ if (hw->promiscuous)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+}
+
+/**
+ * hw_setup_intr - setup interrupt mask
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the interrupt mask for proper operation.
+ */
+static void hw_setup_intr(struct ksz_hw *hw)
+{
+ hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
+}
+
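+/*
+ * The descriptor ring size must be a power of two of at least
+ * 2^MIN_DESC_SHIFT so the ring index can wrap with a simple mask
+ * (info->mask = alloc - 1); an invalid count is rounded up here.
+ */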
+static void ksz_check_desc_num(struct ksz_desc_info *info)
+{
+#define MIN_DESC_SHIFT 2
+
+ int alloc = info->alloc;
+ int shift;
+
+ shift = 0;
+ while (!(alloc & 1)) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (alloc != 1 || shift < MIN_DESC_SHIFT) {
+ printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ while (alloc) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (shift < MIN_DESC_SHIFT)
+ shift = MIN_DESC_SHIFT;
+ alloc = 1 << shift;
+ info->alloc = alloc;
+ }
+ info->mask = info->alloc - 1;
+}
+
+static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ int i;
+ u32 phys = desc_info->ring_phys;
+ struct ksz_hw_desc *desc = desc_info->ring_virt;
+ struct ksz_desc *cur = desc_info->ring;
+ struct ksz_desc *previous = NULL;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ cur->phw = desc++;
+ phys += desc_info->size;
+ previous = cur++;
+ previous->phw->next = cpu_to_le32(phys);
+ }
+ previous->phw->next = cpu_to_le32(desc_info->ring_phys);
+ previous->sw.buf.rx.end_of_ring = 1;
+ previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);
+
+ desc_info->avail = desc_info->alloc;
+ desc_info->last = desc_info->next = 0;
+
+ desc_info->cur = desc_info->ring;
+}
+
+/**
+ * hw_set_desc_base - set descriptor base addresses
+ * @hw: The hardware instance.
+ * @tx_addr: The transmit descriptor base.
+ * @rx_addr: The receive descriptor base.
+ *
+ * This routine programs the descriptor base addresses after reset.
+ */
+static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
+{
+ /* Set base address of Tx/Rx descriptors. */
+ writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
+ writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
+}
+
+static void hw_reset_pkts(struct ksz_desc_info *info)
+{
+ info->cur = info->ring;
+ info->avail = info->alloc;
+ info->last = info->next = 0;
+}
+
+static inline void hw_resume_rx(struct ksz_hw *hw)
+{
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+}
+
+/**
+ * hw_start_rx - start receiving
+ * @hw: The hardware instance.
+ *
+ * This routine starts the receive function of the hardware.
+ */
+static void hw_start_rx(struct ksz_hw *hw)
+{
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
+ /* Notify when the receive stops. */
+ hw->intr_mask |= KS884X_INT_RX_STOPPED;
+
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+ hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
+ hw->rx_stop++;
+
+ /* Handle the counter wrapping around to zero. */
+ if (0 == hw->rx_stop)
+ hw->rx_stop = 2;
+}
+
+/*
+ * hw_stop_rx - stop receiving
+ * @hw: The hardware instance.
+ *
+ * This routine stops the receive function of the hardware.
+ */
+static void hw_stop_rx(struct ksz_hw *hw)
+{
+ hw->rx_stop = 0;
+ hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
+ writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
+}
+
+/**
+ * hw_start_tx - start transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine starts the transmit function of the hardware.
+ */
+static void hw_start_tx(struct ksz_hw *hw)
+{
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_stop_tx - stop transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine stops the transmit function of the hardware.
+ */
+static void hw_stop_tx(struct ksz_hw *hw)
+{
+ writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_disable - disable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine disables the hardware.
+ */
+static void hw_disable(struct ksz_hw *hw)
+{
+ hw_stop_rx(hw);
+ hw_stop_tx(hw);
+ hw->enabled = 0;
+}
+
+/**
+ * hw_enable - enable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine enables the hardware.
+ */
+static void hw_enable(struct ksz_hw *hw)
+{
+ hw_start_tx(hw);
+ hw_start_rx(hw);
+ hw->enabled = 1;
+}
+
+/**
+ * hw_alloc_pkt - allocate enough descriptors for transmission
+ * @hw: The hardware instance.
+ * @length: The length of the packet.
+ * @physical: Number of descriptors required.
+ *
+ * This function allocates descriptors for transmission.
+ *
+ * Return 0 if not successful; 1 for buffer copy; or number of descriptors.
+ */
+static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
+{
+ /* Always leave one descriptor free. */
+ if (hw->tx_desc_info.avail <= 1)
+ return 0;
+
+ /* Allocate a descriptor for transmission and mark it current. */
+ get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
+ hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
+
+ /* Keep track of number of transmit descriptors used so far. */
+ ++hw->tx_int_cnt;
+ hw->tx_size += length;
+
+ /* Cannot hold on too much data. */
+ if (hw->tx_size >= MAX_TX_HELD_SIZE)
+ hw->tx_int_cnt = hw->tx_int_mask + 1;
+
+ if (physical > hw->tx_desc_info.avail)
+ return 1;
+
+ return hw->tx_desc_info.avail;
+}
+
+/**
+ * hw_send_pkt - mark packet for transmission
+ * @hw: The hardware instance.
+ *
+ * This routine marks the packet for transmission in the PCI version of the
+ * hardware.
+ */
+static void hw_send_pkt(struct ksz_hw *hw)
+{
+ struct ksz_desc *cur = hw->tx_desc_info.cur;
+
+ cur->sw.buf.tx.last_seg = 1;
+
+ /* Interrupt only after specified number of descriptors used. */
+ if (hw->tx_int_cnt > hw->tx_int_mask) {
+ cur->sw.buf.tx.intr = 1;
+ hw->tx_int_cnt = 0;
+ hw->tx_size = 0;
+ }
+
+ /* KSZ8842 supports port directed transmission. */
+ cur->sw.buf.tx.dest_port = hw->dst_ports;
+
+ release_desc(cur);
+
+ writel(0, hw->io + KS_DMA_TX_START);
+}
+
+static int empty_addr(u8 *addr)
+{
+ u32 *addr1 = (u32 *) addr;
+ u16 *addr2 = (u16 *) &addr[4];
+
+ return 0 == *addr1 && 0 == *addr2;
+}
+
+/**
+ * hw_set_addr - set MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine programs the MAC address of the hardware when the address is
+ * overridden.
+ */
+static void hw_set_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
+ hw->io + KS884X_ADDR_0_OFFSET + i);
+
+ sw_set_addr(hw, hw->override_addr);
+}
+
+/**
+ * hw_read_addr - read MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine retrieves the MAC address of the hardware.
+ */
+static void hw_read_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
+ KS884X_ADDR_0_OFFSET + i);
+
+ if (!hw->mac_override) {
+ memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ if (empty_addr(hw->override_addr)) {
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ hw->override_addr[5] += hw->id;
+ hw_set_addr(hw);
+ }
+ }
+}
+
+static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
+{
+ int i;
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ mac_addr_hi = 0;
+ for (i = 0; i < 2; i++) {
+ mac_addr_hi <<= 8;
+ mac_addr_hi |= mac_addr[i];
+ }
+ mac_addr_hi |= ADD_ADDR_ENABLE;
+ mac_addr_lo = 0;
+ for (i = 2; i < 6; i++) {
+ mac_addr_lo <<= 8;
+ mac_addr_lo |= mac_addr[i];
+ }
+ index *= ADD_ADDR_INCR;
+
+ writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
+ writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
+}
+
+static void hw_set_add_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
+ if (empty_addr(hw->address[i]))
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ else
+ hw_ena_add_addr(hw, i, hw->address[i]);
+ }
+}
+
+static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+ int j = ADDITIONAL_ENTRIES;
+
+ if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ return 0;
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ return 0;
+ if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
+ j = i;
+ }
+ if (j < ADDITIONAL_ENTRIES) {
+ memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ hw_ena_add_addr(hw, j, hw->address[j]);
+ return 0;
+ }
+ return -1;
+}
+
+static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
+ memset(hw->address[i], 0, MAC_ADDR_LEN);
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/**
+ * hw_clr_multicast - clear multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine removes all multicast addresses set in the hardware.
+ */
+static void hw_clr_multicast(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++) {
+ hw->multi_bits[i] = 0;
+
+ writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
+ }
+}
+
+/**
+ * hw_set_grp_addr - set multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine programs multicast addresses for the hardware to accept those
+ * addresses.
+ */
+static void hw_set_grp_addr(struct ksz_hw *hw)
+{
+ int i;
+ int index;
+ int position;
+ int value;
+
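+ /*
+ * The upper six bits of the CRC of each multicast address select one of
+ * 64 bits in the hash filter; the hardware accepts multicast frames
+ * whose hash bit is set.
+ */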
+ memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
+
+ for (i = 0; i < hw->multi_list_size; i++) {
+ position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ hw->multi_bits[index] |= (u8) value;
+ }
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++)
+ writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
+ i);
+}
+
+/**
+ * hw_set_multicast - enable or disable all multicast receiving
+ * @hw: The hardware instance.
+ * @multicast: To turn on or off the all multicast feature.
+ *
+ * This routine enables/disables the hardware to accept all multicast packets.
+ */
+static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (multicast)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ else
+ hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * hw_set_promiscuous - enable or disable promiscuous receiving
+ * @hw: The hardware instance.
+ * @prom: To turn on or off the promiscuous feature.
+ *
+ * This routine enables/disables the hardware to accept all packets.
+ */
+static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (prom)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+ else
+ hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * sw_enable - enable the switch
+ * @hw: The hardware instance.
+ * @enable: The flag to enable or disable the switch
+ *
+ * This routine is used to enable/disable the switch in KSZ8842.
+ */
+static void sw_enable(struct ksz_hw *hw, int enable)
+{
+ int port;
+
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (hw->dev_count > 1) {
+ /* Set port-based VLAN membership with the host port. */
+ sw_cfg_port_base_vlan(hw, port,
+ HOST_MASK | (1 << port));
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ } else {
+ sw_cfg_port_base_vlan(hw, port, PORT_MASK);
+ port_set_stp_state(hw, port, STP_STATE_FORWARDING);
+ }
+ }
+ if (hw->dev_count > 1)
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ else
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
+
+ if (enable)
+ enable = KS8842_START;
+ writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
+}
+
+/**
+ * sw_setup - setup the switch
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware switch engine for default operation.
+ */
+static void sw_setup(struct ksz_hw *hw)
+{
+ int port;
+
+ sw_set_global_ctrl(hw);
+
+ /* Enable switch broadcast storm protection at a 10 percent rate. */
+ sw_init_broad_storm(hw);
+ hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
+ for (port = 0; port < SWITCH_PORT_NUM; port++)
+ sw_ena_broad_storm(hw, port);
+
+ sw_init_prio(hw);
+
+ sw_init_mirror(hw);
+
+ sw_init_prio_rate(hw);
+
+ sw_init_vlan(hw);
+
+ if (hw->features & STP_SUPPORT)
+ sw_init_stp(hw);
+ if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ sw_enable(hw, 1);
+}
+
+/**
+ * ksz_start_timer - start kernel timer
+ * @info: Kernel timer information.
+ * @time: The time tick.
+ *
+ * This routine starts the kernel timer after the specified time tick.
+ */
+static void ksz_start_timer(struct ksz_timer_info *info, int time)
+{
+ info->cnt = 0;
+ info->timer.expires = jiffies + time;
+ add_timer(&info->timer);
+
+ /* infinity */
+ info->max = -1;
+}
+
+/**
+ * ksz_stop_timer - stop kernel timer
+ * @info: Kernel timer information.
+ *
+ * This routine stops the kernel timer.
+ */
+static void ksz_stop_timer(struct ksz_timer_info *info)
+{
+ if (info->max) {
+ info->max = 0;
+ del_timer_sync(&info->timer);
+ }
+}
+
+static void ksz_init_timer(struct ksz_timer_info *info, int period,
+ void (*function)(unsigned long), void *data)
+{
+ info->max = 0;
+ info->period = period;
+ init_timer(&info->timer);
+ info->timer.function = function;
+ info->timer.data = (unsigned long) data;
+}
+
+static void ksz_update_timer(struct ksz_timer_info *info)
+{
+ ++info->cnt;
+ if (info->max > 0) {
+ if (info->cnt < info->max) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ } else
+ info->max = 0;
+ } else if (info->max < 0) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ }
+}
+
+/**
+ * ksz_alloc_soft_desc - allocate software descriptors
+ * @desc_info: Descriptor information structure.
+ * @transmit: Indication that descriptors are for transmit.
+ *
+ * This local function allocates software descriptors for manipulation in
+ * memory.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
+ if (!desc_info->ring)
+ return 1;
+ memset((void *) desc_info->ring, 0,
+ sizeof(struct ksz_desc) * desc_info->alloc);
+ hw_init_desc(desc_info, transmit);
+ return 0;
+}
+
+/**
+ * ksz_alloc_desc - allocate hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local function allocates hardware descriptors for receiving and
+ * transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+ int offset;
+
+ /* Allocate memory for RX & TX descriptors. */
+ adapter->desc_pool.alloc_size =
+ hw->rx_desc_info.size * hw->rx_desc_info.alloc +
+ hw->tx_desc_info.size * hw->tx_desc_info.alloc +
+ DESC_ALIGNMENT;
+
+ adapter->desc_pool.alloc_virt =
+ pci_alloc_consistent(
+ adapter->pdev, adapter->desc_pool.alloc_size,
+ &adapter->desc_pool.dma_addr);
+ if (adapter->desc_pool.alloc_virt == NULL) {
+ adapter->desc_pool.alloc_size = 0;
+ return 1;
+ }
+ memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
+
+ /* Align to the next cache line boundary. */
+ offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
+ (DESC_ALIGNMENT -
+ ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
+ adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
+ adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
+
+ /* Allocate receive/transmit descriptors. */
+ hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ adapter->desc_pool.virt;
+ hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
+ offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
+ hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ (adapter->desc_pool.virt + offset);
+ hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
+
+ if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
+ return 1;
+ if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * free_dma_buf - release DMA buffer resources
+ * @adapter: Adapter information structure.
+ *
+ * This routine is just a helper function to release the DMA buffer resources.
+ */
+static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
+ int direction)
+{
+ pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
+ dev_kfree_skb(dma_buf->skb);
+ dma_buf->skb = NULL;
+ dma_buf->dma = 0;
+}
+
+/**
+ * ksz_init_rx_buffers - initialize receive descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This routine initializes DMA buffers for receiving.
+ */
+static void ksz_init_rx_buffers(struct dev_info *adapter)
+{
+ int i;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_hw *hw = &adapter->hw;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+
+ for (i = 0; i < hw->rx_desc_info.alloc; i++) {
+ get_rx_pkt(info, &desc);
+
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb && dma_buf->len != adapter->mtu)
+ free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
+ dma_buf->len = adapter->mtu;
+ if (!dma_buf->skb)
+ dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
+ if (dma_buf->skb && !dma_buf->dma) {
+ dma_buf->skb->dev = adapter->dev;
+ dma_buf->dma = pci_map_single(
+ adapter->pdev,
+ skb_tail_pointer(dma_buf->skb),
+ dma_buf->len,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ /* Set descriptor. */
+ set_rx_buf(desc, dma_buf->dma);
+ set_rx_len(desc, dma_buf->len);
+ release_desc(desc);
+ }
+}
+
+/**
+ * ksz_alloc_mem - allocate memory for hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This function allocates memory for use by hardware descriptors for receiving
+ * and transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_mem(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Determine the number of receive and transmit descriptors. */
+ hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
+ hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
+
+ /* Determine after how many descriptors to request a transmit interrupt. */
+ hw->tx_int_cnt = 0;
+ hw->tx_int_mask = NUM_OF_TX_DESC / 4;
+ if (hw->tx_int_mask > 8)
+ hw->tx_int_mask = 8;
+ while (hw->tx_int_mask) {
+ hw->tx_int_cnt++;
+ hw->tx_int_mask >>= 1;
+ }
+ if (hw->tx_int_cnt) {
+ hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
+ hw->tx_int_cnt = 0;
+ }
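+ /*
+ * The resulting tx_int_mask is a power of two minus one (e.g. 7 when
+ * NUM_OF_TX_DESC is 64), so hw_send_pkt() requests a transmit interrupt
+ * roughly once every tx_int_mask + 1 descriptors.
+ */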
+
+ /* Determine the descriptor size. */
+ hw->rx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ hw->tx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
+ printk(KERN_ALERT
+ "Hardware descriptor size not right!\n");
+ ksz_check_desc_num(&hw->rx_desc_info);
+ ksz_check_desc_num(&hw->tx_desc_info);
+
+ /* Allocate descriptors. */
+ if (ksz_alloc_desc(adapter))
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ksz_free_desc - free software and hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees the software and hardware descriptors allocated by
+ * ksz_alloc_desc().
+ */
+static void ksz_free_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Reset descriptor. */
+ hw->rx_desc_info.ring_virt = NULL;
+ hw->tx_desc_info.ring_virt = NULL;
+ hw->rx_desc_info.ring_phys = 0;
+ hw->tx_desc_info.ring_phys = 0;
+
+ /* Free memory. */
+ if (adapter->desc_pool.alloc_virt)
+ pci_free_consistent(
+ adapter->pdev,
+ adapter->desc_pool.alloc_size,
+ adapter->desc_pool.alloc_virt,
+ adapter->desc_pool.dma_addr);
+
+ /* Reset resource pool. */
+ adapter->desc_pool.alloc_size = 0;
+ adapter->desc_pool.alloc_virt = NULL;
+
+ kfree(hw->rx_desc_info.ring);
+ hw->rx_desc_info.ring = NULL;
+ kfree(hw->tx_desc_info.ring);
+ hw->tx_desc_info.ring = NULL;
+}
+
+/**
+ * ksz_free_buffers - free buffers used in the descriptors
+ * @adapter: Adapter information structure.
+ * @desc_info: Descriptor information structure.
+ *
+ * This local routine frees buffers used in the DMA buffers.
+ */
+static void ksz_free_buffers(struct dev_info *adapter,
+ struct ksz_desc_info *desc_info, int direction)
+{
+ int i;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_desc *desc = desc_info->ring;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb)
+ free_dma_buf(adapter, dma_buf, direction);
+ desc++;
+ }
+}
+
+/**
+ * ksz_free_mem - free all resources used by descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees all the resources allocated by ksz_alloc_mem().
+ */
+static void ksz_free_mem(struct dev_info *adapter)
+{
+ /* Free transmit buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
+ PCI_DMA_TODEVICE);
+
+ /* Free receive buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
+ PCI_DMA_FROMDEVICE);
+
+ /* Free descriptors. */
+ ksz_free_desc(adapter);
+}
+
+static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
+ u64 *counter)
+{
+ int i;
+ int mib;
+ int port;
+ struct ksz_port_mib *port_mib;
+
+ memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ for (i = 0, port = first; i < cnt; i++, port++) {
+ port_mib = &hw->port_mib[port];
+ for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
+ counter[mib] += port_mib->counter[mib];
+ }
+}
+
+/**
+ * send_packet - send packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This routine is used to send a packet out to the network.
+ */
+static void send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ksz_desc *desc;
+ struct ksz_desc *first;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_dma_buf *dma_buf;
+ int len;
+ int last_frag = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * KSZ8842 with multiple device interfaces needs to be told which port
+ * to send.
+ */
+ if (hw->dev_count > 1)
+ hw->dst_ports = 1 << priv->port.first_port;
+
+ /* Hardware will pad the length to 60. */
+ len = skb->len;
+
+ /* Remember the very first descriptor. */
+ first = info->cur;
+ desc = first;
+
+ dma_buf = DMA_BUFFER(desc);
+ if (last_frag) {
+ int frag;
+ skb_frag_t *this_frag;
+
+ dma_buf->len = skb->len - skb->data_len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag = 0;
+ do {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+
+ /* Get a new descriptor. */
+ get_tx_pkt(info, &desc);
+
+ /* Keep track of descriptors used so far. */
+ ++hw->tx_int_cnt;
+
+ dma_buf = DMA_BUFFER(desc);
+ dma_buf->len = this_frag->size;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev,
+ page_address(this_frag->page) +
+ this_frag->page_offset,
+ dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag++;
+ if (frag == last_frag)
+ break;
+
+ /* Do not release the last descriptor here. */
+ release_desc(desc);
+ } while (1);
+
+ /* current points to the last descriptor. */
+ info->cur = desc;
+
+ /* Release the first descriptor. */
+ release_desc(first);
+ } else {
+ dma_buf->len = len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ (desc)->sw.buf.tx.csum_gen_tcp = 1;
+ (desc)->sw.buf.tx.csum_gen_udp = 1;
+ }
+
+ /*
+ * The last descriptor holds the packet so that it can be returned to
+ * the network subsystem after all descriptors are transmitted.
+ */
+ dma_buf->skb = skb;
+
+ hw_send_pkt(hw);
+
+ /* Update transmit statistics. */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+}
+
+/**
+ * transmit_cleanup - clean up transmit descriptors
+ * @hw_priv: Network device information.
+ * @normal: Stop at a hardware owned descriptor instead of resetting it.
+ *
+ * This routine is called to clean up the transmitted buffers.
+ */
+static void transmit_cleanup(struct dev_info *hw_priv, int normal)
+{
+ int last;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct net_device *dev = NULL;
+
+ spin_lock(&hw_priv->hwlock);
+ last = info->last;
+
+ while (info->avail < info->alloc) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[last];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.tx.hw_owned) {
+ if (normal)
+ break;
+ else
+ reset_desc(desc, status);
+ }
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_unmap_single(
+ hw_priv->pdev, dma_buf->dma, dma_buf->len,
+ PCI_DMA_TODEVICE);
+
+ /* This descriptor contains the last buffer in the packet. */
+ if (dma_buf->skb) {
+ dev = dma_buf->skb->dev;
+
+ /* Release the packet back to network subsystem. */
+ dev_kfree_skb_irq(dma_buf->skb);
+ dma_buf->skb = NULL;
+ }
+
+ /* Free the transmitted descriptor. */
+ last++;
+ last &= info->mask;
+ info->avail++;
+ }
+ info->last = last;
+ spin_unlock(&hw_priv->hwlock);
+
+ /* Notify the network subsystem that the packet has been sent. */
+ if (dev)
+ dev->trans_start = jiffies;
+}
+
+/**
+ * tx_done - transmit done processing
+ * @hw_priv: Network device information.
+ *
+ * This routine is called when the transmit interrupt is triggered, indicating
+ * either a packet is sent successfully or there are transmit errors.
+ */
+static void tx_done(struct dev_info *hw_priv)
+{
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ transmit_cleanup(hw_priv, 1);
+
+ for (port = 0; port < hw->dev_count; port++) {
+ struct net_device *dev = hw->port_info[port].pdev;
+
+ if (netif_running(dev) && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
+{
+ skb->dev = old->dev;
+ skb->protocol = old->protocol;
+ skb->ip_summed = old->ip_summed;
+ skb->csum = old->csum;
+ skb_set_network_header(skb, ETH_HLEN);
+
+ dev_kfree_skb(old);
+}
+
+/**
+ * netdev_tx - send out packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This function is used by the upper network layer to send out a packet.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int left;
+ int num = 1;
+ int rc = 0;
+
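+ /*
+ * Work around SMALL_PACKET_TX_BUG in early chip revisions: frames of 48
+ * bytes or less are padded to 50 bytes, copying into a new socket buffer
+ * when the original has no tailroom to extend in place.
+ */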
+ if (hw->features & SMALL_PACKET_TX_BUG) {
+ struct sk_buff *org_skb = skb;
+
+ if (skb->len <= 48) {
+ if (skb_end_pointer(skb) - skb->data >= 50) {
+ memset(&skb->data[skb->len], 0, 50 - skb->len);
+ skb->len = 50;
+ } else {
+ skb = dev_alloc_skb(50);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ memcpy(skb->data, org_skb->data, org_skb->len);
+ memset(&skb->data[org_skb->len], 0,
+ 50 - org_skb->len);
+ skb->len = 50;
+ copy_old_skb(org_skb, skb);
+ }
+ }
+ }
+
+ spin_lock_irq(&hw_priv->hwlock);
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ left = hw_alloc_pkt(hw, skb->len, num);
+ if (left) {
+ if (left < num ||
+ ((hw->features & IPV6_CSUM_GEN_HACK) &&
+ (CHECKSUM_PARTIAL == skb->ip_summed) &&
+ (ETH_P_IPV6 == htons(skb->protocol)))) {
+ struct sk_buff *org_skb = skb;
+
+ skb = dev_alloc_skb(org_skb->len);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ skb_copy_and_csum_dev(org_skb, skb->data);
+ org_skb->ip_summed = 0;
+ skb->len = org_skb->len;
+ copy_old_skb(org_skb, skb);
+ }
+ send_packet(skb, dev);
+ if (left <= num)
+ netif_stop_queue(dev);
+ } else {
+ /* Stop the transmit queue until descriptors become available. */
+ netif_stop_queue(dev);
+ rc = NETDEV_TX_BUSY;
+ }
+
+ spin_unlock_irq(&hw_priv->hwlock);
+
+ return rc;
+}
+
+/**
+ * netdev_tx_timeout - transmit timeout processing
+ * @dev: Network device.
+ *
+ * This routine is called when the transmit timer expires. That indicates the
+ * hardware is not running correctly because transmit interrupts are not
+ * triggered to free up resources so that the transmit routine can continue
+ * sending out packets. The hardware is reset to correct the problem.
+ */
+static void netdev_tx_timeout(struct net_device *dev)
+{
+ static unsigned long last_reset;
+
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ if (hw->dev_count > 1) {
+ /*
+ * Only reset the hardware if time between calls is long
+ * enough.
+ */
+ if (jiffies - last_reset <= dev->watchdog_timeo)
+ hw_priv = NULL;
+ }
+
+ last_reset = jiffies;
+ if (hw_priv) {
+ hw_dis_intr(hw);
+ hw_disable(hw);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+ ksz_init_rx_buffers(hw_priv);
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys,
+ hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ if (hw->all_multi)
+ hw_set_multicast(hw, hw->all_multi);
+ else if (hw->multi_list_size)
+ hw_set_grp_addr(hw);
+
+ if (hw->dev_count > 1) {
+ hw_set_add_addr(hw);
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ struct net_device *port_dev;
+
+ port_set_stp_state(hw, port,
+ STP_STATE_DISABLED);
+
+ port_dev = hw->port_info[port].pdev;
+ if (netif_running(port_dev))
+ port_set_stp_state(hw, port,
+ STP_STATE_SIMPLE);
+ }
+ }
+
+ hw_enable(hw);
+ hw_ena_intr(hw);
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static inline void csum_verified(struct sk_buff *skb)
+{
+ unsigned short protocol;
+ struct iphdr *iph;
+
+ protocol = skb->protocol;
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *) skb_network_header(skb);
+ if (protocol == htons(ETH_P_8021Q)) {
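+ /*
+ * The network header still points at the VLAN tag here; its encapsulated
+ * protocol field lies at the same offset as tot_len in struct iphdr, so
+ * read it before skipping the tag.
+ */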
+ protocol = iph->tot_len;
+ skb_set_network_header(skb, VLAN_HLEN);
+ iph = (struct iphdr *) skb_network_header(skb);
+ }
+ if (protocol == htons(ETH_P_IP)) {
+ if (iph->protocol == IPPROTO_TCP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
+ struct ksz_desc *desc, union desc_stat status)
+{
+ int packet_len;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_dma_buf *dma_buf;
+ struct sk_buff *skb;
+ int rx_status;
+
+ /* Received length includes 4-byte CRC. */
+ packet_len = status.rx.frame_len - 4;
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_dma_sync_single_for_cpu(
+ hw_priv->pdev, dma_buf->dma, packet_len + 4,
+ PCI_DMA_FROMDEVICE);
+
+ do {
+ /* skb->data != skb->head */
+ skb = dev_alloc_skb(packet_len + 2);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ /*
+ * Align the socket buffer on a 4-byte boundary for better
+ * performance.
+ */
+ skb_reserve(skb, 2);
+
+ memcpy(skb_put(skb, packet_len),
+ dma_buf->skb->data, packet_len);
+ } while (0);
+
+ skb->dev = dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
+ csum_verified(skb);
+
+ /* Update receive statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += packet_len;
+
+ /* Notify upper layer for received packet. */
+ dev->last_rx = jiffies;
+
+ rx_status = netif_rx(skb);
+
+ return 0;
+}
+
+static int dev_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int port_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int dev_rcv_special(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ /*
+ * Receive without error. With receive errors
+ * disabled, packets with receive errors will be
+ * dropped, so no need to check the error bit.
+ */
+ if (!status.rx.error || (status.data &
+ KS_DESC_RX_ERROR_COND) ==
+ KS_DESC_RX_ERROR_TOO_LONG) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ } else {
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* Update receive error statistics. */
+ priv->port.counter[OID_COUNTER_RCV_ERROR]++;
+ }
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static void rx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (!hw->enabled)
+ return;
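+
+ /*
+ * If no packets were received, re-enable the receive interrupt;
+ * otherwise acknowledge it and keep draining the ring from the
+ * tasklet.
+ */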
+ if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
+
+ /* In case receive process is suspended because of overrun. */
+ hw_resume_rx(hw);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
+ spin_unlock_irq(&hw_priv->hwlock);
+ } else {
+ hw_ack_intr(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+}
+
+static void tx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_ack_intr(hw, KS884X_INT_TX_MASK);
+
+ tx_done(hw_priv);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_TX);
+ spin_unlock_irq(&hw_priv->hwlock);
+}
+
+static inline void handle_rx_stop(struct ksz_hw *hw)
+{
+ /* Receive has just been stopped. */
+ if (0 == hw->rx_stop)
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ else if (hw->rx_stop > 1) {
+ if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
+ hw_start_rx(hw);
+ } else {
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ hw->rx_stop = 0;
+ }
+ } else
+ /* Receive has just been started. */
+ hw->rx_stop++;
+}
+
+/**
+ * netdev_intr - interrupt handling
+ * @irq: Interrupt number.
+ * @dev_id: Network device.
+ *
+ * This function is the interrupt handler registered for the device and is
+ * called when the adapter signals an interrupt.
+ *
+ * Return IRQ_HANDLED if the interrupt is handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t netdev_intr(int irq, void *dev_id)
+{
+ uint int_enable = 0;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_read_intr(hw, &int_enable);
+
+ /* Not our interrupt! */
+ if (!int_enable)
+ return IRQ_NONE;
+
+ do {
+ hw_ack_intr(hw, int_enable);
+ int_enable &= hw->intr_mask;
+
+ if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
+ hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
+ tasklet_schedule(&hw_priv->tx_tasklet);
+ }
+
+ if (likely(int_enable & KS884X_INT_RX)) {
+ hw_dis_intr_bit(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
+ priv->stats.rx_fifo_errors++;
+ hw_resume_rx(hw);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_PHY)) {
+ struct ksz_port *port = &priv->port;
+
+ hw->features |= LINK_INT_WORKING;
+ port_get_link_speed(port);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
+ handle_rx_stop(hw);
+ break;
+ }
+
+ if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
+ u32 data;
+
+ hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
+ printk(KERN_INFO "Tx stopped\n");
+ data = readl(hw->io + KS_DMA_TX_CTRL);
+ if (!(data & DMA_TX_ENABLE))
+ printk(KERN_INFO "Tx disabled\n");
+ break;
+ }
+ } while (0);
+
+ hw_ena_intr(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Linux network device functions
+ */
+
+static unsigned long next_jiffies;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netdev_netpoll(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ hw_dis_intr(&hw_priv->hw);
+ netdev_intr(dev->irq, dev);
+}
+#endif
+
+static void bridge_change(struct ksz_hw *hw)
+{
+ int port;
+ u8 member;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* No ports in forwarding state. */
+ if (!sw->member) {
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ sw_block_addr(hw);
+ }
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
+ member = HOST_MASK | sw->member;
+ else
+ member = HOST_MASK | (1 << port);
+ if (member != sw->port_cfg[port].member)
+ sw_cfg_port_base_vlan(hw, port, member);
+ }
+}
+
+/**
+ * netdev_close - close network device
+ * @dev: Network device.
+ *
+ * This function processes the close operation of the network device, which is
+ * triggered, for example, by the user command "ifconfig ethX down."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int pi;
+
+ netif_stop_queue(dev);
+
+ ksz_stop_timer(&priv->monitor_timer_info);
+
+ /* Need to shut down the port manually in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
+
+ /* Port is closed. Need to change bridge setting. */
+ if (hw->features & STP_SUPPORT) {
+ pi = 1 << port->first_port;
+ if (hw->ksz_switch->member & pi) {
+ hw->ksz_switch->member &= ~pi;
+ bridge_change(hw);
+ }
+ }
+ }
+ if (port->first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ if (!hw_priv->wol_enable)
+ port_set_power_saving(port, true);
+
+ if (priv->multicast)
+ --hw->all_multi;
+ if (priv->promiscuous)
+ --hw->promiscuous;
+
+ hw_priv->opened--;
+ if (!(hw_priv->opened)) {
+ ksz_stop_timer(&hw_priv->mib_timer_info);
+ flush_work(&hw_priv->mib_read);
+
+ hw_dis_intr(hw);
+ hw_disable(hw);
+ hw_clr_multicast(hw);
+
+ /* Delay for receive task to stop scheduling itself. */
+ msleep(2000 / HZ);
+
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+ free_irq(dev->irq, hw_priv->dev);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+
+ /* Clean out the static MAC table when the switch is shut down. */
+ if (hw->features & STP_SUPPORT)
+ sw_clr_sta_mac_table(hw);
+ }
+
+ return 0;
+}
+
+static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
+{
+ if (hw->ksz_switch) {
+ u32 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ if (hw->features & RX_HUGE_FRAME)
+ data |= SWITCH_HUGE_PACKET;
+ else
+ data &= ~SWITCH_HUGE_PACKET;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ }
+ if (hw->features & RX_HUGE_FRAME) {
+ hw->rx_cfg |= DMA_RX_ERROR;
+ hw_priv->dev_rcv = dev_rcv_special;
+ } else {
+ hw->rx_cfg &= ~DMA_RX_ERROR;
+ if (hw->dev_count > 1)
+ hw_priv->dev_rcv = port_rcv_packets;
+ else
+ hw_priv->dev_rcv = dev_rcv_packets;
+ }
+}
+
+static int prepare_hardware(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int rc = 0;
+
+ /* Remember the network device that requests interrupts. */
+ hw_priv->dev = dev;
+ rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return rc;
+ tasklet_enable(&hw_priv->rx_tasklet);
+ tasklet_enable(&hw_priv->tx_tasklet);
+
+ hw->promiscuous = 0;
+ hw->all_multi = 0;
+ hw->multi_list_size = 0;
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ hw_cfg_huge_frame(hw_priv, hw);
+ ksz_init_rx_buffers(hw_priv);
+ return 0;
+}
+
+/**
+ * netdev_open - open network device
+ * @dev: Network device.
+ *
+ * This function processes the open operation of the network device, which is
+ * triggered, for example, by the user command "ifconfig ethX up."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int i;
+ int p;
+ int rc = 0;
+
+ priv->multicast = 0;
+ priv->promiscuous = 0;
+
+ /* Reset device statistics. */
+ memset(&priv->stats, 0, sizeof(struct net_device_stats));
+ memset((void *) port->counter, 0,
+ (sizeof(u64) * OID_COUNTER_LAST));
+
+ if (!(hw_priv->opened)) {
+ rc = prepare_hardware(dev);
+ if (rc)
+ return rc;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ if (next_jiffies < jiffies)
+ next_jiffies = jiffies + HZ * 2;
+ else
+ next_jiffies += HZ * 1;
+ hw_priv->counter[i].time = next_jiffies;
+ hw->port_mib[i].state = media_disconnected;
+ port_init_cnt(hw, i);
+ }
+ if (hw->ksz_switch)
+ hw->port_mib[HOST_PORT].state = media_connected;
+ else {
+ hw_add_wol_bcast(hw);
+ hw_cfg_wol_pme(hw, 0);
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ }
+ }
+ port_set_power_saving(port, false);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ /*
+ * Initialize to invalid value so that link detection
+ * is done.
+ */
+ hw->port_info[p].partner = 0xFF;
+ hw->port_info[p].state = media_disconnected;
+ }
+
+ /* Need to open the port in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
+ if (port->first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ }
+
+ port_get_link_speed(port);
+ if (port->force_link)
+ port_force_link_speed(port);
+ else
+ port_set_link_speed(port);
+
+ if (!(hw_priv->opened)) {
+ hw_setup_intr(hw);
+ hw_enable(hw);
+ hw_ena_intr(hw);
+
+ if (hw->mib_port_cnt)
+ ksz_start_timer(&hw_priv->mib_timer_info,
+ hw_priv->mib_timer_info.period);
+ }
+
+ hw_priv->opened++;
+
+ ksz_start_timer(&priv->monitor_timer_info,
+ priv->monitor_timer_info.period);
+
+ priv->media_state = port->linked->state;
+
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* RX errors = rx_errors */
+/* RX dropped = rx_dropped */
+/* RX overruns = rx_fifo_errors */
+/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
+/* TX errors = tx_errors */
+/* TX dropped = tx_dropped */
+/* TX overruns = tx_fifo_errors */
+/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
+/* collisions = collisions */
+
+/**
+ * netdev_query_statistics - query network device statistics
+ * @dev: Network device.
+ *
+ * This function returns the statistics of the network device. The device
+ * need not be opened.
+ *
+ * Return network device statistics.
+ */
+static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &priv->adapter->hw;
+ struct ksz_port_mib *mib;
+ int i;
+ int p;
+
+ priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
+ priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
+
+ /* Reset to zero to add count later. */
+ priv->stats.multicast = 0;
+ priv->stats.collisions = 0;
+ priv->stats.rx_length_errors = 0;
+ priv->stats.rx_crc_errors = 0;
+ priv->stats.rx_frame_errors = 0;
+ priv->stats.tx_window_errors = 0;
+
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ mib = &hw->port_mib[p];
+
+ priv->stats.multicast += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_MULTICAST];
+
+ priv->stats.collisions += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
+
+ priv->stats.rx_length_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
+ mib->counter[MIB_COUNTER_RX_FRAGMENT] +
+ mib->counter[MIB_COUNTER_RX_OVERSIZE] +
+ mib->counter[MIB_COUNTER_RX_JABBER]);
+ priv->stats.rx_crc_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_CRC_ERR];
+ priv->stats.rx_frame_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
+ mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
+
+ priv->stats.tx_window_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * netdev_set_mac_address - set network device MAC address
+ * @dev: Network device.
+ * @addr: Buffer of MAC address.
+ *
+ * This function is used to set the MAC address of the network device.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct sockaddr *mac = addr;
+ uint interrupt;
+
+ if (priv->port.first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ else {
+ hw->mac_override = 1;
+ memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ }
+
+ memcpy(dev->dev_addr, mac->sa_data, MAC_ADDR_LEN);
+
+ interrupt = hw_block_intr(hw);
+
+ if (priv->port.first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ else
+ hw_set_addr(hw);
+ hw_restore_intr(hw, interrupt);
+
+ return 0;
+}
+
+static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_hw *hw, int promiscuous)
+{
+ if (promiscuous != priv->promiscuous) {
+ u8 prev_state = hw->promiscuous;
+
+ if (promiscuous)
+ ++hw->promiscuous;
+ else
+ --hw->promiscuous;
+ priv->promiscuous = promiscuous;
+
+ /*
+ * Toggle hardware promiscuous mode only when the enable count
+ * changes between 0 and 1.
+ */
+ if (hw->promiscuous <= 1 && prev_state <= 1)
+ hw_set_promiscuous(hw, hw->promiscuous);
+
+ /*
+ * Port is not in promiscuous mode, meaning it is released
+ * from the bridge.
+ */
+ if ((hw->features & STP_SUPPORT) && !promiscuous &&
+ dev->br_port) {
+ struct ksz_switch *sw = hw->ksz_switch;
+ int port = priv->port.first_port;
+
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ port = 1 << port;
+ if (sw->member & port) {
+ sw->member &= ~port;
+ bridge_change(hw);
+ }
+ }
+ }
+}
+
+static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
+ int multicast)
+{
+ if (multicast != priv->multicast) {
+ u8 all_multi = hw->all_multi;
+
+ if (multicast)
+ ++hw->all_multi;
+ else
+ --hw->all_multi;
+ priv->multicast = multicast;
+
+ /* Turn on/off all multicast mode. */
+ if (hw->all_multi <= 1 && all_multi <= 1)
+ hw_set_multicast(hw, hw->all_multi);
+ }
+}
+
+/**
+ * netdev_set_rx_mode - set receive mode
+ * @dev: Network device.
+ *
+ * This routine is used to set multicast addresses or put the network device
+ * into promiscuous mode.
+ */
+static void netdev_set_rx_mode(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct dev_mc_list *mc_ptr;
+ int multicast = (dev->flags & IFF_ALLMULTI);
+
+ dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
+
+ if (hw_priv->hw.dev_count > 1)
+ multicast |= (dev->flags & IFF_MULTICAST);
+ dev_set_multicast(priv, hw, multicast);
+
+ /* Cannot use different hashes in multiple device interfaces mode. */
+ if (hw_priv->hw.dev_count > 1)
+ return;
+
+ if ((dev->flags & IFF_MULTICAST) && dev->mc_count) {
+ int i = 0;
+
+ /* List too big to support so turn on all multicast mode. */
+ if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (MAX_MULTICAST_LIST != hw->multi_list_size) {
+ hw->multi_list_size = MAX_MULTICAST_LIST;
+ ++hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ return;
+ }
+
+ for (mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ if (!(*mc_ptr->dmi_addr & 1))
+ continue;
+ if (i >= MAX_MULTICAST_LIST)
+ break;
+ memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
+ MAC_ADDR_LEN);
+ }
+ hw->multi_list_size = (u8) i;
+ hw_set_grp_addr(hw);
+ } else {
+ if (MAX_MULTICAST_LIST == hw->multi_list_size) {
+ --hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ hw->multi_list_size = 0;
+ hw_clr_multicast(hw);
+ }
+}
+
+static int netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int hw_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Cannot use different MTU in multiple device interfaces mode. */
+ if (hw->dev_count > 1)
+ if (dev != hw_priv->dev)
+ return 0;
+ if (new_mtu < 60)
+ return -EINVAL;
+
+ if (dev->mtu != new_mtu) {
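+ /* Add room for the Ethernet header and the 4-byte frame CRC. */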
+ hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
+ if (hw_mtu > MAX_RX_BUF_SIZE)
+ return -EINVAL;
+ if (hw_mtu > REGULAR_RX_BUF_SIZE) {
+ hw->features |= RX_HUGE_FRAME;
+ hw_mtu = MAX_RX_BUF_SIZE;
+ } else {
+ hw->features &= ~RX_HUGE_FRAME;
+ hw_mtu = REGULAR_RX_BUF_SIZE;
+ }
+ hw_mtu = (hw_mtu + 3) & ~3;
+ hw_priv->mtu = hw_mtu;
+ dev->mtu = new_mtu;
+ }
+ return 0;
+}
+
+/**
+ * netdev_ioctl - I/O control processing
+ * @dev: Network device.
+ * @ifr: Interface request structure.
+ * @cmd: I/O control code.
+ *
+ * This function is used to process I/O control calls.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int rc;
+ int result = 0;
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (down_interruptible(&priv->proc_sem))
+ return -ERESTARTSYS;
+
+ /* assume success */
+ rc = 0;
+ switch (cmd) {
+ /* Get address of MII PHY in use. */
+ case SIOCGMIIPHY:
+ data->phy_id = priv->id;
+
+ /* Fallthrough... */
+
+ /* Read MII PHY register. */
+ case SIOCGMIIREG:
+ if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_r_phy(hw, port->linked->port_id, data->reg_num,
+ &data->val_out);
+ break;
+
+ /* Write MII PHY register. */
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ result = -EPERM;
+ else if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_w_phy(hw, port->linked->port_id, data->reg_num,
+ data->val_in);
+ break;
+
+ default:
+ result = -EOPNOTSUPP;
+ }
+
+ up(&priv->proc_sem);
+
+ return result;
+}
+
+/*
+ * MII support
+ */
+
+/**
+ * mdio_read - read PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ *
+ * This function returns the PHY register value.
+ *
+ * Return the register value.
+ */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ u16 val_out;
+
+ hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
+ return val_out;
+}
+
+/**
+ * mdio_write - set PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ * @val: The register value.
+ *
+ * This procedure sets the PHY register value.
+ */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int pi;
+
+ for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
+ hw_w_phy(hw, pi, reg_num << 1, val);
+}
+
+/*
+ * ethtool support
+ */
+
+#define EEPROM_SIZE 0x40
+
+static u16 eeprom_data[EEPROM_SIZE] = { 0 };
+
+#define ADVERTISED_ALL \
+ (ADVERTISED_10baseT_Half | \
+ ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | \
+ ADVERTISED_100baseT_Full)
+
+/* These functions use the MII functions in mii.c. */
+
+/**
+ * netdev_get_settings - get network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function queries the PHY and returns its state in the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ mutex_lock(&hw_priv->lock);
+ mii_ethtool_gset(&priv->mii_if, cmd);
+ cmd->advertising |= ADVERTISED_TP;
+ mutex_unlock(&hw_priv->lock);
+
+ /* Save advertised settings for workaround in next function. */
+ priv->advertising = cmd->advertising;
+ return 0;
+}
+
+/**
+ * netdev_set_settings - set network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function sets the PHY according to the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ int rc;
+
+ /*
+ * ethtool utility does not change advertised setting if auto
+ * negotiation is not specified explicitly.
+ */
+ if (cmd->autoneg && priv->advertising == cmd->advertising) {
+ cmd->advertising |= ADVERTISED_ALL;
+ if (10 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half);
+ else if (100 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half);
+ if (0 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else if (1 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ mutex_lock(&hw_priv->lock);
+ if (cmd->autoneg &&
+ (cmd->advertising & ADVERTISED_ALL) ==
+ ADVERTISED_ALL) {
+ port->duplex = 0;
+ port->speed = 0;
+ port->force_link = 0;
+ } else {
+ port->duplex = cmd->duplex + 1;
+ if (cmd->speed != 1000)
+ port->speed = cmd->speed;
+ if (cmd->autoneg)
+ port->force_link = 0;
+ else
+ port->force_link = 1;
+ }
+ rc = mii_ethtool_sset(&priv->mii_if, cmd);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_nway_reset - restart auto-negotiation
+ * @dev: Network device.
+ *
+ * This function restarts the PHY for auto-negotiation.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ int rc;
+
+ mutex_lock(&hw_priv->lock);
+ rc = mii_nway_restart(&priv->mii_if);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_get_link - get network device link status
+ * @dev: Network device.
+ *
+ * This function gets the link status from the PHY.
+ *
+ * Return true if PHY is linked and false otherwise.
+ */
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int rc;
+
+ rc = mii_link_ok(&priv->mii_if);
+ return rc;
+}
+
+/**
+ * netdev_get_drvinfo - get network driver information
+ * @dev: Network device.
+ * @info: Ethtool driver info data structure.
+ *
+ * This procedure returns the driver information.
+ */
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(hw_priv->pdev));
+}
+
+static struct hw_regs {
+ int start;
+ int end;
+} hw_regs_range[] = {
+ { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
+ { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
+ { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
+ { KS884X_SIDER_P, KS8842_SGCR7_P },
+ { KS8842_MACAR1_P, KS8842_TOSR8_P },
+ { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
+ { 0, 0 }
+};
+
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
+static int netdev_get_regs_len(struct net_device *dev)
+{
+ struct hw_regs *range = hw_regs_range;
+ int regs_len = 0x10 * sizeof(u32);
+
+ while (range->end > range->start) {
+ regs_len += (range->end - range->start + 3) / 4 * 4;
+ range++;
+ }
+ return regs_len;
+}
+
+/**
+ * netdev_get_regs - get register dump
+ * @dev: Network device.
+ * @regs: Ethtool registers data structure.
+ * @ptr: Buffer to store the register values.
+ *
+ * This procedure dumps the register values in the provided buffer.
+ */
+static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int *buf = (int *) ptr;
+ struct hw_regs *range = hw_regs_range;
+ int len;
+
+ mutex_lock(&hw_priv->lock);
+ regs->version = 0;
+ for (len = 0; len < 0x40; len += 4) {
+ pci_read_config_dword(hw_priv->pdev, len, buf);
+ buf++;
+ }
+ while (range->end > range->start) {
+ for (len = range->start; len < range->end; len += 4) {
+ *buf = readl(hw->io + len);
+ buf++;
+ }
+ range++;
+ }
+ mutex_unlock(&hw_priv->lock);
+}
+
+#define WOL_SUPPORT \
+ (WAKE_PHY | WAKE_MAGIC | \
+ WAKE_UCAST | WAKE_MCAST | \
+ WAKE_BCAST | WAKE_ARP)
+
+/**
+ * netdev_get_wol - get Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This procedure returns Wake-on-LAN support.
+ */
+static void netdev_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ wol->supported = hw_priv->wol_support;
+ wol->wolopts = hw_priv->wol_enable;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/**
+ * netdev_set_wol - set Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This function sets Wake-on-LAN support.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ if (wol->wolopts & ~hw_priv->wol_support)
+ return -EINVAL;
+
+ hw_priv->wol_enable = wol->wolopts;
+
+ /* Link wakeup cannot really be disabled. */
+ if (wol->wolopts)
+ hw_priv->wol_enable |= WAKE_PHY;
+ hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
+ return 0;
+}
+
+/**
+ * netdev_get_msglevel - get debug message level
+ * @dev: Network device.
+ *
+ * This function returns current debug message level.
+ *
+ * Return current debug message flags.
+ */
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+/**
+ * netdev_set_msglevel - set debug message level
+ * @dev: Network device.
+ * @value: Debug message flags.
+ *
+ * This procedure sets debug message level.
+ */
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = value;
+}
+
+/**
+ * netdev_get_eeprom_len - get EEPROM length
+ * @dev: Network device.
+ *
+ * This function returns the length of the EEPROM.
+ *
+ * Return length of the EEPROM.
+ */
+static int netdev_get_eeprom_len(struct net_device *dev)
+{
+ return EEPROM_SIZE * 2;
+}
+
+#define EEPROM_MAGIC 0x10A18842
+
+/**
+ * netdev_get_eeprom - get EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Buffer to store the EEPROM data.
+ *
+ * This function dumps the EEPROM data in the provided buffer.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u8 *eeprom_byte = (u8 *) eeprom_data;
+ int i;
+ int len;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ eeprom->magic = EEPROM_MAGIC;
+ memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+/**
+ * netdev_set_eeprom - write EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Data buffer.
+ *
+ * This function modifies the EEPROM data one byte at a time.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u16 eeprom_word[EEPROM_SIZE];
+ u8 *eeprom_byte = (u8 *) eeprom_word;
+ int i;
+ int len;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return 1;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
+ memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
+ for (i = 0; i < EEPROM_SIZE; i++)
+ if (eeprom_word[i] != eeprom_data[i]) {
+ eeprom_data[i] = eeprom_word[i];
+ eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * netdev_get_pauseparam - get flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This procedure returns the PAUSE flow control settings.
+ */
+static void netdev_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
+ if (!hw->ksz_switch) {
+ pause->rx_pause =
+ (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
+ pause->tx_pause =
+ (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
+ } else {
+ pause->rx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
+ pause->tx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
+ }
+}
+
+/**
+ * netdev_set_pauseparam - set flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This function sets the PAUSE flow control settings.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ mutex_lock(&hw_priv->lock);
+ if (pause->autoneg) {
+ if (!pause->rx_pause && !pause->tx_pause)
+ port->flow_ctrl = PHY_NO_FLOW_CTRL;
+ else
+ port->flow_ctrl = PHY_FLOW_CTRL;
+ hw->overrides &= ~PAUSE_FLOW_CTRL;
+ port->force_link = 0;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, 1);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, 1);
+ }
+ port_set_link_speed(port);
+ } else {
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, pause->rx_pause);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, pause->tx_pause);
+ } else
+ set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ return 0;
+}
+
+/**
+ * netdev_get_ringparam - get tx/rx ring parameters
+ * @dev: Network device.
+ * @ring: Ethtool ring settings data structure.
+ *
+ * This procedure returns the TX/RX ring settings.
+ */
+static void netdev_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ ring->tx_max_pending = (1 << 9);
+ ring->tx_pending = hw->tx_desc_info.alloc;
+ ring->rx_max_pending = (1 << 9);
+ ring->rx_pending = hw->rx_desc_info.alloc;
+}
+
+#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[STATS_LEN] = {
+ { "rx_lo_priority_octets" },
+ { "rx_hi_priority_octets" },
+ { "rx_undersize_packets" },
+ { "rx_fragments" },
+ { "rx_oversize_packets" },
+ { "rx_jabbers" },
+ { "rx_symbol_errors" },
+ { "rx_crc_errors" },
+ { "rx_align_errors" },
+ { "rx_mac_ctrl_packets" },
+ { "rx_pause_packets" },
+ { "rx_bcast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_ucast_packets" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+
+ { "tx_lo_priority_octets" },
+ { "tx_hi_priority_octets" },
+ { "tx_late_collisions" },
+ { "tx_pause_packets" },
+ { "tx_bcast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_ucast_packets" },
+ { "tx_deferred" },
+ { "tx_total_collisions" },
+ { "tx_excessive_collisions" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+/**
+ * netdev_get_strings - get statistics identity strings
+ * @dev: Network device.
+ * @stringset: String set identifier.
+ * @buf: Buffer to store the strings.
+ *
+ * This procedure returns the strings used to identify the statistics.
+ */
+static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (ETH_SS_STATS == stringset)
+ memcpy(buf, &ethtool_stats_keys,
+ ETH_GSTRING_LEN * hw->mib_cnt);
+}
+
+/**
+ * netdev_get_sset_count - get statistics size
+ * @dev: Network device.
+ * @sset: The statistics set number.
+ *
+ * This function returns the size of the statistics to be reported.
+ *
+ * Return size of the statistics to be reported.
+ */
+static int netdev_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return hw->mib_cnt;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * netdev_get_ethtool_stats - get network device statistics
+ * @dev: Network device.
+ * @stats: Ethtool statistics data structure.
+ * @data: Buffer to store the statistics.
+ *
+ * This procedure returns the statistics.
+ */
+static void netdev_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int n_stats = stats->n_stats;
+ int i;
+ int n;
+ int p;
+ int rc;
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+
+ mutex_lock(&hw_priv->lock);
+ n = SWITCH_PORT_NUM;
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ if (media_connected == hw->port_mib[p].state) {
+ hw_priv->counter[p].read = 1;
+
+ /* Remember first port that requests read. */
+ if (n == SWITCH_PORT_NUM)
+ n = p;
+ }
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ if (n < SWITCH_PORT_NUM)
+ schedule_work(&hw_priv->mib_read);
+
+ if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
+ p = n;
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ } else
+ for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
+ if (0 == i) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 2);
+ } else if (hw->port_mib[p].cnt_ptr) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ }
+ }
+
+ get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
+ n = hw->mib_cnt;
+ if (n > n_stats)
+ n = n_stats;
+ n_stats -= n;
+ for (i = 0; i < n; i++)
+ *data++ = counter[i];
+}
+
+/**
+ * netdev_get_rx_csum - get receive checksum support
+ * @dev: Network device.
+ *
+ * This function gets receive checksum support setting.
+ *
+ * Return true if receive checksum is enabled; false otherwise.
+ */
+static u32 netdev_get_rx_csum(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ return hw->rx_cfg &
+ (DMA_RX_CSUM_UDP |
+ DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+}
+
+/**
+ * netdev_set_rx_csum - set receive checksum support
+ * @dev: Network device.
+ * @data: Zero to disable receive checksum support.
+ *
+ * This function sets receive checksum support setting.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ u32 new_setting = hw->rx_cfg;
+
+ if (data)
+ new_setting |=
+ (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+ else
+ new_setting &=
+ ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+ new_setting &= ~DMA_RX_CSUM_UDP;
+ mutex_lock(&hw_priv->lock);
+ if (new_setting != hw->rx_cfg) {
+ hw->rx_cfg = new_setting;
+ if (hw->enabled)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ }
+ mutex_unlock(&hw_priv->lock);
+ return 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_regs_len = netdev_get_regs_len,
+ .get_regs = netdev_get_regs,
+ .get_wol = netdev_get_wol,
+ .set_wol = netdev_set_wol,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_eeprom_len = netdev_get_eeprom_len,
+ .get_eeprom = netdev_get_eeprom,
+ .set_eeprom = netdev_set_eeprom,
+ .get_pauseparam = netdev_get_pauseparam,
+ .set_pauseparam = netdev_set_pauseparam,
+ .get_ringparam = netdev_get_ringparam,
+ .get_strings = netdev_get_strings,
+ .get_sset_count = netdev_get_sset_count,
+ .get_ethtool_stats = netdev_get_ethtool_stats,
+ .get_rx_csum = netdev_get_rx_csum,
+ .set_rx_csum = netdev_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+};
+
+/*
+ * Hardware monitoring
+ */
+
+static void update_link(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_port *port)
+{
+ if (priv->media_state != port->linked->state) {
+ priv->media_state = port->linked->state;
+ if (netif_running(dev)) {
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+ }
+ }
+}
+
+static void mib_read_work(struct work_struct *work)
+{
+ struct dev_info *hw_priv =
+ container_of(work, struct dev_info, mib_read);
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port_mib *mib;
+ int i;
+
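+ /*
+ * counter[i].read is set to 1 when a counter read has been requested
+ * and to 2 once the counters are complete and waiters are woken.
+ */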
+ next_jiffies = jiffies;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ mib = &hw->port_mib[i];
+
+ /* Reading MIB counters or requested to read. */
+ if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
+
+ /* Need to process receive interrupt. */
+ if (port_r_cnt(hw, i))
+ break;
+ hw_priv->counter[i].read = 0;
+
+ /* Finish reading counters. */
+ if (0 == mib->cnt_ptr) {
+ hw_priv->counter[i].read = 2;
+ wake_up_interruptible(
+ &hw_priv->counter[i].counter);
+ }
+ } else if (jiffies >= hw_priv->counter[i].time) {
+ /* Only read MIB counters when the port is connected. */
+ if (media_connected == mib->state)
+ hw_priv->counter[i].read = 1;
+ next_jiffies += HZ * 1 * hw->mib_port_cnt;
+ hw_priv->counter[i].time = next_jiffies;
+
+ /* Port is just disconnected. */
+ } else if (mib->link_down) {
+ mib->link_down = 0;
+
+ /* Read counters one last time after link is lost. */
+ hw_priv->counter[i].read = 1;
+ }
+ }
+}
+
+static void mib_monitor(unsigned long ptr)
+{
+ struct dev_info *hw_priv = (struct dev_info *) ptr;
+
+ mib_read_work(&hw_priv->mib_read);
+
+ /* This is used to verify Wake-on-LAN is working. */
+ if (hw_priv->pme_wait) {
+ if (hw_priv->pme_wait <= jiffies) {
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ hw_priv->pme_wait = 0;
+ }
+ } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
+
+ /* PME is asserted. Wait 2 seconds to clear it. */
+ hw_priv->pme_wait = jiffies + HZ * 2;
+ }
+
+ ksz_update_timer(&hw_priv->mib_timer_info);
+}
+
+/**
+ * dev_monitor - periodic monitoring
+ * @ptr: Network device pointer.
+ *
+ * This routine is run in a kernel timer to monitor the network device.
+ */
+static void dev_monitor(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ if (!(hw->features & LINK_INT_WORKING))
+ port_get_link_speed(port);
+ update_link(dev, priv, port);
+
+ ksz_update_timer(&priv->monitor_timer_info);
+}
+
+/*
+ * Linux network device interface functions
+ */
+
+/* Driver exported variables */
+
+static int msg_enable;
+
+static char *macaddr = ":";
+static char *mac1addr = ":";
+
+/*
+ * This enables multiple network device mode for KSZ8842, which contains a
+ * switch with two physical ports. Some users like to take control of the
+ * ports for running Spanning Tree Protocol. The driver will create an
+ * additional eth? device for the other port.
+ *
+ * One limitation is that the network devices cannot have different MTU or
+ * multicast hash table settings.
+ */
+static int multi_dev;
+
+/*
+ * As most users select multiple network device mode to use Spanning Tree
+ * Protocol, this enables a feature in which most unicast and multicast packets
+ * are forwarded inside the switch and not passed to the host. Only packets
+ * that need the host's attention are passed to it. This prevents the host
+ * from wasting CPU time examining each and every incoming packet and doing
+ * the forwarding itself.
+ *
+ * As the hack requires the private bridge header, the driver cannot compile
+ * with just the kernel headers.
+ *
+ * Enabling STP support also turns on multiple network device mode.
+ */
+static int stp;
+
+/*
+ * This enables fast aging in the KSZ8842 switch. It is not clear which
+ * situations need it on its own; however, fast aging is used to flush the
+ * dynamic MAC table when STP support is enabled.
+ */
+static int fast_aging;
+
+/**
+ * netdev_init - initialize network device.
+ * @dev: Network device.
+ *
+ * This function initializes the network device.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int __init netdev_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
+ dev_monitor, dev);
+
+ /* 500 ms timeout */
+ dev->watchdog_timeo = HZ / 2;
+
+ dev->features |= NETIF_F_IP_CSUM;
+
+ /*
+ * Hardware does not really support IPv6 checksum generation, but the
+ * driver actually runs faster with this on. Refer to IPV6_CSUM_GEN_HACK.
+ */
+ dev->features |= NETIF_F_IPV6_CSUM;
+ dev->features |= NETIF_F_SG;
+
+ sema_init(&priv->proc_sem, 1);
+
+ priv->mii_if.phy_id_mask = 0x1;
+ priv->mii_if.reg_num_mask = 0x7;
+ priv->mii_if.dev = dev;
+ priv->mii_if.mdio_read = mdio_read;
+ priv->mii_if.mdio_write = mdio_write;
+ priv->mii_if.phy_id = priv->port.first_port + 1;
+
+ priv->msg_enable = netif_msg_init(msg_enable,
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_init = netdev_init,
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_get_stats = netdev_query_statistics,
+ .ndo_start_xmit = netdev_tx,
+ .ndo_tx_timeout = netdev_tx_timeout,
+ .ndo_change_mtu = netdev_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_set_rx_mode = netdev_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netdev_netpoll,
+#endif
+};
+
+static void netdev_free(struct net_device *dev)
+{
+ if (dev->watchdog_timeo)
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+}
+
+struct platform_info {
+ struct dev_info dev_info;
+ struct net_device *netdev[SWITCH_PORT_NUM];
+};
+
+static int net_device_present;
+
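+/*
+ * Parse a MAC address given as a module parameter string of hex digits
+ * separated by ':' and use it as the override address for the given port.
+ * The last byte is offset by the adapter id so that multiple adapters do not
+ * end up with the same address.
+ */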
+static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
+{
+ int i;
+ int j;
+ int got_num;
+ int num;
+
+ i = j = num = got_num = 0;
+ while (j < MAC_ADDR_LEN) {
+ if (macaddr[i]) {
+ got_num = 1;
+ if ('0' <= macaddr[i] && macaddr[i] <= '9')
+ num = num * 16 + macaddr[i] - '0';
+ else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
+ num = num * 16 + 10 + macaddr[i] - 'A';
+ else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
+ num = num * 16 + 10 + macaddr[i] - 'a';
+ else if (':' == macaddr[i])
+ got_num = 2;
+ else
+ break;
+ } else if (got_num)
+ got_num = 2;
+ else
+ break;
+ if (2 == got_num) {
+ if (MAIN_PORT == port) {
+ hw_priv->hw.override_addr[j++] = (u8) num;
+ hw_priv->hw.override_addr[5] +=
+ hw_priv->hw.id;
+ } else {
+ hw_priv->hw.ksz_switch->other_addr[j++] =
+ (u8) num;
+ hw_priv->hw.ksz_switch->other_addr[5] +=
+ hw_priv->hw.id;
+ }
+ num = got_num = 0;
+ }
+ i++;
+ }
+ if (MAC_ADDR_LEN == j) {
+ if (MAIN_PORT == port)
+ hw_priv->hw.mac_override = 1;
+ }
+}
+
+#define KS884X_DMA_MASK (~0x0UL)
+
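+/*
+ * Read the MAC address used by the second network interface from the EEPROM.
+ * The address is left alone when the EEPROM words are all zero or read back
+ * as unprogrammed.
+ */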
+static void read_other_addr(struct ksz_hw *hw)
+{
+ int i;
+ u16 data[3];
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (i = 0; i < 3; i++)
+ data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
+ if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
+ sw->other_addr[5] = (u8) data[0];
+ sw->other_addr[4] = (u8)(data[0] >> 8);
+ sw->other_addr[3] = (u8) data[1];
+ sw->other_addr[2] = (u8)(data[1] >> 8);
+ sw->other_addr[1] = (u8) data[2];
+ sw->other_addr[0] = (u8)(data[2] >> 8);
+ }
+}
+
+#ifndef PCI_VENDOR_ID_MICREL_KS
+#define PCI_VENDOR_ID_MICREL_KS 0x16c6
+#endif
+
+static int __init pcidev_init(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct dev_priv *priv;
+ struct dev_info *hw_priv;
+ struct ksz_hw *hw;
+ struct platform_info *info;
+ struct ksz_port *port;
+ unsigned long reg_base;
+ unsigned long reg_len;
+ int cnt;
+ int i;
+ int mib_port_count;
+ int pi;
+ int port_count;
+ int result;
+ char banner[80];
+ struct ksz_switch *sw = NULL;
+
+ result = pci_enable_device(pdev);
+ if (result)
+ return result;
+
+ result = -ENODEV;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return result;
+
+ reg_base = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
+ return result;
+
+ if (!request_mem_region(reg_base, reg_len, DRV_NAME))
+ return result;
+ pci_set_master(pdev);
+
+ result = -ENOMEM;
+
+ info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!info)
+ goto pcidev_init_dev_err;
+ memset(info, 0, sizeof(struct platform_info));
+
+ hw_priv = &info->dev_info;
+ hw_priv->pdev = pdev;
+
+ hw = &hw_priv->hw;
+
+ hw->io = ioremap(reg_base, reg_len);
+ if (!hw->io)
+ goto pcidev_init_io_err;
+
+ cnt = hw_init(hw);
+ if (!cnt) {
+ if (msg_enable & NETIF_MSG_PROBE)
+ printk(KERN_ALERT "chip not detected\n");
+ result = -ENODEV;
+ goto pcidev_init_alloc_err;
+ }
+
+ sprintf(banner, "%s\n", version);
+ banner[13] = cnt + '0';
+ ks_info(hw_priv, "%s", banner);
+ ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+
+ /* Assume device is KSZ8841. */
+ hw->dev_count = 1;
+ port_count = 1;
+ mib_port_count = 1;
+ hw->addr_list_size = 0;
+ hw->mib_cnt = PORT_COUNTER_NUM;
+ hw->mib_port_cnt = 1;
+
+ /* KSZ8842 has a switch with multiple ports. */
+ if (2 == cnt) {
+ if (fast_aging)
+ hw->overrides |= FAST_AGING;
+
+ hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
+
+ /* Multiple network device interfaces are required. */
+ if (multi_dev) {
+ hw->dev_count = SWITCH_PORT_NUM;
+ hw->addr_list_size = SWITCH_PORT_NUM - 1;
+ }
+
+ /* Single network device has multiple ports. */
+ if (1 == hw->dev_count) {
+ port_count = SWITCH_PORT_NUM;
+ mib_port_count = SWITCH_PORT_NUM;
+ }
+ hw->mib_port_cnt = TOTAL_PORT_NUM;
+ hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ if (!hw->ksz_switch)
+ goto pcidev_init_alloc_err;
+ memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
+
+ sw = hw->ksz_switch;
+ }
+ for (i = 0; i < hw->mib_port_cnt; i++)
+ hw->port_mib[i].mib_start = 0;
+
+ hw->parent = hw_priv;
+
+ /* Default MTU is 1500. */
+ hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
+
+ if (ksz_alloc_mem(hw_priv))
+ goto pcidev_init_mem_err;
+
+ hw_priv->hw.id = net_device_present;
+
+ spin_lock_init(&hw_priv->hwlock);
+ mutex_init(&hw_priv->lock);
+
+ /* The tasklets are created in the enabled state. */
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
+
+ /* tasklet_enable will decrement the atomic counter. */
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+
+ for (i = 0; i < TOTAL_PORT_NUM; i++)
+ init_waitqueue_head(&hw_priv->counter[i].counter);
+
+ if (macaddr[0] != ':')
+ get_mac_addr(hw_priv, macaddr, MAIN_PORT);
+
+ /* Read MAC address and initialize override address if not overridden. */
+ hw_read_addr(hw);
+
+ /* Multiple device interfaces mode requires a second MAC address. */
+ if (hw->dev_count > 1) {
+ memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ read_other_addr(hw);
+ if (mac1addr[0] != ':')
+ get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
+ }
+
+ hw_setup(hw);
+ if (hw->ksz_switch)
+ sw_setup(hw);
+ else {
+ hw_priv->wol_support = WOL_SUPPORT;
+ hw_priv->wol_enable = 0;
+ }
+
+ INIT_WORK(&hw_priv->mib_read, mib_read_work);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
+ mib_monitor, hw_priv);
+
+ for (i = 0; i < hw->dev_count; i++) {
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev)
+ goto pcidev_init_reg_err;
+ info->netdev[i] = dev;
+
+ priv = netdev_priv(dev);
+ priv->adapter = hw_priv;
+ priv->id = net_device_present++;
+
+ port = &priv->port;
+ port->port_cnt = port_count;
+ port->mib_port_cnt = mib_port_count;
+ port->first_port = i;
+ port->flow_ctrl = PHY_FLOW_CTRL;
+
+ port->hw = hw;
+ port->linked = &hw->port_info[port->first_port];
+
+ for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
+ hw->port_info[pi].port_id = pi;
+ hw->port_info[pi].pdev = dev;
+ hw->port_info[pi].state = media_disconnected;
+ }
+
+ dev->mem_start = (unsigned long) hw->io;
+ dev->mem_end = dev->mem_start + reg_len - 1;
+ dev->irq = pdev->irq;
+ if (MAIN_PORT == i)
+ memcpy(dev->dev_addr, hw_priv->hw.override_addr,
+ MAC_ADDR_LEN);
+ else {
+ memcpy(dev->dev_addr, sw->other_addr,
+ MAC_ADDR_LEN);
+ if (!memcmp(sw->other_addr, hw->override_addr,
+ MAC_ADDR_LEN))
+ dev->dev_addr[5] += port->first_port;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ if (register_netdev(dev))
+ goto pcidev_init_reg_err;
+ port_set_power_saving(port, true);
+ }
+
+ pci_dev_get(hw_priv->pdev);
+ pci_set_drvdata(pdev, info);
+ return 0;
+
+pcidev_init_reg_err:
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ netdev_free(info->netdev[i]);
+ info->netdev[i] = NULL;
+ }
+ }
+
+pcidev_init_mem_err:
+ ksz_free_mem(hw_priv);
+ kfree(hw->ksz_switch);
+
+pcidev_init_alloc_err:
+ iounmap(hw->io);
+
+pcidev_init_io_err:
+ kfree(info);
+
+pcidev_init_dev_err:
+ release_mem_region(reg_base, reg_len);
+
+ return result;
+}
+
+static void pcidev_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+
+ pci_set_drvdata(pdev, NULL);
+
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ for (i = 0; i < hw_priv->hw.dev_count; i++) {
+ if (info->netdev[i])
+ netdev_free(info->netdev[i]);
+ }
+ if (hw_priv->hw.io)
+ iounmap(hw_priv->hw.io);
+ ksz_free_mem(hw_priv);
+ kfree(hw_priv->hw.ksz_switch);
+ pci_dev_put(hw_priv->pdev);
+ kfree(info);
+}
+
+#ifdef CONFIG_PM
+static int pcidev_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ if (hw_priv->wol_enable)
+ hw_cfg_wol_pme(hw, 0);
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netdev_open(dev);
+ netif_device_attach(dev);
+ }
+ }
+ }
+ return 0;
+}
+
+static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ netdev_close(dev);
+ }
+ }
+ }
+ if (hw_priv->wol_enable) {
+ hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
+ hw_cfg_wol_pme(hw, 1);
+ }
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+#endif
+
+static char pcidev_name[] = "ksz884xp";
+
+static struct pci_device_id pcidev_table[] = {
+ { PCI_VENDOR_ID_MICREL_KS, 0x8841,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MICREL_KS, 0x8842,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pcidev_table);
+
+static struct pci_driver pci_device_driver = {
+#ifdef CONFIG_PM
+ .suspend = pcidev_suspend,
+ .resume = pcidev_resume,
+#endif
+ .name = pcidev_name,
+ .id_table = pcidev_table,
+ .probe = pcidev_init,
+ .remove = pcidev_exit
+};
+
+static int __init ksz884x_init_module(void)
+{
+ return pci_register_driver(&pci_device_driver);
+}
+
+static void __exit ksz884x_cleanup_module(void)
+{
+ pci_unregister_driver(&pci_device_driver);
+}
+
+module_init(ksz884x_init_module);
+module_exit(ksz884x_cleanup_module);
+
+MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
+module_param(macaddr, charp, 0);
+module_param(mac1addr, charp, 0);
+module_param(fast_aging, int, 0);
+module_param(multi_dev, int, 0);
+module_param(stp, int, 0);
+MODULE_PARM_DESC(macaddr, "MAC address");
+MODULE_PARM_DESC(mac1addr, "Second MAC address");
+MODULE_PARM_DESC(fast_aging, "Fast aging");
+MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
+MODULE_PARM_DESC(stp, "STP support");
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 8d7d3d4625f..7b9447646f8 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1288,7 +1288,7 @@ static void set_multicast_list(struct net_device *dev)
} else {
short multicast_table[4];
int i;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
if(dev->flags&IFF_ALLMULTI)
num_addrs=1;
/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
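The conversions in this and the following hunks use the generic multicast accessors from include/linux/netdevice.h. At this point in the series they are thin wrappers over the old mc_count field, roughly as sketched below (paraphrased, not the verbatim kernel definitions):

/* Approximate shape of the helpers assumed by these hunks. */
#define netdev_mc_count(dev)	((dev)->mc_count)
#define netdev_mc_empty(dev)	(netdev_mc_count(dev) == 0)

Going through the accessors rather than dev->mc_count directly allows the multicast list representation to change later without another driver-wide sweep.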
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b60efd4bd01..371b58b1d15 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1380,21 +1380,21 @@ static void set_multicast_list(struct net_device *dev)
}
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT) {
cnt = MAX_MC_CNT;
printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
cmd = &dma->mc_cmd;
cmd->cmd.command = SWAP16(CmdMulticastList);
- cmd->mc_cnt = SWAP16(dev->mc_count * 6);
+ cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
for (dmi = dev->mc_list;
cnt && dmi != NULL;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index a8522bd73ae..8442c47e93e 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -232,7 +232,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_lock(&lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+ netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
* We must make the kernel realise we had to move
* into promisc mode or we start all out war on
@@ -242,9 +242,9 @@ static void temac_set_multicast_list(struct net_device *ndev)
ndev->flags |= IFF_PROMISC;
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mclist = ndev->mc_list;
- for (i = 0; mclist && i < ndev->mc_count; i++) {
+ for (i = 0; mclist && i < netdev_mc_count(ndev); i++) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index e20fefc73c8..b1f5d79af61 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1253,18 +1253,19 @@ static void set_multicast_list(struct net_device *dev) {
if (i596_debug > 1)
printk ("%s: set multicast list %d\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
+ netdev_mc_count(dev) * 6, GFP_ATOMIC);
if (cmd == NULL) {
printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
return;
}
cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
memcpy(cp, dmi,6);
@@ -1277,7 +1278,8 @@ static void set_multicast_list(struct net_device *dev) {
if (lp->set_conf.pa_next != I596_NULL) {
return;
}
- if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ if (netdev_mc_empty(dev) &&
+ !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 1d0d4d9ab62..7a5f89728a8 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -189,18 +189,11 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = NULL;
+ struct phy_device *phydev;
struct eth_platform_data *pdata;
- int phy_addr;
-
- /* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (bp->mii_bus->phy_map[phy_addr]) {
- phydev = bp->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ int ret;
+ phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
printk (KERN_ERR "%s: no PHY found\n", dev->name);
return -1;
@@ -210,17 +203,13 @@ static int macb_mii_probe(struct net_device *dev)
/* TODO : add pin_irq */
/* attach the mac to the phy */
- if (pdata && pdata->is_rmii) {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
- } else {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
- }
-
- if (IS_ERR(phydev)) {
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ pdata && pdata->is_rmii ?
+ PHY_INTERFACE_MODE_RMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return ret;
}
/* mask with MAC supported features */
@@ -901,7 +890,7 @@ static void macb_sethashtable(struct net_device *dev)
mc_filter[0] = mc_filter[1] = 0;
curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, curr = curr->next) {
if (!curr) break; /* unexpected end of list */
bitnr = hash_get_index(curr->dmi_addr);
@@ -934,7 +923,7 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, HRB, -1);
macb_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
macb_sethashtable(dev);
cfg |= MACB_BIT(NCFGR_MTI);
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index d9fbad38638..fdb0bbdd678 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -606,7 +606,7 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr);
j = crc >> 26; /* bit number in multicast_filter */
multicast_filter[j >> 3] |= 1 << (j & 7);
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 44f3c2896f2..740accbf080 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -518,7 +518,7 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
crc = ether_crc_le(6, dmi->dmi_addr);
j = crc >> 26; /* bit number in multicast_filter */
multicast_filter[j >> 3] |= 1 << (j & 7);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index ad1f6ef8930..fe7656bf68c 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -70,7 +70,8 @@ static struct cdev macvtap_cdev;
* exists.
*
* The callbacks from macvlan are always done with rcu_read_lock held
- * already, while in the file_operations, we get it ourselves.
+ * already. For calls from file_operations, we use the rcu_read_lock_bh
+ * to get a reference count on the socket and the device.
*
* When destroying a queue, we remove the pointers from the file and
* from the dev and then synchronize_rcu to make sure no thread is
@@ -159,13 +160,21 @@ static void macvtap_del_queues(struct net_device *dev)
static inline struct macvtap_queue *macvtap_file_get_queue(struct file *file)
{
+ struct macvtap_queue *q;
rcu_read_lock_bh();
- return rcu_dereference(file->private_data);
+ q = rcu_dereference(file->private_data);
+ if (q) {
+ sock_hold(&q->sk);
+ dev_hold(q->vlan->dev);
+ }
+ rcu_read_unlock_bh();
+ return q;
}
-static inline void macvtap_file_put_queue(void)
+static inline void macvtap_file_put_queue(struct macvtap_queue *q)
{
- rcu_read_unlock_bh();
+ sock_put(&q->sk);
+ dev_put(q->vlan->dev);
}
/*
@@ -314,8 +323,8 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
sock_writeable(&q->sk)))
mask |= POLLOUT | POLLWRNORM;
+ macvtap_file_put_queue(q);
out:
- macvtap_file_put_queue();
return mask;
}
@@ -366,8 +375,8 @@ static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
result = macvtap_get_user(q, iv, iov_length(iv, count),
file->f_flags & O_NONBLOCK);
+ macvtap_file_put_queue(q);
out:
- macvtap_file_put_queue();
return result;
}
@@ -398,10 +407,8 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
struct sk_buff *skb;
ssize_t len, ret = 0;
- if (!q) {
- ret = -ENOLINK;
- goto out;
- }
+ if (!q)
+ return -ENOLINK;
len = iov_length(iv, count);
if (len < 0) {
@@ -437,7 +444,7 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
remove_wait_queue(q->sk.sk_sleep, &wait);
out:
- macvtap_file_put_queue();
+ macvtap_file_put_queue(q);
return ret;
}
@@ -468,7 +475,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
if (!q)
return -ENOLINK;
memcpy(devname, q->vlan->dev->name, sizeof(devname));
- macvtap_file_put_queue();
+ macvtap_file_put_queue(q);
if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
put_user((TUN_TAP_DEV | TUN_NO_PI), &ifr->ifr_flags))
@@ -485,8 +492,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
q = macvtap_file_get_queue(file);
+ if (!q)
+ return -ENOLINK;
q->sk.sk_sndbuf = u;
- macvtap_file_put_queue();
+ macvtap_file_put_queue(q);
return 0;
case TUNSETOFFLOAD:
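The comment change at the top of this file's diff describes the new contract: macvtap_file_get_queue() takes a reference on the socket and the device under rcu_read_lock_bh and drops the lock before returning, and macvtap_file_put_queue() releases those references. The caller-side pattern the hunks above follow is, schematically (an illustrative sketch, not code taken from the driver):

static ssize_t example_op(struct file *file)
{
	struct macvtap_queue *q = macvtap_file_get_queue(file); /* holds sk and dev refs */

	if (!q)
		return -ENOLINK;
	/* ... use q->sk and q->vlan->dev without rcu_read_lock_bh held ... */
	macvtap_file_put_queue(q);	/* drop the references taken by get */
	return 0;
}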
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2d7b3bbfed0..c64e5b0d359 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2488,7 +2488,7 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
@@ -2496,7 +2496,7 @@ static void __set_rx_mode(struct net_device *dev)
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index dd45c7a9122..25f4414cc33 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -554,7 +554,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
return;
}
- if (netdev->mc_count == 0) {
+ if (netdev_mc_empty(netdev)) {
adapter->set_promisc(adapter,
NETXEN_NIU_NON_PROMISC_MODE);
netxen_nic_disable_mcast_filter(adapter);
@@ -563,7 +563,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > adapter->max_mc_count) {
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
netxen_nic_disable_mcast_filter(adapter);
return;
}
@@ -573,7 +573,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++)
netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr);
- if (index != netdev->mc_count)
+ if (index != netdev_mc_count(netdev))
printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
netxen_nic_driver_name, netdev->name);
@@ -704,12 +704,12 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index b42f5e522f9..497c6d514a6 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -597,7 +597,7 @@ static int init586(struct net_device *dev)
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index ae19aafd2c7..9225c76cac4 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -849,7 +849,7 @@ static int ni65_lance_reinit(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
ni65_init_lance(p,dev->dev_addr,0xff,0x0);
else
ni65_init_lance(p,dev->dev_addr,0x00,0x00);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index af9a8647c7e..5e604e305d9 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3,6 +3,8 @@
* Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -33,7 +35,6 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Nov 14, 2008"
@@ -89,21 +90,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
-#define niudbg(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f, ## a); \
-} while (0)
-
-#define niuinfo(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f, ## a); \
-} while (0)
-
-#define niuwarn(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f, ## a); \
-} while (0)
-
#define niu_lock_parent(np, flags) \
spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
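The hunks that follow replace the removed niudbg/niuinfo/niuwarn wrappers with the generic netif_printk()/netif_info()/netif_warn() helpers, which perform the same msg_enable check. For reference, netif_printk() in include/linux/netdevice.h reduces to roughly the following (paraphrased, not a verbatim copy):

/* Rough shape of the generic helper that replaces the driver-local macros. */
#define netif_printk(priv, type, level, dev, fmt, args...)		\
do {									\
	if (netif_msg_##type(priv))					\
		netdev_printk(level, (dev), fmt, ##args);		\
} while (0)

netif_msg_##type() tests priv->msg_enable against NETIF_MSG_<TYPE>, so the verbosity control behaves as before while the message prefix comes from the net_device instead of a driver-private PFX string.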
@@ -135,10 +121,9 @@ static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
nw64_mac(reg, bits);
err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_mac(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_mac(reg));
return err;
}
@@ -175,10 +160,9 @@ static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_ipp(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_ipp(reg));
return err;
}
@@ -216,10 +200,9 @@ static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
nw64(reg, bits);
err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64(reg));
return err;
}
@@ -475,9 +458,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -486,9 +468,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -531,8 +512,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -569,9 +550,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -580,9 +560,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -639,9 +618,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- pr_info(PFX "NIU Port %u signal bits [%08x] are not "
- "[%08x] for 10G...trying 1G\n",
- np->port, (int) (sig & mask), (int) val);
+ pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
+ np->port, (int)(sig & mask), (int)val);
/* 10G failed, try initializing at 1G */
err = serdes_init_niu_1g_serdes(np);
@@ -649,8 +627,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES "
- "Link Failed \n", np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -764,9 +742,8 @@ static int esr_reset(struct niu *np)
if (err)
return err;
if (reset != 0) {
- dev_err(np->device, PFX "Port %u ESR_RESET "
- "did not clear [%08x]\n",
- np->port, reset);
+ netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
+ np->port, reset);
return -ENODEV;
}
@@ -890,8 +867,8 @@ static int serdes_init_10g(struct niu *np)
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
return 0;
}
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
@@ -1039,8 +1016,8 @@ static int serdes_init_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -1332,8 +1309,8 @@ static int bcm8704_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u PHY will not reset "
- "(bmcr=%04x)\n", np->port, (err & 0xffff));
+ netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
+ np->port, (err & 0xffff));
return -ENODEV;
}
return 0;
@@ -1515,21 +1492,18 @@ static int xcvr_diag_bcm870x(struct niu *np)
MII_STAT1000);
if (err < 0)
return err;
- pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
if (err < 0)
return err;
- pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
- np->port, err);
+ pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
MII_NWAYTEST);
if (err < 0)
return err;
- pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif
/* XXX dig this out it might not be so useful XXX */
@@ -1555,11 +1529,11 @@ static int xcvr_diag_bcm870x(struct niu *np)
if (analog_stat0 != 0x03fc) {
if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
- pr_info(PFX "Port %u cable not connected "
- "or bad cable.\n", np->port);
+ pr_info("Port %u cable not connected or bad cable\n",
+ np->port);
} else if (analog_stat0 == 0x639c) {
- pr_info(PFX "Port %u optical module is bad "
- "or missing.\n", np->port);
+ pr_info("Port %u optical module is bad or missing\n",
+ np->port);
}
}
@@ -1699,8 +1673,8 @@ static int mii_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u MII would not reset, "
- "bmcr[%04x]\n", np->port, err);
+ netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
+ np->port, err);
return -ENODEV;
}
@@ -1895,7 +1869,7 @@ static int mii_init_common(struct niu *np)
return err;
bmsr = err;
- pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
+ pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
np->port, bmcr, bmsr);
#endif
@@ -1948,16 +1922,12 @@ static int niu_link_status_common(struct niu *np, int link_up)
unsigned long flags;
if (!netif_carrier_ok(dev) && link_up) {
- niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
- dev->name,
- (lp->active_speed == SPEED_10000 ?
- "10Gb/sec" :
- (lp->active_speed == SPEED_1000 ?
- "1Gb/sec" :
- (lp->active_speed == SPEED_100 ?
- "100Mbit/sec" : "10Mbit/sec"))),
- (lp->active_duplex == DUPLEX_FULL ?
- "full" : "half"));
+ netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
+ lp->active_speed == SPEED_10000 ? "10Gb/sec" :
+ lp->active_speed == SPEED_1000 ? "1Gb/sec" :
+ lp->active_speed == SPEED_100 ? "100Mbit/sec" :
+ "10Mbit/sec",
+ lp->active_duplex == DUPLEX_FULL ? "full" : "half");
spin_lock_irqsave(&np->lock, flags);
niu_init_xif(np);
@@ -1966,7 +1936,7 @@ static int niu_link_status_common(struct niu *np, int link_up)
netif_carrier_on(dev);
} else if (netif_carrier_ok(dev) && !link_up) {
- niuwarn(LINK, "%s: Link is down\n", dev->name);
+ netif_warn(np, link, dev, "Link is down\n");
spin_lock_irqsave(&np->lock, flags);
niu_handle_led(np, 0);
spin_unlock_irqrestore(&np->lock, flags);
@@ -2232,8 +2202,8 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
} else {
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
*link_up_p = 0;
- niuwarn(LINK, "%s: Hotplug PHY Removed\n",
- np->dev->name);
+ netif_warn(np, link, np->dev,
+ "Hotplug PHY Removed\n");
}
}
out:
@@ -2531,8 +2501,8 @@ static int serdes_init_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
- np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -3234,23 +3204,22 @@ static int fflp_early_init(struct niu *np)
parent = np->parent;
err = 0;
if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
- niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
- np->port);
if (np->parent->plat_type != PLAT_TYPE_NIU) {
fflp_reset(np);
fflp_set_timings(np);
err = fflp_disable_all_partitions(np);
if (err) {
- niudbg(PROBE, "fflp_disable_all_partitions "
- "failed, err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_disable_all_partitions failed, err=%d\n",
+ err);
goto out;
}
}
err = tcam_early_init(np);
if (err) {
- niudbg(PROBE, "tcam_early_init failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_early_init failed, err=%d\n", err);
goto out;
}
fflp_llcsnap_enable(np, 1);
@@ -3260,22 +3229,22 @@ static int fflp_early_init(struct niu *np)
err = tcam_flush_all(np);
if (err) {
- niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_flush_all failed, err=%d\n", err);
goto out;
}
if (np->parent->plat_type != PLAT_TYPE_NIU) {
err = fflp_hash_clear(np);
if (err) {
- niudbg(PROBE, "fflp_hash_clear failed, "
- "err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_hash_clear failed, err=%d\n",
+ err);
goto out;
}
}
vlan_tbl_clear(np);
- niudbg(PROBE, "fflp_early_init: Success\n");
parent->flags |= PARENT_FLGS_CLS_HWINIT;
}
out:
@@ -3665,8 +3634,8 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
cons = rp->cons;
- niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
- np->dev->name, pkt_cnt, cons);
+ netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+ "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
@@ -3714,11 +3683,12 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_errors += misc & RXMISC_COUNT;
if (unlikely(misc & RXMISC_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "RXMISC discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
+ rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
- np->dev->name, rx_channel, misc, misc-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: MISC drop=%u over=%u\n",
+ rx_channel, misc, misc-limit);
}
/* WRED (Weighted Random Early Discard) by hardware */
@@ -3728,11 +3698,11 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
if (unlikely(wred & RED_DIS_CNT_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "WRED discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
- np->dev->name, rx_channel, wred, wred-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: WRED drop=%u over=%u\n",
+ rx_channel, wred, wred-limit);
}
}
@@ -3753,8 +3723,9 @@ static int niu_rx_work(struct napi_struct *napi, struct niu *np,
mbox->rx_dma_ctl_stat = 0;
mbox->rcrstat_a = 0;
- niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
- np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
+ netif_printk(np, rx_status, KERN_DEBUG, np->dev,
+ "%s(chan[%d]), stat[%llx] qlen=%d\n",
+ __func__, rp->rx_channel, (unsigned long long)stat, qlen);
rcr_done = work_done = 0;
qlen = min(qlen, budget);
@@ -3791,8 +3762,8 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
u32 rx_vec = (v0 & 0xffffffff);
int i, work_done = 0;
- niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
- np->dev->name, (unsigned long long) v0);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -3837,39 +3808,38 @@ static int niu_poll(struct napi_struct *napi, int budget)
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
u64 stat)
{
- dev_err(np->device, PFX "%s: RX channel %u errors ( ",
- np->dev->name, rp->rx_channel);
+ netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
- printk("RBR_TMOUT ");
+ pr_cont("RBR_TMOUT ");
if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
- printk("RSP_CNT ");
+ pr_cont("RSP_CNT ");
if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
- printk("BYTE_EN_BUS ");
+ pr_cont("BYTE_EN_BUS ");
if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
- printk("RSP_DAT ");
+ pr_cont("RSP_DAT ");
if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
- printk("RCR_ACK ");
+ pr_cont("RCR_ACK ");
if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
- printk("RCR_SHA_PAR ");
+ pr_cont("RCR_SHA_PAR ");
if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
- printk("RBR_PRE_PAR ");
+ pr_cont("RBR_PRE_PAR ");
if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
- printk("CONFIG ");
+ pr_cont("CONFIG ");
if (stat & RX_DMA_CTL_STAT_RCRINCON)
- printk("RCRINCON ");
+ pr_cont("RCRINCON ");
if (stat & RX_DMA_CTL_STAT_RCRFULL)
- printk("RCRFULL ");
+ pr_cont("RCRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRFULL)
- printk("RBRFULL ");
+ pr_cont("RBRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
- printk("RBRLOGPAGE ");
+ pr_cont("RBRLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
- printk("CFIGLOGPAGE ");
+ pr_cont("CFIGLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
- printk("DC_FIDO ");
+ pr_cont("DC_FIDO ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
@@ -3883,9 +3853,9 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
err = -EINVAL;
if (err) {
- dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
- np->dev->name, rp->rx_channel,
- (unsigned long long) stat);
+ netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
+ rp->rx_channel,
+ (unsigned long long) stat);
niu_log_rxchan_errors(np, rp, stat);
}
@@ -3899,27 +3869,26 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
u64 cs)
{
- dev_err(np->device, PFX "%s: TX channel %u errors ( ",
- np->dev->name, rp->tx_channel);
+ netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
if (cs & TX_CS_MBOX_ERR)
- printk("MBOX ");
+ pr_cont("MBOX ");
if (cs & TX_CS_PKT_SIZE_ERR)
- printk("PKT_SIZE ");
+ pr_cont("PKT_SIZE ");
if (cs & TX_CS_TX_RING_OFLOW)
- printk("TX_RING_OFLOW ");
+ pr_cont("TX_RING_OFLOW ");
if (cs & TX_CS_PREF_BUF_PAR_ERR)
- printk("PREF_BUF_PAR ");
+ pr_cont("PREF_BUF_PAR ");
if (cs & TX_CS_NACK_PREF)
- printk("NACK_PREF ");
+ pr_cont("NACK_PREF ");
if (cs & TX_CS_NACK_PKT_RD)
- printk("NACK_PKT_RD ");
+ pr_cont("NACK_PKT_RD ");
if (cs & TX_CS_CONF_PART_ERR)
- printk("CONF_PART ");
+ pr_cont("CONF_PART ");
if (cs & TX_CS_PKT_PRT_ERR)
- printk("PKT_PTR ");
+ pr_cont("PKT_PTR ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
@@ -3930,12 +3899,11 @@ static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
- dev_err(np->device, PFX "%s: TX channel %u error, "
- "cs[%llx] logh[%llx] logl[%llx]\n",
- np->dev->name, rp->tx_channel,
- (unsigned long long) cs,
- (unsigned long long) logh,
- (unsigned long long) logl);
+ netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
+ rp->tx_channel,
+ (unsigned long long)cs,
+ (unsigned long long)logh,
+ (unsigned long long)logl);
niu_log_txchan_errors(np, rp, cs);
@@ -3954,9 +3922,8 @@ static int niu_mif_interrupt(struct niu *np)
phy_mdint = 1;
}
- dev_err(np->device, PFX "%s: MIF interrupt, "
- "stat[%llx] phy_mdint(%d)\n",
- np->dev->name, (unsigned long long) mif_status, phy_mdint);
+ netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
+ (unsigned long long)mif_status, phy_mdint);
return -ENODEV;
}
@@ -4081,41 +4048,40 @@ static int niu_mac_interrupt(struct niu *np)
static void niu_log_device_error(struct niu *np, u64 stat)
{
- dev_err(np->device, PFX "%s: Core device errors ( ",
- np->dev->name);
+ netdev_err(np->dev, "Core device errors ( ");
if (stat & SYS_ERR_MASK_META2)
- printk("META2 ");
+ pr_cont("META2 ");
if (stat & SYS_ERR_MASK_META1)
- printk("META1 ");
+ pr_cont("META1 ");
if (stat & SYS_ERR_MASK_PEU)
- printk("PEU ");
+ pr_cont("PEU ");
if (stat & SYS_ERR_MASK_TXC)
- printk("TXC ");
+ pr_cont("TXC ");
if (stat & SYS_ERR_MASK_RDMC)
- printk("RDMC ");
+ pr_cont("RDMC ");
if (stat & SYS_ERR_MASK_TDMC)
- printk("TDMC ");
+ pr_cont("TDMC ");
if (stat & SYS_ERR_MASK_ZCP)
- printk("ZCP ");
+ pr_cont("ZCP ");
if (stat & SYS_ERR_MASK_FFLP)
- printk("FFLP ");
+ pr_cont("FFLP ");
if (stat & SYS_ERR_MASK_IPP)
- printk("IPP ");
+ pr_cont("IPP ");
if (stat & SYS_ERR_MASK_MAC)
- printk("MAC ");
+ pr_cont("MAC ");
if (stat & SYS_ERR_MASK_SMX)
- printk("SMX ");
+ pr_cont("SMX ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_device_error(struct niu *np)
{
u64 stat = nr64(SYS_ERR_STAT);
- dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netdev_err(np->dev, "Core device error, stat[%llx]\n",
+ (unsigned long long)stat);
niu_log_device_error(np, stat);
@@ -4197,8 +4163,8 @@ static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
RX_DMA_CTL_STAT_RCRTO);
nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
- niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
}
static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
@@ -4206,8 +4172,8 @@ static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
{
rp->tx_cs = nr64(TX_CS(rp->tx_channel));
- niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
- np->dev->name, (unsigned long long) rp->tx_cs);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
}
static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
@@ -4265,8 +4231,8 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
u64 v0, v1, v2;
if (netif_msg_intr(np))
- printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
- lp, ldg);
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
+ __func__, lp, ldg);
spin_lock_irqsave(&np->lock, flags);
@@ -4275,7 +4241,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
v2 = nr64(LDSV2(ldg));
if (netif_msg_intr(np))
- printk("v0[%llx] v1[%llx] v2[%llx]\n",
+ pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
(unsigned long long) v0,
(unsigned long long) v1,
(unsigned long long) v2);
@@ -4400,8 +4366,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4411,8 +4377,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rcr)
return -ENOMEM;
if ((unsigned long)rp->rcr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
+ rp->rcr);
return -EINVAL;
}
rp->rcr_table_size = MAX_RCR_RING_SIZE;
@@ -4424,8 +4390,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rbr)
return -ENOMEM;
if ((unsigned long)rp->rbr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
+ rp->rbr);
return -EINVAL;
}
rp->rbr_table_size = MAX_RBR_RING_SIZE;
@@ -4458,8 +4424,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4469,8 +4435,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->descr)
return -ENOMEM;
if ((unsigned long)rp->descr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA descr table %p\n", np->dev->name, rp->descr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
+ rp->descr);
return -EINVAL;
}
@@ -4726,10 +4692,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
TX_RNG_CFIG_STADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "DMA addr (%llx) is not aligned.\n",
- np->dev->name, channel,
- (unsigned long long) rp->descr_dma);
+ netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
+ channel, (unsigned long long)rp->descr_dma);
return -EINVAL;
}
@@ -4746,10 +4710,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "MBOX addr (%llx) is has illegal bits.\n",
- np->dev->name, channel,
- (unsigned long long) rp->mbox_dma);
+ netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
+ channel, (unsigned long long)rp->mbox_dma);
return -EINVAL;
}
nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
@@ -5146,9 +5108,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5160,9 +5121,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5527,8 +5487,7 @@ static int niu_reset_tx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
- "BTXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BTXMAC_SW_RST));
return -ENODEV;
@@ -5629,12 +5588,11 @@ static int niu_reset_rx_xmac(struct niu *np)
while (--limit >= 0) {
if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
XRXMAC_SW_RST_SOFT_RST)))
- break;
+ break;
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
- "XRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(XRXMAC_SW_RST));
return -ENODEV;
@@ -5655,8 +5613,7 @@ static int niu_reset_rx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
- "BRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BRXMAC_SW_RST));
return -ENODEV;
@@ -5960,11 +5917,9 @@ static void niu_disable_ipp(struct niu *np)
}
if (limit < 0 &&
(rd != 0 && wr != 1)) {
- dev_err(np->device, PFX "%s: IPP would not quiesce, "
- "rd_ptr[%llx] wr_ptr[%llx]\n",
- np->dev->name,
- (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
- (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
+ netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
+ (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
+ (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
}
val = nr64_ipp(IPP_CFIG);
@@ -5981,12 +5936,12 @@ static int niu_init_hw(struct niu *np)
{
int i, err;
- niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
niu_txc_enable_port(np, 1);
niu_txc_port_dma_enable(np, 1);
niu_txc_set_imask(np, 0);
- niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -5995,27 +5950,27 @@ static int niu_init_hw(struct niu *np)
return err;
}
- niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
err = niu_init_rx_channels(np);
if (err)
goto out_uninit_tx_channels;
- niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
err = niu_init_classifier_hw(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
err = niu_init_zcp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
err = niu_init_ipp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
err = niu_init_mac(np);
if (err)
goto out_uninit_ipp;
@@ -6023,16 +5978,16 @@ static int niu_init_hw(struct niu *np)
return 0;
out_uninit_ipp:
- niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
niu_disable_ipp(np);
out_uninit_rx_channels:
- niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
niu_stop_rx_channels(np);
niu_reset_rx_channels(np);
out_uninit_tx_channels:
- niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
niu_stop_tx_channels(np);
niu_reset_tx_channels(np);
@@ -6041,25 +5996,25 @@ out_uninit_tx_channels:
static void niu_stop_hw(struct niu *np)
{
- niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
niu_enable_interrupts(np, 0);
- niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
niu_enable_rx_mac(np, 0);
- niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
niu_disable_ipp(np);
- niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
niu_stop_tx_channels(np);
- niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
niu_stop_rx_channels(np);
- niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
niu_reset_tx_channels(np);
- niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
niu_reset_rx_channels(np);
}
@@ -6369,7 +6324,7 @@ static void niu_set_rx_mode(struct net_device *dev)
np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
if (dev->flags & IFF_PROMISC)
np->flags |= NIU_FLAGS_PROMISC;
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
+ if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
np->flags |= NIU_FLAGS_MCAST;
alt_cnt = netdev_uc_count(dev);
@@ -6384,14 +6339,12 @@ static void niu_set_rx_mode(struct net_device *dev)
netdev_for_each_uc_addr(ha, dev) {
err = niu_set_alt_mac(np, index, ha->addr);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "adding alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d adding alt mac %d\n",
+ err, index);
err = niu_enable_alt_mac(np, index, 1);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "enabling alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d enabling alt mac %d\n",
+ err, index);
index++;
}
@@ -6404,15 +6357,14 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = alt_start; i < niu_num_alt_addr(np); i++) {
err = niu_enable_alt_mac(np, i, 0);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "disabling alt mac %d\n",
- dev->name, err, i);
+ netdev_warn(dev, "Error %d disabling alt mac %d\n",
+ err, i);
}
}
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
for (addr = dev->mc_list; addr; addr = addr->next) {
u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
@@ -6570,7 +6522,7 @@ static void niu_tx_timeout(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
+ dev_err(np->device, "%s: Transmit timed out, resetting\n",
dev->name);
schedule_work(&np->reset_task);
@@ -6672,8 +6624,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_tx_stop_queue(txq);
- dev_err(np->device, PFX "%s: BUG! Tx ring full when "
- "queue awake!\n", dev->name);
+ dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
rp->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -7237,8 +7188,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
tp = &parent->tcam[idx];
if (!tp->valid) {
- pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
- parent->index, np->dev->name, (u16)nfc->fs.location, idx);
+ netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
+ parent->index, (u16)nfc->fs.location, idx);
return -EINVAL;
}
@@ -7248,8 +7199,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
ret = niu_class_to_ethflow(class, &fsp->flow_type);
if (ret < 0) {
- pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
+ parent->index);
ret = -EINVAL;
goto out;
}
@@ -7332,9 +7283,8 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
if (n_entries != cnt) {
/* print warning, this should not happen */
- pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
- "n_entries[%d] != cnt[%d]!!!\n\n",
- np->parent->index, np->dev->name, n_entries, cnt);
+ netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
+ np->parent->index, __func__, n_entries, cnt);
}
return 0;
@@ -7561,9 +7511,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
}
}
if (!add_usr_cls) {
- pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
- "Could not find/insert class for pid %d\n",
- parent->index, np->dev->name, uspec->proto);
+ netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+ parent->index, __func__, uspec->proto);
ret = -EINVAL;
goto out;
}
@@ -7596,9 +7545,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
case AH_V6_FLOW:
case ESP_V6_FLOW:
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "flow %d for IPv6 not implemented\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
case IP_USER_FLOW:
@@ -7607,17 +7555,15 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
class);
} else {
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "usr flow for IPv6 not implemented\n\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
+ parent->index, __func__);
ret = -EINVAL;
goto out;
}
break;
default:
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Unknown flow type %d\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
}
@@ -7627,10 +7573,9 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
tp->assoc_data = TCAM_ASSOCDATA_DISC;
} else {
if (fsp->ring_cookie >= np->num_rx_rings) {
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Invalid RX ring %lld\n\n",
- parent->index, np->dev->name,
- (long long) fsp->ring_cookie);
+ netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+ parent->index, __func__,
+ (long long)fsp->ring_cookie);
ret = -EINVAL;
goto out;
}
@@ -7699,10 +7644,9 @@ static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
}
}
if (i == NIU_L3_PROG_CLS) {
- pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
- "Usr class 0x%llx not found \n",
- parent->index, np->dev->name,
- (unsigned long long) class);
+ netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+ parent->index, __func__,
+ (unsigned long long)class);
ret = -EINVAL;
goto out;
}
@@ -8001,9 +7945,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, PFX "Port %u, mis-matched "
- "LDG assignment "
- "for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
@@ -8056,7 +7998,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8071,7 +8013,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8152,8 +8094,9 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
s += i + 5;
sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
- niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
- vpd->fcode_major, vpd->fcode_minor);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: FCODE major(%d) minor(%d)\n",
+ vpd->fcode_major, vpd->fcode_minor);
if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
(vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
@@ -8173,8 +8116,8 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
#define FOUND_MASK_PHY 0x00000020
#define FOUND_MASK_ALL 0x0000003f
- niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
- start, end);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: start[%x] end[%x]\n", start, end);
while (start < end) {
int len, err, instance, type, prop_len;
char namebuf[64];
@@ -8228,8 +8171,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
}
if (max_len && prop_len > max_len) {
- dev_err(np->device, PFX "Property '%s' length (%d) is "
- "too long.\n", namebuf, prop_len);
+ dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
return -EINVAL;
}
@@ -8237,8 +8179,9 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
u32 off = start + 5 + err;
int i;
- niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
- "len[%d]\n", namebuf, prop_len);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ namebuf, prop_len);
for (i = 0; i < prop_len; i++)
*prop_buf++ = niu_pci_eeprom_read(np, off + i);
}
@@ -8402,8 +8345,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
- dev_err(np->device, PFX "VPD MAC invalid, "
- "falling back to SPROM.\n");
+ dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
@@ -8420,14 +8362,14 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
}
if (np->flags & NIU_FLAGS_10G)
- np->mac_xcvr = MAC_XCVR_XPCS;
+ np->mac_xcvr = MAC_XCVR_XPCS;
} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "Illegal phy string [%s].\n",
+ dev_err(np->device, "Illegal phy string [%s]\n",
np->vpd.phy_type);
- dev_err(np->device, PFX "Falling back to SPROM.\n");
+ dev_err(np->device, "Falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
}
@@ -8455,7 +8397,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->eeprom_len = len;
- niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Image size %llu\n", (unsigned long long)val);
sum = 0;
for (i = 0; i < len; i++) {
@@ -8465,10 +8408,10 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
sum += (val >> 16) & 0xff;
sum += (val >> 24) & 0xff;
}
- niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Checksum %x\n", (int)(sum & 0xff));
if ((sum & 0xff) != 0xab) {
- dev_err(np->device, PFX "Bad SPROM checksum "
- "(%x, should be 0xab)\n", (int) (sum & 0xff));
+ dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
return -EINVAL;
}
@@ -8491,11 +8434,12 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
ESPC_PHY_TYPE_PORT3_SHIFT;
break;
default:
- dev_err(np->device, PFX "Bogus port number %u\n",
+ dev_err(np->device, "Bogus port number %u\n",
np->port);
return -EINVAL;
}
- niudbg(PROBE, "SPROM: PHY type %x\n", val8);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: PHY type %x\n", val8);
switch (val8) {
case ESPC_PHY_TYPE_1G_COPPER:
@@ -8527,30 +8471,27 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
+ dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
return -EINVAL;
}
val = nr64(ESPC_MAC_ADDR0);
- niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
dev->perm_addr[0] = (val >> 0) & 0xff;
dev->perm_addr[1] = (val >> 8) & 0xff;
dev->perm_addr[2] = (val >> 16) & 0xff;
dev->perm_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
- niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
dev->perm_addr[4] = (val >> 0) & 0xff;
dev->perm_addr[5] = (val >> 8) & 0xff;
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- dev_err(np->device, PFX "SPROM MAC address invalid\n");
- dev_err(np->device, PFX "[ \n");
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
+ dev->perm_addr);
return -EINVAL;
}
@@ -8562,8 +8503,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
val = nr64(ESPC_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 8 * 4)
return -EINVAL;
@@ -8578,8 +8519,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.model[val] = '\0';
val = nr64(ESPC_BD_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 4 * 4)
return -EINVAL;
@@ -8595,8 +8536,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.mac_num =
nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
- niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
- np->vpd.mac_num);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
return 0;
}
@@ -8629,8 +8570,6 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
}
}
- niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
- np->port, parent->num_ports);
if (np->port >= parent->num_ports)
return -ENODEV;
@@ -8659,14 +8598,12 @@ static int __devinit phy_record(struct niu_parent *parent,
pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
parent->index, id,
- (type == PHY_TYPE_PMA_PMD ?
- "PMA/PMD" :
- (type == PHY_TYPE_PCS ?
- "PCS" : "MII")),
+ type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
+ type == PHY_TYPE_PCS ? "PCS" : "MII",
phy_port);
if (p->cur[type] >= NIU_MAX_PORTS) {
- printk(KERN_ERR PFX "Too many PHY ports.\n");
+ pr_err("Too many PHY ports\n");
return -EINVAL;
}
idx = p->cur[type];
@@ -8727,8 +8664,7 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
parent->rxchan_per_port[i] = (16 / num_ports);
parent->txchan_per_port[i] = (16 / num_ports);
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8771,8 +8707,7 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
parent->rxchan_per_port[i] = rx_chans_per_1g;
parent->txchan_per_port[i] = tx_chans_per_1g;
}
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8781,23 +8716,20 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
}
if (tot_rx > NIU_NUM_RXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
parent->index, tot_rx);
for (i = 0; i < num_ports; i++)
parent->rxchan_per_port[i] = 1;
}
if (tot_tx > NIU_NUM_TXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
parent->index, tot_tx);
for (i = 0; i < num_ports; i++)
parent->txchan_per_port[i] = 1;
}
if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
- printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
- "RX[%d] TX[%d]\n",
- parent->index, tot_rx, tot_tx);
+ pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+ parent->index, tot_rx, tot_tx);
}
}
@@ -8825,18 +8757,18 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
struct rdc_table *rt = &tp->tables[grp];
int slot;
- pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
+ pr_info("niu%d: Port %d RDC tbl(%d) [ ",
parent->index, i, tp->first_table_num + grp);
for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
rt->rxdma_channel[slot] =
rdc_channel_base + this_channel_offset;
- printk("%d ", rt->rxdma_channel[slot]);
+ pr_cont("%d ", rt->rxdma_channel[slot]);
if (++this_channel_offset == num_channels)
this_channel_offset = 0;
}
- printk("]\n");
+ pr_cont("]\n");
}
parent->rdc_default[i] = rdc_channel_base;
@@ -8996,8 +8928,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
break;
default:
- printk(KERN_ERR PFX "Unsupported port config "
- "10G[%d] 1G[%d]\n",
+ pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
num_10g, num_1g);
return -EINVAL;
}
@@ -9015,8 +8946,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
return 0;
unknown_vg_1g_port:
- printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
- lowest_1g);
+ pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
return -EINVAL;
}
@@ -9025,9 +8955,6 @@ static int __devinit niu_probe_ports(struct niu *np)
struct niu_parent *parent = np->parent;
int err, i;
- niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
- parent->port_phy);
-
if (parent->port_phy == PORT_PHY_UNKNOWN) {
err = walk_phys(np, parent);
if (err)
@@ -9048,9 +8975,6 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
{
struct niu_classifier *cp = &np->clas;
- niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
- np->parent->tcam_num_entries);
-
cp->tcam_top = (u16) np->port;
cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
cp->h1_init = 0xffffffff;
@@ -9116,8 +9040,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Port %u is invalid, cannot "
- "compute MAC block offset.\n", np->port);
+ dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
return -EINVAL;
}
@@ -9327,9 +9250,8 @@ static int __devinit niu_get_of_props(struct niu *np)
phy_type = of_get_property(dp, "phy-type", &prop_len);
if (!phy_type) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "phy-type property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks phy-type property\n",
+ dp->full_name);
return -EINVAL;
}
@@ -9339,34 +9261,26 @@ static int __devinit niu_get_of_props(struct niu *np)
strcpy(np->vpd.phy_type, phy_type);
if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
- dp->full_name, np->vpd.phy_type);
+ netdev_err(dev, "%s: Illegal phy string [%s]\n",
+ dp->full_name, np->vpd.phy_type);
return -EINVAL;
}
mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
if (!mac_addr) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "local-mac-address property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
+ dp->full_name);
return -EINVAL;
}
if (prop_len != dev->addr_len) {
- dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
- "is wrong.\n",
- dp->full_name, prop_len);
+ netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
+ dp->full_name, prop_len);
}
memcpy(dev->perm_addr, mac_addr, dev->addr_len);
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- int i;
-
- dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
- dp->full_name);
- dev_err(np->device, PFX "%s: [ \n",
- dp->full_name);
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ netdev_err(dev, "%s: OF MAC address is invalid\n",
+ dp->full_name);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
return -EINVAL;
}
@@ -9414,8 +9328,8 @@ static int __devinit niu_get_invariants(struct niu *np)
nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
offset = niu_pci_vpd_offset(np);
- niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
- offset);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() VPD offset [%08x]\n", __func__, offset);
if (offset)
niu_pci_vpd_fetch(np, offset);
nw64(ESPC_PIO_EN, 0);
@@ -9575,8 +9489,6 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
struct niu_parent *p;
int i;
- niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
-
plat_dev = platform_device_register_simple("niu", niu_parent_index,
NULL, 0);
if (IS_ERR(plat_dev))
@@ -9641,9 +9553,6 @@ static struct niu_parent * __devinit niu_get_parent(struct niu *np,
struct niu_parent *p, *tmp;
int port = np->port;
- niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
- ptype, port);
-
mutex_lock(&niu_parent_lock);
p = NULL;
list_for_each_entry(tmp, &niu_parent_list, list) {
@@ -9681,7 +9590,8 @@ static void niu_put_parent(struct niu *np)
BUG_ON(!p || p->ports[port] != np);
- niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() port[%u]\n", __func__, port);
sprintf(port_name, "port%d", port);
@@ -9772,7 +9682,7 @@ static struct net_device * __devinit niu_alloc_and_init(
dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
if (!dev) {
- dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
return NULL;
}
@@ -9858,30 +9768,26 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
- "base addresses, aborting.\n");
+ dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pos <= 0) {
- dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
}
@@ -9920,17 +9826,14 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
err = pci_set_consistent_dma_mask(pdev, dma_mask);
if (err) {
- dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
- "DMA for consistent allocations, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
goto err_out_release_parent;
}
}
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_out_release_parent;
}
}
@@ -9939,8 +9842,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
- dev_err(&pdev->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -9955,15 +9857,13 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&pdev->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
@@ -10157,7 +10057,7 @@ static int __devinit niu_of_probe(struct of_device *op,
reg = of_get_property(op->node, "reg", NULL);
if (!reg) {
- dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
+ dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
op->node->full_name);
return -ENODEV;
}
@@ -10186,8 +10086,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[1]),
"niu regs");
if (!np->regs) {
- dev_err(&op->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -10196,8 +10095,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[2]),
"niu vregs-1");
if (!np->vir_regs_1) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10206,8 +10104,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[3]),
"niu vregs-2");
if (!np->vir_regs_2) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10217,15 +10114,13 @@ static int __devinit niu_of_probe(struct of_device *op,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&op->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&op->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index a3b6aa0f375..8dd509c09bc 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1719,7 +1719,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
+ if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 6fd8789ef48..3a0f910924a 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -493,8 +493,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
if (netdev->flags & IFF_MULTICAST) {
- if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
- || netdev->mc_count > available_cam_entries)
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
multicast_mode = 2; /* 1 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
@@ -511,7 +511,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
}
if (multicast_mode == 0) {
- i = netdev->mc_count;
+ i = netdev_mc_count(netdev);
list = netdev->mc_list;
while (i--) {
octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 20273832bfc..bbdf0398c93 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1820,7 +1820,7 @@ static void netdrv_set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -1829,7 +1829,7 @@ static void netdrv_set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 98938ea9e0b..3d1d3a7b7ed 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1148,7 +1148,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 322e11df009..091e0b00043 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -886,7 +886,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
opts |= RxMulticast | RxProm;
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
opts |= RxMulticast;
outw(opts, ioaddr + EL3_CMD);
}
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7b17404d085..3d573ed5f7c 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -1187,19 +1187,19 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 12e3233868e..c42a31a97fa 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1481,8 +1481,8 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
@@ -1490,13 +1490,13 @@ static void set_multicast_list(struct net_device *dev)
#endif
/* Set multicast_num_addrs. */
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
/* Set multicast_ladrf. */
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
dmi = dmi->next;
BuildLAF(lp->multicast_ladrf, adr);
@@ -1537,15 +1537,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
}
#endif
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
restore_multicast_list(dev);
} /* set_multicast_list */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 6dd486d2977..d2e86b8887c 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1638,8 +1638,8 @@ static void set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
- if (dev->mc_count) {
- fill_multicast_tbl(dev->mc_count, dev->mc_list,
+ if (!netdev_mc_empty(dev)) {
+ fill_multicast_tbl(netdev_mc_count(dev), dev->mc_list,
(u_char *)multicast_table);
}
rx_cfg_setting = RxStripCRC | RxEnable;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 466fc72698c..4ace18a7115 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1384,7 +1384,7 @@ set_addresses(struct net_device *dev)
if (++n > 9)
break;
i = 0;
- if (n > 1 && n <= dev->mc_count && dmi) {
+ if (n > 1 && n <= netdev_mc_count(dev) && dmi) {
dmi = dmi->next;
}
}
@@ -1394,7 +1394,7 @@ set_addresses(struct net_device *dev)
SelectPage(k);
}
- if (n && n <= dev->mc_count && dmi)
+ if (n && n <= netdev_mc_count(dev) && dmi)
addr = dmi->dmi_addr;
else
addr = dev->dev_addr;
@@ -1424,9 +1424,9 @@ set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* snoop */
PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
- } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* the chip can filter 9 addresses perfectly */
PutByte(XIRCREG42_SWC1, value | 0x01);
SelectPage(0x40);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 0dc7ff896ee..3522794550d 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -2698,7 +2698,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 0c768593aad..c19dd4a6cd7 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -568,7 +568,7 @@ void gelic_net_set_multi(struct net_device *netdev)
status);
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
+ (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
status = lv1_net_add_multicast_address(bus_id(card),
dev_id(card),
0, 1);
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index dc6cd69d6d9..8ea7f869e29 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -447,12 +447,12 @@ void qlcnic_set_multi(struct net_device *netdev)
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
for (mc_ptr = netdev->mc_list; mc_ptr;
mc_ptr = mc_ptr->next) {
qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index ebfd17776b5..57d135e3bfa 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -19,14 +19,6 @@
#define DRV_VERSION "v1.00.00.23.00.00-01"
#define PFX "qlge: "
-#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
- do { \
- if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
- ; \
- else \
- dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
- "%s: " fmt, __func__, ##args); \
- } while (0)
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 57df835147e..ff8550d2ca8 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -443,8 +443,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower MAC address */
@@ -455,8 +455,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower Mcast address */
@@ -479,8 +479,8 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
for (i = 0; i < 16; i++) {
status = ql_get_routing_reg(qdev, i, &value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of routing index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register.\n");
goto err;
} else {
*buf++ = value;
@@ -736,8 +736,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
int i;
if (!mpi_coredump) {
- QPRINTK(qdev, DRV, ERR,
- "No memory available.\n");
+ netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
return -ENOMEM;
}
@@ -749,8 +748,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
status = ql_pause_mpi_risc(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed RISC pause. Status = 0x%.08x\n", status);
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
goto err;
}
@@ -911,9 +910,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
status = ql_get_serdes_regs(qdev, mpi_coredump);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
- status);
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
goto err;
}
@@ -1177,16 +1176,16 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
/* clear the pause */
status = ql_unpause_mpi_risc(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed RISC unpause. Status = 0x%.08x\n", status);
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
goto err;
}
/* Reset the RISC so we can dump RAM */
status = ql_hard_reset_mpi_risc(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed RISC reset. Status = 0x%.08x\n", status);
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
goto err;
}
@@ -1198,8 +1197,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
CODE_RAM_ADDR, CODE_RAM_CNT);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Dump of CODE RAM. Status = 0x%.08x\n", status);
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+ status);
goto err;
}
@@ -1212,8 +1212,9 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
MEMC_RAM_ADDR, MEMC_RAM_CNT);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Dump of MEMC RAM. Status = 0x%.08x\n", status);
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+ status);
goto err;
}
err:
@@ -1225,21 +1226,19 @@ err:
static void ql_get_core_dump(struct ql_adapter *qdev)
{
if (!ql_own_firmware(qdev)) {
- QPRINTK(qdev, DRV, ERR, "%s: Don't own firmware!\n",
- qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
return;
}
if (!netif_running(qdev->ndev)) {
- QPRINTK(qdev, IFUP, ERR,
- "Force Coredump can only be done from interface "
- "that is up.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Force Coredump can only be done from interface that is up.\n");
return;
}
if (ql_mb_sys_err(qdev)) {
- QPRINTK(qdev, IFUP, ERR,
- "Fail force coredump with ql_mb_sys_err().\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Fail force coredump with ql_mb_sys_err().\n");
return;
}
}
@@ -1334,7 +1333,8 @@ void ql_mpi_core_to_log(struct work_struct *work)
count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
tmp = (u32 *)qdev->mpi_coredump;
- QPRINTK(qdev, DRV, DEBUG, "Core is dumping to log file!\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Core is dumping to log file!\n");
for (i = 0; i < count; i += 8) {
printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 058fa0a48c6..4f26afeb0f3 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -67,8 +67,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -89,8 +89,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -107,8 +107,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- QPRINTK(qdev, DRV, ERR,
- "Couldn't get xgmac sem.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
goto quit;
}
/*
@@ -116,8 +116,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x200; i < 0x280; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -129,8 +130,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x300; i < 0x3d0; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -142,8 +144,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x500; i < 0x540; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -155,8 +158,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x568; i < 0x5a8; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -167,8 +171,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get RX NIC FIFO DROP statistics.
*/
if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n", i);
goto end;
} else
*iter = data;
@@ -396,14 +400,13 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
qdev->wol = wol->wolopts;
- QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
- qdev->wol, ndev->name);
+ netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
if (!qdev->wol) {
u32 wol = 0;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "cleared sucessfully" : "clear failed",
- wol, qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
+ status == 0 ? "cleared sucessfully" : "clear failed",
+ wol);
}
return 0;
@@ -534,8 +537,8 @@ static void ql_self_test(struct net_device *ndev,
}
clear_bit(QL_SELFTEST, &qdev->flags);
} else {
- QPRINTK(qdev, DRV, ERR,
- "%s: is down, Loopback test will fail.\n", ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "is down, Loopback test will fail.\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 3cb60e10d45..2c052caee88 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -128,7 +128,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
break;
default:
- QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
+ netif_alert(qdev, probe, qdev->ndev, "Bad Semaphore mask!.\n");
return -EINVAL;
}
@@ -168,17 +168,17 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
/* check for errors */
if (temp & err_bit) {
- QPRINTK(qdev, PROBE, ALERT,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
+ netif_alert(qdev, probe, qdev->ndev,
+ "register 0x%.08x access error, value = 0x%.08x!.\n",
+ reg, temp);
return -EIO;
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
count--;
}
- QPRINTK(qdev, PROBE, ALERT,
- "Timed out waiting for reg %x to come ready.\n", reg);
+ netif_alert(qdev, probe, qdev->ndev,
+ "Timed out waiting for reg %x to come ready.\n", reg);
return -ETIMEDOUT;
}
@@ -221,7 +221,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
map = pci_map_single(qdev->pdev, ptr, size, direction);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
@@ -231,8 +231,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
status = ql_wait_cfg(qdev, bit);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for CFG to come ready.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for CFG to come ready.\n");
goto exit;
}
@@ -313,8 +313,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -371,12 +371,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- QPRINTK(qdev, IFUP, DEBUG,
- "Adding %s address %pM"
- " at index %d in the CAM.\n",
- ((type ==
- MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
- "UNICAST"), addr, index);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Adding %s address %pM at index %d in the CAM.\n",
+ type == MAC_ADDR_TYPE_MULTI_MAC ?
+ "MULTICAST" : "UNICAST",
+ addr, index);
status =
ql_wait_reg_rdy(qdev,
@@ -426,9 +425,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
- QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
- (enable_bit ? "Adding" : "Removing"),
- index, (enable_bit ? "to" : "from"));
+ netif_info(qdev, ifup, qdev->ndev,
+ "%s VLAN ID %d %s the CAM.\n",
+ enable_bit ? "Adding" : "Removing",
+ index,
+ enable_bit ? "to" : "from");
status =
ql_wait_reg_rdy(qdev,
@@ -443,8 +444,8 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -463,14 +464,13 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
if (set) {
addr = &qdev->ndev->dev_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Set Mac addr %pM\n", addr);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Clearing MAC address on %s\n",
- qdev->ndev->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Clearing MAC address\n");
}
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
@@ -479,23 +479,21 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
- "address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init mac address.\n");
return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
ql_set_mac_addr(qdev, 1);
}
void ql_link_off(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
ql_set_mac_addr(qdev, 0);
}
@@ -532,27 +530,27 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
- QPRINTK(qdev, IFUP, DEBUG,
- "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
- (enable ? "Adding" : "Removing"),
- ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
- ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
- ((index ==
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
- ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
- ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
- ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
- ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
- ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
- ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
- ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
- ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
- ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
- ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
- ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
- ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
- ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
- (enable ? "to" : "from"));
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s %s mask %s the routing reg.\n",
+ enable ? "Adding" : "Removing",
+ index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
+ index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
+ index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
+ index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
+ index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
+ index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
+ index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
+ index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
+ index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
+ index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
+ index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
+ index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
+ index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
+ index == RT_IDX_UNUSED013 ? "UNUSED13" :
+ index == RT_IDX_UNUSED014 ? "UNUSED14" :
+ index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
+ "(Bad index != RT_IDX)",
+ enable ? "to" : "from");
switch (mask) {
case RT_IDX_CAM_HIT:
@@ -612,8 +610,8 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
break;
}
default:
- QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
- mask);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
@@ -719,7 +717,7 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
status = strncmp((char *)&qdev->flash, str, 4);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
return status;
}
@@ -727,8 +725,8 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
csum += le16_to_cpu(*flash++);
if (csum)
- QPRINTK(qdev, IFUP, ERR,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
return csum;
}
@@ -780,7 +778,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
}
@@ -789,7 +788,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8000) / sizeof(u16),
"8000");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -807,7 +806,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
- QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
@@ -841,7 +840,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
@@ -851,7 +851,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8012) / sizeof(u16),
"8012");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -969,17 +969,17 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
- QPRINTK(qdev, LINK, INFO,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+ netif_info(qdev, link, qdev->ndev,
+ "Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
- QPRINTK(qdev, LINK, CRIT,
- "Port initialize timed out.\n");
+ netif_crit(qdev, link, qdev->ndev,
+ "Port initialize timed out.\n");
}
return status;
}
- QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
+ netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
@@ -1109,8 +1109,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
- QPRINTK(qdev, DRV, ERR,
- "page allocation failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "page allocation failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.offset = 0;
@@ -1120,8 +1120,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
- QPRINTK(qdev, DRV, ERR,
- "PCI mapping failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "PCI mapping failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.map = map;
@@ -1158,15 +1158,15 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->lbq_free_cnt > 32) {
for (i = 0; i < 16; i++) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- QPRINTK(qdev, IFUP, ERR,
- "Could not get a page chunk.\n");
- return;
- }
+ netif_err(qdev, ifup, qdev->ndev,
+ "Could not get a page chunk.\n");
+ return;
+ }
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
@@ -1191,9 +1191,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
}
@@ -1211,19 +1211,20 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->sbq_free_cnt > 16) {
for (i = 0; i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "sbq: getting new skb for index %d.\n",
+ sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, PROBE, ERR,
- "Couldn't get an skb.\n");
+ netif_err(qdev, probe, qdev->ndev,
+ "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1233,7 +1234,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq_buf_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -1257,9 +1259,9 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
}
@@ -1291,8 +1293,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
* then its an OAL.
*/
if (i == 7) {
- QPRINTK(qdev, TX_DONE, DEBUG,
- "unmapping OAL area.\n");
+ netif_printk(qdev, tx_done, KERN_DEBUG,
+ qdev->ndev,
+ "unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
@@ -1301,8 +1304,8 @@ static void ql_unmap_send(struct ql_adapter *qdev,
maplen),
PCI_DMA_TODEVICE);
} else {
- QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
- i);
+ netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+ "unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
@@ -1327,7 +1330,8 @@ static int ql_map_send(struct ql_adapter *qdev,
int frag_cnt = skb_shinfo(skb)->nr_frags;
if (frag_cnt) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "frag_cnt = %d.\n", frag_cnt);
}
/*
* Map the skb buffer first.
@@ -1336,8 +1340,8 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping failed with error: %d\n", err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping failed with error: %d\n", err);
return NETDEV_TX_BUSY;
}
@@ -1383,9 +1387,9 @@ static int ql_map_send(struct ql_adapter *qdev,
PCI_DMA_TODEVICE);
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping outbound address list with error: %d\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
goto map_error;
}
@@ -1413,9 +1417,9 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping frags failed with error: %d.\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping frags failed with error: %d.\n",
+ err);
goto map_error;
}
@@ -1460,7 +1464,8 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
skb = napi_get_frags(napi);
if (!skb) {
- QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, exiting.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
@@ -1503,8 +1508,8 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
- QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
- "need to unwind!.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, need to unwind!.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
@@ -1516,8 +1521,8 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
- ib_mac_rsp->flags2);
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
rx_ring->rx_errors++;
goto err_out;
}
@@ -1526,14 +1531,15 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
* MTU since FCoE uses 2k frames.
*/
if (skb->len > ndev->mtu + ETH_HLEN) {
- QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers and data in large. Chain "
- "page to new skb and pull tail.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset+ETH_HLEN,
length-ETH_HLEN);
@@ -1550,8 +1556,8 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1560,8 +1566,9 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
}
}
}
@@ -1600,8 +1607,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
if (new_skb == NULL) {
- QPRINTK(qdev, PROBE, ERR,
- "No skb available, drop the packet.\n");
+ netif_err(qdev, probe, qdev->ndev,
+ "No skb available, drop the packet.\n");
rx_ring->rx_dropped++;
return;
}
@@ -1611,8 +1618,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
- ib_mac_rsp->flags2);
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
dev_kfree_skb_any(skb);
rx_ring->rx_errors++;
return;
@@ -1637,16 +1644,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
prefetch(skb->data);
skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
- QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1660,8 +1669,8 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1670,8 +1679,9 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
}
}
}
@@ -1725,7 +1735,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header of %d bytes in small buffer.\n", hdr_len);
/*
* Headers fit nicely into a small buffer.
*/
@@ -1744,15 +1755,16 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* Handle the data buffer(s).
*/
if (unlikely(!length)) { /* Is there data too? */
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No Data buffer in this packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No Data buffer in this packet.\n");
return skb;
}
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Headers in small, data of %d bytes in small, combine them.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Headers in small, data of %d bytes in small, combine them.\n",
+ length);
/*
* Data is less than small buffer size so it's
* stuffed in a small buffer.
@@ -1778,8 +1790,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
maplen),
PCI_DMA_FROMDEVICE);
} else {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes in a single small buffer.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes in a single small buffer.\n",
+ length);
sbq_desc = ql_get_curr_sbuf(rx_ring);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
@@ -1794,18 +1807,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Header in small, %d bytes in large. Chain large to small!\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header in small, %d bytes in large. Chain large to small!\n",
+ length);
/*
* The data is in a single large buffer. We
* chain it to the header buffer's skb and let
* it rip.
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Chaining page at offset = %d,"
- "for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Chaining page at offset = %d, for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
@@ -1821,8 +1834,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (skb == NULL) {
- QPRINTK(qdev, PROBE, DEBUG,
- "No skb available, drop the packet.\n");
+ netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop the packet.\n");
return NULL;
}
pci_unmap_page(qdev->pdev,
@@ -1831,8 +1844,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
pci_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
skb_fill_page_desc(skb, 0,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1873,8 +1887,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* a local buffer and use it to find the
* pages to chain.
*/
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers & data in chain of large.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers & data in chain of large.\n",
+ length);
skb = sbq_desc->p.skb;
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
@@ -1884,9 +1899,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
size = (length < rx_ring->lbq_buf_size) ? length :
rx_ring->lbq_buf_size;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Adding page %d to skb for %d bytes.\n",
+ i, size);
skb_fill_page_desc(skb, i,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1916,16 +1931,16 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No skb available, drop packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop packet.\n");
rx_ring->rx_dropped++;
return;
}
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
- ib_mac_rsp->flags2);
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
dev_kfree_skb_any(skb);
rx_ring->rx_errors++;
return;
@@ -1950,17 +1965,18 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
prefetch(skb->data);
skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
rx_ring->rx_multicast++;
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
}
skb->protocol = eth_type_trans(skb, ndev);
@@ -1973,8 +1989,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1983,8 +1999,8 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
}
}
}
@@ -2054,8 +2070,9 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
/* Free small buffer that holds the IAL */
lbq_desc = ql_get_curr_sbuf(rx_ring);
- QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
- length, qdev->ndev->mtu);
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Dropping frame, len %d > mtu %d\n",
+ length, qdev->ndev->mtu);
/* Unwind the large buffers for this frame. */
while (length > 0) {
@@ -2090,20 +2107,20 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
OB_MAC_IOCB_RSP_L |
OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Total descriptor length did not match transfer length.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Total descriptor length did not match transfer length.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too short to be legal, not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too short to be valid, not sent.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too long, but sent anyway.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too long, but sent anyway.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "PCI backplane error. Frame not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "PCI backplane error. Frame not sent.\n");
}
}
atomic_inc(&tx_ring->tx_count);
@@ -2133,33 +2150,35 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
{
switch (ib_ae_rsp->event) {
case MGMT_ERR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR,
- "Management Processor Fatal Error.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Management Processor Fatal Error.\n");
ql_queue_fw_error(qdev);
return;
case CAM_LOOKUP_ERR_EVENT:
- QPRINTK(qdev, LINK, ERR,
- "Multiple CAM hits lookup occurred.\n");
- QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
+ netif_err(qdev, link, qdev->ndev,
+ "Multiple CAM hits lookup occurred.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- QPRINTK(qdev, RX_ERR, ERR,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netif_err(qdev, rx_err, qdev->ndev,
+ "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
default:
- QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
- ib_ae_rsp->event);
+ netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+ ib_ae_rsp->event);
ql_queue_asic_error(qdev);
break;
}
@@ -2176,9 +2195,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
rmb();
@@ -2189,9 +2208,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_process_mac_tx_intr(qdev, net_rsp);
break;
default:
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
}
count++;
ql_update_cq(rx_ring);
@@ -2223,9 +2242,9 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = rx_ring->curr_entry;
rmb();
@@ -2241,11 +2260,10 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
net_rsp);
break;
default:
- {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ break;
}
count++;
ql_update_cq(rx_ring);
@@ -2266,8 +2284,8 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
int i, work_done = 0;
struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
- QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
- rx_ring->cq_id);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
* right after the RSS rings. */
@@ -2279,9 +2297,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
trx_ring->cnsmr_idx)) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing TX completion ring %d.\n",
+ __func__, trx_ring->cq_id);
ql_clean_outbound_rx_ring(trx_ring);
}
}
@@ -2291,9 +2309,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
*/
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing RX completion ring %d.\n",
+ __func__, rx_ring->cq_id);
work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
}
@@ -2310,12 +2328,13 @@ static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *gr
qdev->vlgrp = grp;
if (grp) {
- QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning on VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Turning off VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning off VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
@@ -2331,7 +2350,8 @@ static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
return;
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -2348,7 +2368,8 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to clear vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -2377,7 +2398,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
spin_lock(&qdev->hw_lock);
if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "Shared Interrupt, Not ours!\n");
spin_unlock(&qdev->hw_lock);
return IRQ_NONE;
}
@@ -2390,10 +2412,11 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- QPRINTK(qdev, INTR, ERR,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Resetting chip. Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -2406,7 +2429,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
- QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+ netif_err(qdev, intr, qdev->ndev,
+ "Got MPI processor interrupt.\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
@@ -2421,8 +2445,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
var = ql_read32(qdev, ISR1);
if (var & intr_context->irq_mask) {
- QPRINTK(qdev, INTR, INFO,
- "Waking handler for rx_ring[0].\n");
+ netif_info(qdev, intr, qdev->ndev,
+ "Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
@@ -2519,9 +2543,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- QPRINTK(qdev, TX_QUEUED, INFO,
- "%s: shutting down tx queue %d du to lack of resources.\n",
- __func__, tx_ring_idx);
+ netif_info(qdev, tx_queued, qdev->ndev,
+ "%s: shutting down tx queue %d du to lack of resources.\n",
+ __func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
@@ -2542,8 +2566,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
- vlan_tx_tag_get(skb));
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
}
@@ -2557,8 +2581,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "Could not map the segments.\n");
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2569,8 +2593,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "tx queued, slot %d, len %d\n",
+ tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
return NETDEV_TX_OK;
@@ -2601,8 +2626,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev,
PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
if (qdev->rx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of RX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2610,8 +2635,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
if (qdev->tx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of TX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2665,7 +2690,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
if ((tx_ring->wq_base == NULL) ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
tx_ring->q =
@@ -2716,7 +2741,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
if (sbq_desc == NULL) {
- QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
@@ -2843,7 +2869,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->cq_base_dma);
if (rx_ring->cq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -2856,8 +2882,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->sbq_base_dma);
if (rx_ring->sbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue allocation failed.\n");
goto err_mem;
}
@@ -2868,8 +2894,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->sbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2885,8 +2911,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->lbq_base_dma);
if (rx_ring->lbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue allocation failed.\n");
goto err_mem;
}
/*
@@ -2896,8 +2922,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->lbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2926,10 +2952,10 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc = &tx_ring->q[i];
if (tx_ring_desc && tx_ring_desc->skb) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Freeing lost SKB %p, from queue %d, index %d.\n",
+ tx_ring_desc->skb, j,
+ tx_ring_desc->index);
ql_unmap_send(qdev, tx_ring_desc,
tx_ring_desc->map_cnt);
dev_kfree_skb(tx_ring_desc->skb);
@@ -2960,16 +2986,16 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "RX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "RX resource allocation failed.\n");
goto err_mem;
}
}
/* Allocate tx queue resources */
for (i = 0; i < qdev->tx_ring_count; i++) {
if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "TX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "TX resource allocation failed.\n");
goto err_mem;
}
}
@@ -3104,14 +3130,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
break;
default:
- QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
- rx_ring->type);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Invalid rx_ring->type = %d.\n", rx_ring->type);
}
- QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Initializing rx work queue.\n");
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
}
return err;
@@ -3157,10 +3184,11 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded WQICB.\n");
return err;
}
@@ -3214,15 +3242,15 @@ static void ql_enable_msix(struct ql_adapter *qdev)
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
- QPRINTK(qdev, IFUP, WARNING,
- "MSI-X Enable failed, trying MSI.\n");
+ netif_warn(qdev, ifup, qdev->ndev,
+ "MSI-X Enable failed, trying MSI.\n");
qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
} else if (err == 0) {
set_bit(QL_MSIX_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
+ netif_info(qdev, ifup, qdev->ndev,
+ "MSI-X Enabled, got %d vectors.\n",
+ qdev->intr_count);
return;
}
}
@@ -3231,13 +3259,14 @@ msi:
if (qlge_irq_type == MSI_IRQ) {
if (!pci_enable_msi(qdev->pdev)) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "Running with MSI interrupts.\n");
+ netif_info(qdev, ifup, qdev->ndev,
+ "Running with MSI interrupts.\n");
return;
}
}
qlge_irq_type = LEG_IRQ;
- QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
@@ -3409,12 +3438,12 @@ static void ql_free_irq(struct ql_adapter *qdev)
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msix interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msix interrupt %d.\n", i);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msi interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msi interrupt %d.\n", i);
}
}
}
@@ -3439,32 +3468,33 @@ static int ql_request_irq(struct ql_adapter *qdev)
intr_context->name,
&qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed request for MSIX interrupt %d.\n",
- i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed request for MSIX interrupt %d.\n",
+ i);
goto err_irq;
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[i].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[i].type ==
- TX_Q ? "TX_Q" : "",
- qdev->rx_ring[i].type ==
- RX_Q ? "RX_Q" : "", intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[i].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[i].type == TX_Q ?
+ "TX_Q" :
+ qdev->rx_ring[i].type == RX_Q ?
+ "RX_Q" : "",
+ intr_context->name);
}
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "trying msi or legacy interrupts.\n");
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: irq = %d.\n", __func__, pdev->irq);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "trying msi or legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: irq = %d.\n", __func__, pdev->irq);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: context->name = %s.\n", __func__,
+ intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: dev_id = 0x%p.\n", __func__,
+ &qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
test_bit(QL_MSI_ENABLED,
@@ -3474,20 +3504,20 @@ static int ql_request_irq(struct ql_adapter *qdev)
if (status)
goto err_irq;
- QPRINTK(qdev, IFUP, ERR,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[0].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
- intr_context->name);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[0].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+ qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ intr_context->name);
}
intr_context->hooked = 1;
}
return status;
err_irq:
- QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
ql_free_irq(qdev);
return status;
}
@@ -3521,14 +3551,15 @@ static int ql_start_rss(struct ql_adapter *qdev)
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
- QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded RICB.\n");
return status;
}
@@ -3543,9 +3574,8 @@ static int ql_clear_routing_entries(struct ql_adapter *qdev)
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM "
- "packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
break;
}
}
@@ -3569,14 +3599,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for error packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for broadcast packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for broadcast packets.\n");
goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
@@ -3586,8 +3616,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
RT_IDX_RSS_MATCH, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for MATCH RSS packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for MATCH RSS packets.\n");
goto exit;
}
}
@@ -3595,8 +3625,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
@@ -3614,13 +3644,13 @@ int ql_cam_route_initialize(struct ql_adapter *qdev)
set &= qdev->port_link_up;
status = ql_set_mac_addr(qdev, set);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
return status;
}
status = ql_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
return status;
}
@@ -3685,8 +3715,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start rx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start rx ring[%d].\n", i);
return status;
}
}
@@ -3697,7 +3727,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
if (qdev->rss_ring_count > 1) {
status = ql_start_rss(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
return status;
}
}
@@ -3706,8 +3736,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->tx_ring_count; i++) {
status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start tx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start tx ring[%d].\n", i);
return status;
}
}
@@ -3715,20 +3745,20 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return status;
}
/* Start NAPI for the RSS queues. */
for (i = 0; i < qdev->rss_ring_count; i++) {
- QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
- i);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Enabling NAPI for rx_ring[%d].\n", i);
napi_enable(&qdev->rx_ring[i].napi);
}
@@ -3745,7 +3775,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
return status;
}
@@ -3768,8 +3798,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
} while (time_before(jiffies, end_jiffies));
if (value & RST_FO_FR) {
- QPRINTK(qdev, IFDOWN, ERR,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
+ netif_err(qdev, ifdown, qdev->ndev,
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
status = -ETIMEDOUT;
}
@@ -3782,16 +3812,17 @@ static void ql_display_dev_info(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- QPRINTK(qdev, PROBE, INFO,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
+ netif_info(qdev, probe, qdev->ndev,
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+ "XG Roll = %d, XG Rev = %d.\n",
+ qdev->func,
+ qdev->port,
+ qdev->chip_rev_id & 0x0000000f,
+ qdev->chip_rev_id >> 4 & 0x0000000f,
+ qdev->chip_rev_id >> 8 & 0x0000000f,
+ qdev->chip_rev_id >> 12 & 0x0000000f);
+ netif_info(qdev, probe, qdev->ndev,
+ "MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
@@ -3808,23 +3839,23 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
WAKE_MCAST | WAKE_BCAST)) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
- qdev->wol);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+ qdev->wol);
return -EINVAL;
}
if (qdev->wol & WAKE_MAGIC) {
status = ql_mb_wol_set_magic(qdev, 1);
if (status) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
return status;
} else
- QPRINTK(qdev, DRV, INFO,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
wol |= MB_WOL_MAGIC_PKT;
}
@@ -3832,9 +3863,10 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol) {
wol |= MB_WOL_MODE_ON;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Sucessfully set" : "Failed", wol,
- qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Sucessfully set" : "Failed",
+ wol, qdev->ndev->name);
}
return status;
@@ -3875,8 +3907,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
status = ql_adapter_reset(qdev);
if (status)
- QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
- qdev->func);
+ netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+ qdev->func);
return status;
}
@@ -3886,7 +3918,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
err = ql_adapter_initialize(qdev);
if (err) {
- QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+ netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3918,7 +3950,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
int status = 0;
if (ql_alloc_mem_resources(qdev)) {
- QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
status = ql_request_irq(qdev);
@@ -3934,7 +3966,7 @@ static int qlge_close(struct net_device *ndev)
* brought the adapter down.
*/
if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
- QPRINTK(qdev, DRV, ERR, "EEH fatal did unload.\n");
+ netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
clear_bit(QL_EEH_FATAL, &qdev->flags);
return 0;
}
@@ -4008,9 +4040,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- QPRINTK(qdev, IFUP, DEBUG,
- "lbq_buf_size %d, order = %d\n",
- rx_ring->lbq_buf_size, qdev->lbq_buf_order);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "lbq_buf_size %d, order = %d\n",
+ rx_ring->lbq_buf_size,
+ qdev->lbq_buf_order);
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
@@ -4074,14 +4107,14 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
int i = 3;
while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- QPRINTK(qdev, IFUP, ERR,
- "Waiting for adapter UP...\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Waiting for adapter UP...\n");
ssleep(1);
}
if (!i) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for adapter UP\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for adapter UP\n");
return -ETIMEDOUT;
}
}
@@ -4107,8 +4140,8 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
return status;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device.\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device.\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
return status;
@@ -4120,9 +4153,9 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
int status;
if (ndev->mtu == 1500 && new_mtu == 9000) {
- QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
- QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
} else
return -EINVAL;
@@ -4137,8 +4170,8 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
status = ql_change_rx_buffers(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Changing MTU failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Changing MTU failed.\n");
}
return status;
@@ -4198,8 +4231,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set promiscous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -4208,8 +4241,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear promiscous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -4221,12 +4254,12 @@ static void qlge_set_multicast_list(struct net_device *ndev)
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set all-multi mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -4235,15 +4268,15 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear all-multi mode.\n");
} else {
clear_bit(QL_ALLMULTI, &qdev->flags);
}
}
}
- if (ndev->mc_count) {
+ if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
@@ -4251,16 +4284,16 @@ static void qlge_set_multicast_list(struct net_device *ndev)
i++, mc_ptr = mc_ptr->next)
if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to loadmulticast address.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to loadmulticast address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set multicast match mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set multicast match mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -4285,7 +4318,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
if (status)
- QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+ netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
@@ -4318,8 +4351,8 @@ static void ql_asic_reset_work(struct work_struct *work)
rtnl_unlock();
return;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
@@ -4578,7 +4611,7 @@ static void ql_timer(unsigned long data)
var = ql_read32(qdev, STS);
if (pci_channel_offline(qdev->pdev)) {
- QPRINTK(qdev, IFUP, ERR, "EEH STS = 0x%.08x.\n", var);
+ netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
return;
}
@@ -4747,14 +4780,14 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
- QPRINTK(qdev, IFUP, ERR,
- "Cannot re-enable PCI device after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
if (ql_adapter_reset(qdev)) {
- QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
+ netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -4771,13 +4804,13 @@ static void qlge_io_resume(struct pci_dev *pdev)
if (netif_running(ndev)) {
err = qlge_open(ndev);
if (err) {
- QPRINTK(qdev, IFUP, ERR,
- "Device initialization failed after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device initialization failed after reset.\n");
return;
}
} else {
- QPRINTK(qdev, IFUP, ERR,
- "Device was not running prior to EEH.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device was not running prior to EEH.\n");
}
qdev->timer.expires = jiffies + (5*HZ);
add_timer(&qdev->timer);
@@ -4828,7 +4861,7 @@ static int qlge_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
+ netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
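The qlge_mpi.c hunks that follow continue the same conversion as above: the driver-private QPRINTK() macro is replaced by the generic message-level helpers from <linux/netdevice.h>. As a rough sketch of the mapping (the foo_adapter structure below is purely illustrative, not the real qlge adapter layout), ERR/WARNING/INFO levels become netif_err()/netif_warn()/netif_info(), and DEBUG-level output becomes netif_printk() with KERN_DEBUG:

#include <linux/netdevice.h>

/* Illustrative adapter; the netif_* helpers only require a
 * msg_enable bitmask (NETIF_MSG_* flags, typically set via ethtool)
 * in the private struct passed as their first argument.
 */
struct foo_adapter {
	u32 msg_enable;
	struct net_device *ndev;
};

static void foo_report(struct foo_adapter *adap, int err)
{
	/* Old style:  QPRINTK(adap, DRV, ERR, "error %d\n", err);
	 * New style:  the helper checks netif_msg_drv(adap) itself and
	 * prefixes the driver and device name automatically.
	 */
	netif_err(adap, drv, adap->ndev, "error %d\n", err);

	/* Debug-level messages keep an explicit KERN_DEBUG level via
	 * netif_printk(), matching the hunks above.
	 */
	netif_printk(adap, rx_status, KERN_DEBUG, adap->ndev,
		     "rx status ok (%d)\n", err);
}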
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2c846f17fc..3c00462a5d2 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -135,7 +135,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
}
}
@@ -208,7 +208,7 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
- QPRINTK(qdev, DRV, ERR, "Enter!\n");
+ netif_err(qdev, drv, qdev->ndev, "Enter!\n");
/* Get the status data and start up a thread to
* handle the request.
*/
@@ -216,8 +216,8 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
} else {
/* Begin polled mode early so
@@ -240,8 +240,8 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting RISC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting RISC!\n");
ql_queue_fw_error(qdev);
} else
/* Wake up the sleeping mpi_idc_work thread that is
@@ -259,13 +259,13 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "%s: Could not get mailbox status.\n", __func__);
+ netif_err(qdev, drv, qdev->ndev,
+ "%s: Could not get mailbox status.\n", __func__);
return;
}
qdev->link_status = mbcp->mbox_out[1];
- QPRINTK(qdev, DRV, ERR, "Link Up.\n");
+ netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
/* If we're coming back from an IDC event
* then set up the CAM and frame routing.
@@ -273,8 +273,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return;
} else
clear_bit(QL_CAM_RT_SET, &qdev->flags);
@@ -285,7 +285,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* to our liking.
*/
if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
+ netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
set_bit(QL_PORT_CFG, &qdev->flags);
/* Begin polled mode early so
* we don't get another interrupt
@@ -307,7 +307,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
ql_link_off(qdev);
}
@@ -320,9 +320,9 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
return status;
}
@@ -335,9 +335,9 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
return status;
}
@@ -350,13 +350,13 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
else {
int i;
- QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
for (i = 0; i < mbcp->out_count; i++)
- QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
+ netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
}
@@ -371,15 +371,15 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+ netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
} else {
- QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
+ netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
qdev->fw_rev_id = mbcp->mbox_out[1];
status = ql_cam_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
}
}
@@ -398,8 +398,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->out_count = 1;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
goto end;
}
@@ -488,15 +488,14 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->mbox_out[0] = MB_CMD_STS_ERR;
return status;
}
- QPRINTK(qdev, DRV, ERR,
- "Firmware initialization failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware initialization failed.\n");
status = -EIO;
ql_queue_fw_error(qdev);
break;
case AEN_SYS_ERR:
- QPRINTK(qdev, DRV, ERR,
- "System Error.\n");
+ netif_err(qdev, drv, qdev->ndev, "System Error.\n");
ql_queue_fw_error(qdev);
status = -EIO;
break;
@@ -509,8 +508,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Need to support AEN 8110 */
break;
default:
- QPRINTK(qdev, DRV, ERR,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
/* Clear the MPI firmware status. */
}
end:
@@ -583,8 +582,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
goto done;
} while (time_before(jiffies, count));
- QPRINTK(qdev, DRV, ERR,
- "Timed out waiting for mailbox complete.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Timed out waiting for mailbox complete.\n");
status = -ETIMEDOUT;
goto end;
@@ -646,8 +645,8 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed about firmware command\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed about firmware command\n");
status = -EIO;
}
@@ -678,8 +677,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Firmware State.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Firmware State.\n");
status = -EIO;
}
@@ -688,8 +687,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
* happen.
*/
if (mbcp->mbox_out[1] & 1) {
- QPRINTK(qdev, DRV, ERR,
- "Firmware waiting for initialization.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware waiting for initialization.\n");
status = -EIO;
}
@@ -721,8 +720,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed IDC ACK send.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
status = -EIO;
}
return status;
@@ -753,11 +751,11 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- QPRINTK(qdev, DRV, ERR,
- "Port Config sent, wait for IDC.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Port Config sent, wait for IDC.\n");
} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Set Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Set Port Configuration.\n");
status = -EIO;
}
return status;
@@ -791,8 +789,7 @@ int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to dump risc RAM.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
status = -EIO;
}
return status;
@@ -842,12 +839,12 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Port Configuration.\n");
status = -EIO;
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "Passed Get Port Configuration.\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Passed Get Port Configuration.\n");
qdev->link_config = mbcp->mbox_out[1];
qdev->max_frame_size = mbcp->mbox_out[2];
}
@@ -874,8 +871,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -917,8 +913,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -944,8 +939,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
wait_for_completion_timeout(&qdev->ide_completion,
wait_time);
if (!wait_time) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Timeout.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
break;
}
/* Now examine the response from the IDC process.
@@ -953,18 +947,17 @@ static int ql_idc_wait(struct ql_adapter *qdev)
* more wait time.
*/
if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Time Extension from function.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC Time Extension from function.\n");
wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Success.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
status = 0;
break;
} else {
- QPRINTK(qdev, DRV, ERR,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
status = -EIO;
break;
}
@@ -993,8 +986,8 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to set LED Configuration.\n");
status = -EIO;
}
@@ -1019,8 +1012,8 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get LED Configuration.\n");
status = -EIO;
} else
qdev->led_config = mbcp->mbox_out[1];
@@ -1050,16 +1043,16 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
/* This indicates that the firmware is
* already in the state we are trying to
* change it to.
*/
- QPRINTK(qdev, DRV, ERR,
- "Command parameters make no change.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command parameters make no change.\n");
}
return status;
}
@@ -1089,12 +1082,12 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
}
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get MPI traffic control.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get MPI traffic control.\n");
status = -EIO;
}
return status;
@@ -1150,8 +1143,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
status = ql_mb_get_port_cfg(qdev);
rtnl_unlock();
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to get port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to get port config data.\n");
goto err;
}
@@ -1164,8 +1157,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
status = ql_set_port_cfg(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to set port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to set port config data.\n");
goto err;
}
end:
@@ -1197,8 +1190,8 @@ void ql_mpi_idc_work(struct work_struct *work)
switch (aen) {
default:
- QPRINTK(qdev, DRV, ERR,
- "Bug: Unhandled IDC action.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Unhandled IDC action.\n");
break;
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
@@ -1213,11 +1206,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1246,11 +1239,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1298,12 +1291,12 @@ void ql_mpi_reset_work(struct work_struct *work)
* then there is nothing to do.
*/
if (!ql_own_firmware(qdev)) {
- QPRINTK(qdev, DRV, ERR, "Don't own firmware!\n");
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
return;
}
if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
- QPRINTK(qdev, DRV, ERR, "Core is dumped!\n");
+ netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
&qdev->mpi_core_to_log, 5 * HZ);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index d68ba7a5863..b8103425fac 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -958,21 +958,22 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Too many multicast addresses
* accept all traffic */
- else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI))
+ else if ((netdev_mc_count(dev) > MCAST_MAX) ||
+ (dev->flags & IFF_ALLMULTI))
reg |= 0x0020;
iowrite16(reg, ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
/* Build the hash table */
- if (dev->mc_count > MCAST_MAX) {
+ if (netdev_mc_count(dev) > MCAST_MAX) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -994,14 +995,14 @@ static void r6040_multicast_list(struct net_device *dev)
iowrite16(hash_table[3], ioaddr + MAR3);
}
/* Multicast Address 1~4 case */
- for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
+ for (i = 0, dmi; (i < netdev_mc_count(dev)) && (i < MCAST_MAX); i++) {
adrp = (u16 *)dmi->dmi_addr;
iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
iowrite16(adrp[1], ioaddr + MID_1M + 8*i);
iowrite16(adrp[2], ioaddr + MID_1H + 8*i);
dmi = dmi->next;
}
- for (i = dev->mc_count; i < MCAST_MAX; i++) {
+ for (i = netdev_mc_count(dev); i < MCAST_MAX; i++) {
iowrite16(0xffff, ioaddr + MID_0L + 8*i);
iowrite16(0xffff, ioaddr + MID_0M + 8*i);
iowrite16(0xffff, ioaddr + MID_0H + 8*i);
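The r8169 hunks that follow fold the open-coded "if (netif_msg_*(tp)) printk(...)" pairs into the same single-call helpers; the level check and the "%s: " device-name prefix move inside the macro. A before/after sketch, assuming only a msg_enable field in the private struct (foo_private and foo_link_report are illustrative names):

#include <linux/netdevice.h>

struct foo_private {
	u32 msg_enable;
};

static void foo_link_report(struct foo_private *tp, struct net_device *dev,
			    bool link_up)
{
	/* Before:
	 *	if (netif_msg_link(tp))
	 *		printk(KERN_INFO PFX "%s: link up\n", dev->name);
	 * After (the helper performs the netif_msg_link() test itself):
	 */
	if (link_up)
		netif_info(tp, link, dev, "link up\n");
	else
		netif_info(tp, link, dev, "link down\n");
}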
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c1bb24cf079..616ae5aa66a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -744,12 +744,10 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
netif_carrier_on(dev);
- if (netif_msg_ifup(tp))
- printk(KERN_INFO PFX "%s: link up\n", dev->name);
+ netif_info(tp, ifup, dev, "link up\n");
} else {
- if (netif_msg_ifdown(tp))
- printk(KERN_INFO PFX "%s: link down\n", dev->name);
netif_carrier_off(dev);
+ netif_info(tp, ifdown, dev, "link down\n");
}
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -862,11 +860,8 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
} else if (autoneg == AUTONEG_ENABLE)
RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
else {
- if (netif_msg_link(tp)) {
- printk(KERN_WARNING "%s: "
- "incorrect speed setting refused in TBI mode\n",
- dev->name);
- }
+ netif_warn(tp, link, dev,
+ "incorrect speed setting refused in TBI mode\n");
ret = -EOPNOTSUPP;
}
@@ -901,9 +896,9 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
(tp->mac_version != RTL_GIGA_MAC_VER_15) &&
(tp->mac_version != RTL_GIGA_MAC_VER_16)) {
giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else if (netif_msg_link(tp)) {
- printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
- dev->name);
+ } else {
+ netif_info(tp, link, dev,
+ "PHY does not support 1000Mbps\n");
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -2705,8 +2700,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
if (tp->link_ok(ioaddr))
goto out_unlock;
- if (netif_msg_link(tp))
- printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
+ netif_warn(tp, link, dev, "PHY reset until link up\n");
tp->phy_reset_enable(ioaddr);
@@ -2776,8 +2770,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
return;
msleep(1);
}
- if (netif_msg_link(tp))
- printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+ netif_err(tp, link, dev, "PHY reset failed\n");
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -2811,8 +2804,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
*/
rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
- if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+ if (RTL_R8(PHYstatus) & TBI_Enable)
+ netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -3012,8 +3005,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "enable failure\n");
+ netif_err(tp, probe, dev, "enable failure\n");
goto err_out_free_dev_1;
}
@@ -3023,29 +3015,24 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- }
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
rc = -ENODEV;
goto err_out_mwi_3;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "Invalid PCI region size(s), aborting\n");
- }
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out_mwi_3;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "could not request regions.\n");
+ netif_err(tp, probe, dev, "could not request regions\n");
goto err_out_mwi_3;
}
@@ -3058,10 +3045,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "DMA configuration failed.\n");
- }
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
goto err_out_free_res_4;
}
}
@@ -3069,15 +3053,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out_free_res_4;
}
tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!tp->pcie_cap && netif_msg_probe(tp))
- dev_info(&pdev->dev, "no PCI Express capability\n");
+ if (!tp->pcie_cap)
+ netif_info(tp, probe, dev, "no PCI Express capability\n");
RTL_W16(IntrMask, 0x0000);
@@ -3100,10 +3083,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Use appropriate default if unknown */
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- if (netif_msg_probe(tp)) {
- dev_notice(&pdev->dev,
- "unknown MAC, using family default\n");
- }
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
tp->mac_version = cfg->default_ver;
}
@@ -3185,14 +3166,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- if (netif_msg_probe(tp)) {
- u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
-
- printk(KERN_INFO "%s: %s at 0x%lx, %pM, XID %08x IRQ %d\n",
- dev->name,
- rtl_chip_info[tp->chipset].name,
- dev->base_addr, dev->dev_addr, xid, dev->irq);
- }
+ netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+ rtl_chip_info[tp->chipset].name,
+ dev->base_addr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
rtl8169_init_phy(dev, tp);
@@ -4131,10 +4108,10 @@ static void rtl8169_reinit_task(struct work_struct *work)
ret = rtl8169_open(dev);
if (unlikely(ret < 0)) {
- if (net_ratelimit() && netif_msg_drv(tp)) {
- printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
- " Rescheduling.\n", dev->name, ret);
- }
+ if (net_ratelimit())
+ netif_err(tp, drv, dev,
+ "reinit failure (status = %d). Rescheduling\n",
+ ret);
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
@@ -4164,10 +4141,8 @@ static void rtl8169_reset_task(struct work_struct *work)
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
} else {
- if (net_ratelimit() && netif_msg_intr(tp)) {
- printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
- dev->name);
- }
+ if (net_ratelimit())
+ netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -4255,11 +4230,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts1;
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
- if (netif_msg_drv(tp)) {
- printk(KERN_ERR
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
- }
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop;
}
@@ -4321,11 +4292,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- if (netif_msg_intr(tp)) {
- printk(KERN_ERR
- "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
- dev->name, pci_cmd, pci_status);
- }
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
/*
* The recovery sequence below admits a very elaborated explanation:
@@ -4349,8 +4317,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
- if (netif_msg_intr(tp))
- printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
@@ -4477,11 +4444,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
if (status & DescOwn)
break;
if (unlikely(status & RxRES)) {
- if (netif_msg_rx_err(tp)) {
- printk(KERN_INFO
- "%s: Rx ERROR. status = %08x\n",
- dev->name, status);
- }
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
@@ -4544,8 +4508,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
/*
@@ -4555,8 +4519,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
* after refill ?
* - how do others driver handle this condition (Uh oh...).
*/
- if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+ if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -4611,10 +4575,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (likely(napi_schedule_prep(&tp->napi)))
__napi_schedule(&tp->napi);
- else if (netif_msg_intr(tp)) {
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
- dev->name, status);
- }
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
}
/* We only get a new MSI interrupt when all active irq
@@ -4750,15 +4713,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- if (netif_msg_link(tp)) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
- }
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -4769,7 +4729,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
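
The r8169 hunks above all follow one pattern: the open-coded netif_msg_*() test plus printk is folded into a single netif_<level>() call. A minimal sketch, assuming an illustrative private struct; only the msg_enable field and the netif_* macros from <linux/netdevice.h> are real.

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_priv {
        u32 msg_enable;                 /* tested by netif_msg_<type>() */
};

static void example_report_link(struct example_priv *tp,
                                struct net_device *dev, bool up)
{
        /* old style: explicit level test, manual "%s: " dev->name prefix */
        if (netif_msg_link(tp))
                printk(KERN_INFO "%s: link %s\n", dev->name,
                       up ? "up" : "down");

        /* new style: the macro checks tp->msg_enable and prints the
         * driver and device names itself */
        netif_info(tp, link, dev, "link %s\n", up ? "up" : "down");
}
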
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index d1664586e8f..102be16e9b5 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -5055,8 +5055,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && dev->mc_count) {
- if (dev->mc_count >
+ if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
+ if (netdev_mc_count(dev) >
(config->max_mc_addr - config->max_mac_addr)) {
DBG_PRINT(ERR_DBG,
"%s: No more Rx filters can be added - "
@@ -5066,7 +5066,7 @@ static void s2io_set_multicast(struct net_device *dev)
}
prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = dev->mc_count;
+ sp->mc_addr_count = netdev_mc_count(dev);
/* Clear out the previous list of Mc in the H/W. */
for (i = 0; i < prev_cnt; i++) {
@@ -5092,7 +5092,7 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Create the new Rx filter list and update the same in H/W. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index fd8cb506a2b..00ff8995ad6 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -429,7 +429,7 @@ static void _sc92031_set_mar(struct net_device *dev)
u32 mar0 = 0, mar1 = 0;
if ((dev->flags & IFF_PROMISC) ||
- dev->mc_count > multicast_filter_limit ||
+ netdev_mc_count(dev) > multicast_filter_limit ||
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 62d5cd51a9d..dc58d9fd0f3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1615,7 +1615,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- for (i = 0; i < net_dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net_dev); i++) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
+ WARN_ON(rc > 0);
EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 14793d8bd66..1bee62c8300 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
falcon_board(efx)->type->init_phy(efx);
- return rc;
+ return 0;
fail:
EFX_ERR(efx, "PHY reset timed out\n");
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 6b364a6c6c6..ed999d31f1f 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -660,7 +660,7 @@ static void sgiseeq_set_multicast(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
sp->mode = SEEQ_RCMD_RANY;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
sp->mode = SEEQ_RCMD_RBMCAST;
else
sp->mode = SEEQ_RCMD_RBCAST;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 626de766443..8c4e38f9ebf 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -841,7 +841,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -852,7 +852,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr =
ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 20c5ce47489..32ae87c09f5 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -2288,7 +2288,7 @@ static void set_rx_mode(struct net_device *net_dev)
rx_mode = RFPromiscuous;
for (i = 0; i < table_entries; i++)
mc_filter[i] = 0xffff;
- } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
(net_dev->flags & IFF_ALLMULTI)) {
/* too many multicast addresses or accept all multicast packet */
rx_mode = RFAAB | RFAAM;
@@ -2301,7 +2301,7 @@ static void set_rx_mode(struct net_device *net_dev)
struct dev_mc_list *mclist;
rx_mode = RFAAB;
for (i = 0, mclist = net_dev->mc_list;
- mclist && i < net_dev->mc_count;
+ mclist && i < netdev_mc_count(net_dev);
i++, mclist = mclist->next) {
unsigned int bit_nr =
sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 6b955a4f19b..346adfae986 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -872,14 +872,14 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
- } else if (dev->mc_count > 0) {
- if (dev->mc_count <= FPMAX_MULTICAST) {
+ } else if (!netdev_mc_empty(dev)) {
+ if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
dmi = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
mac_add_multicast(smc,
(struct fddi_addr *)dmi->dmi_addr,
1);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5ff46eb18d0..ffa55df4d60 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1074,13 +1074,11 @@ static void skge_link_up(struct skge_port *skge)
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
- if (netif_msg_link(skge)) {
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- skge->netdev->name, skge->speed,
- skge->duplex == DUPLEX_FULL ? "full" : "half",
- skge_pause(skge->flow_status));
- }
+ netif_info(skge, link, skge->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ skge->speed,
+ skge->duplex == DUPLEX_FULL ? "full" : "half",
+ skge_pause(skge->flow_status));
}
static void skge_link_down(struct skge_port *skge)
@@ -1089,8 +1087,7 @@ static void skge_link_down(struct skge_port *skge)
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
- if (netif_msg_link(skge))
- printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
+ netif_info(skge, link, skge->netdev, "Link is down\n");
}
@@ -1792,9 +1789,8 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u16 status = xm_read16(hw, port, XM_ISRC);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
xm_link_down(hw, port);
@@ -1898,9 +1894,8 @@ static inline void bcom_phy_intr(struct skge_port *skge)
u16 isrc;
isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
- skge->netdev->name, isrc);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x\n", isrc);
if (isrc & PHY_B_IS_PSE)
printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
@@ -2298,9 +2293,8 @@ static void yukon_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_FF_OR) {
++dev->stats.rx_fifo_errors;
@@ -2379,9 +2373,8 @@ static void yukon_phy_intr(struct skge_port *skge)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
- skge->netdev->name, istatus, phystat);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
@@ -2571,8 +2564,7 @@ static int skge_up(struct net_device *dev)
if (!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
- if (netif_msg_ifup(skge))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(skge, ifup, skge->netdev, "enabling interface\n");
if (dev->mtu > RX_BUF_SIZE)
skge->rx_buf_size = dev->mtu + ETH_HLEN;
@@ -2670,8 +2662,7 @@ static int skge_down(struct net_device *dev)
if (skge->mem == NULL)
return 0;
- if (netif_msg_ifdown(skge))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+ netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
netif_tx_disable(dev);
@@ -2825,9 +2816,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
- if (unlikely(netif_msg_tx_queued(skge)))
- printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
- dev->name, e - skge->tx_ring.start, skb->len);
+ netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+ "tx queued, slot %td, len %d\n",
+ e - skge->tx_ring.start, skb->len);
skge->tx_ring.to_use = e->next;
smp_wmb();
@@ -2858,9 +2849,8 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - skge->tx_ring.start);
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n", e - skge->tx_ring.start);
dev_kfree_skb(e->skb);
}
@@ -2885,8 +2875,7 @@ static void skge_tx_timeout(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_timer(skge))
- printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
+ netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
skge_tx_clean(dev);
@@ -2932,7 +2921,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- int i, count = dev->mc_count;
+ int i, count = netdev_mc_count(dev);
struct dev_mc_list *list = dev->mc_list;
u32 mode;
u8 filter[8];
@@ -2987,7 +2976,7 @@ static void yukon_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI) /* all multicast */
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
@@ -2996,7 +2985,7 @@ static void yukon_set_multicast(struct net_device *dev)
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
yukon_add_filter(filter, list->dmi_addr);
}
@@ -3054,10 +3043,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
struct sk_buff *skb;
u16 len = control & BMU_BBC;
- if (unlikely(netif_msg_rx_status(skge)))
- printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- dev->name, e - skge->rx_ring.start,
- status, len);
+ netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+ "rx slot %td status 0x%x len %d\n",
+ e - skge->rx_ring.start, status, len);
if (len > skge->rx_buf_size)
goto error;
@@ -3111,10 +3099,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
return skb;
error:
- if (netif_msg_rx_err(skge))
- printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
- dev->name, e - skge->rx_ring.start,
- control, status);
+ netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+ "rx err, slot %td control 0x%x status 0x%x\n",
+ e - skge->rx_ring.start, control, status);
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
@@ -3885,9 +3872,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
{
const struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_probe(skge))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
static int __devinit skge_probe(struct pci_dev *pdev,
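
For debug-level messages the skge conversion above keeps the explicit KERN_DEBUG by using netif_printk(), which takes the NETIF_MSG_* type and the KERN_* level separately. A hedged sketch; the example_port struct and the status value are illustrative, only the macro is real.

#include <linux/netdevice.h>

struct example_port {
        u32 msg_enable;
        struct net_device *netdev;
};

static void example_trace_intr(struct example_port *skge, u16 status)
{
        /* replaces: if (netif_msg_intr(skge)) printk(KERN_DEBUG ...) */
        netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
                     "mac interrupt status 0x%x\n", status);
}
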
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 744362272e2..dfff8e56a51 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.26"
+#define DRV_VERSION "1.27"
#define PFX DRV_NAME " "
/*
@@ -251,6 +251,8 @@ static void sky2_power_on(struct sky2_hw *hw)
sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
reg = sky2_read32(hw, B2_GP_IO);
reg |= GLB_GPIO_STAT_RACE_DIS;
@@ -731,7 +733,6 @@ static void sky2_wol_init(struct sky2_port *sky2)
unsigned port = sky2->port;
enum flow_control save_mode;
u16 ctrl;
- u32 reg1;
/* Bring hardware out of reset */
sky2_write16(hw, B0_CTST, CS_RST_CLR);
@@ -782,14 +783,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
- /* Turn on legacy PCI-Express PME mode */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- reg1 |= PCI_Y2_PME_LEGACY;
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
/* block receiver */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
-
}
static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -800,29 +798,15 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
hw->chip_rev != CHIP_REV_YU_EX_A0) ||
hw->chip_id >= CHIP_ID_YUKON_FE_P) {
/* Yukon-Extreme B0 and further Extreme devices */
- /* enable Store & Forward mode for TX */
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+ } else if (dev->mtu > ETH_DATA_LEN) {
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_DIS | TX_STFW_ENA);
-
- else
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_ENA| TX_STFW_ENA);
- } else {
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
- else {
- /* set Tx GMAC FIFO Almost Empty Threshold */
- sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
- (ECU_JUMBO_WM << 16) | ECU_AE_THR);
-
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
-
- /* Can't do offload because of lack of store/forward */
- dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
- }
- }
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
}
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -1065,6 +1049,40 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
return le;
}
+static unsigned sky2_get_rx_threshold(struct sky2_port* sky2)
+{
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ /* Stopping point for hardware truncation */
+ return (size - 8) / sizeof(u32);
+}
+
+static unsigned sky2_get_rx_data_size(struct sky2_port* sky2)
+{
+ struct rx_ring_info *re;
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ sky2->rx_nfrags = size >> PAGE_SHIFT;
+ BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+ /* Compute residue after pages */
+ size -= sky2->rx_nfrags << PAGE_SHIFT;
+
+ /* Optimize to handle small packets and headers */
+ if (size < copybreak)
+ size = copybreak;
+ if (size < ETH_HLEN)
+ size = ETH_HLEN;
+
+ return size;
+}
+
/* Build description to hardware for one receive segment */
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
dma_addr_t map, unsigned len)
@@ -1345,8 +1363,32 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned i;
+
+ sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+ /* Fill Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ re->skb = sky2_rx_alloc(sky2);
+ if (!re->skb)
+ return -ENOMEM;
+
+ if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+ dev_kfree_skb(re->skb);
+ re->skb = NULL;
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
/*
- * Allocate and setup receiver buffer pool.
+ * Setup receiver buffer pool.
* Normal case this ends up creating one list element for skb
* in the receive ring. Worst case if using large MTU and each
* allocation falls on a different 64 bit region, that results
@@ -1354,12 +1396,12 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
* One element is used for checksum enable/disable, and one
* extra to avoid wrap.
*/
-static int sky2_rx_start(struct sky2_port *sky2)
+static void sky2_rx_start(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
struct rx_ring_info *re;
unsigned rxq = rxqaddr[sky2->port];
- unsigned i, size, thresh;
+ unsigned i, thresh;
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
@@ -1380,40 +1422,9 @@ static int sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
- /* Space needed for frame data + headers rounded up */
- size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
-
- /* Stopping point for hardware truncation */
- thresh = (size - 8) / sizeof(u32);
-
- sky2->rx_nfrags = size >> PAGE_SHIFT;
- BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
-
- /* Compute residue after pages */
- size -= sky2->rx_nfrags << PAGE_SHIFT;
-
- /* Optimize to handle small packets and headers */
- if (size < copybreak)
- size = copybreak;
- if (size < ETH_HLEN)
- size = ETH_HLEN;
-
- sky2->rx_data_size = size;
-
- /* Fill Rx ring */
+ /* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
-
- re->skb = sky2_rx_alloc(sky2);
- if (!re->skb)
- goto nomem;
-
- if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
- dev_kfree_skb(re->skb);
- re->skb = NULL;
- goto nomem;
- }
-
sky2_rx_submit(sky2, re);
}
@@ -1423,6 +1434,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
* the register is limited to 9 bits, so if you do frames > 2052
* you better get the MTU right!
*/
+ thresh = sky2_get_rx_threshold(sky2);
if (thresh > 0x1ff)
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
else {
@@ -1454,13 +1466,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
}
-
-
-
- return 0;
-nomem:
- sky2_rx_clean(sky2);
- return -ENOMEM;
}
static int sky2_alloc_buffers(struct sky2_port *sky2)
@@ -1491,7 +1496,7 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
if (!sky2->rx_ring)
goto nomem;
- return 0;
+ return sky2_alloc_rx_skbs(sky2);
nomem:
return -ENOMEM;
}
@@ -1500,6 +1505,8 @@ static void sky2_free_buffers(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
+ sky2_rx_clean(sky2);
+
if (sky2->rx_le) {
pci_free_consistent(hw->pdev, RX_LE_BYTES,
sky2->rx_le, sky2->rx_le_map);
@@ -1518,16 +1525,16 @@ static void sky2_free_buffers(struct sky2_port *sky2)
sky2->rx_ring = NULL;
}
-/* Bring up network interface. */
-static int sky2_up(struct net_device *dev)
+static void sky2_hw_up(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 imask, ramsize;
- int cap, err;
+ u32 ramsize;
+ int cap;
struct net_device *otherdev = hw->dev[sky2->port^1];
+ tx_init(sky2);
+
/*
* On dual port PCI-X card, there is an problem where status
* can be received out of order due to split transactions
@@ -1539,16 +1546,7 @@ static int sky2_up(struct net_device *dev)
cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
cmd &= ~PCI_X_CMD_MAX_SPLIT;
sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
-
- }
-
- netif_carrier_off(dev);
-
- err = sky2_alloc_buffers(sky2);
- if (err)
- goto err_out;
-
- tx_init(sky2);
+ }
sky2_mac_init(hw, port);
@@ -1557,7 +1555,7 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
- pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
+ pr_debug(PFX "%s: ram buffer %dK\n", sky2->netdev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
else
@@ -1589,10 +1587,26 @@ static int sky2_up(struct net_device *dev)
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
- err = sky2_rx_start(sky2);
+ sky2_rx_start(sky2);
+}
+
+/* Bring up network interface. */
+static int sky2_up(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 imask;
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = sky2_alloc_buffers(sky2);
if (err)
goto err_out;
+ sky2_hw_up(sky2);
+
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
@@ -1866,10 +1880,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
-
- /* Wake unless it's detached, and called e.g. from sky2_down() */
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
- netif_wake_queue(dev);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -1894,21 +1904,11 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
}
-/* Network shutdown */
-static int sky2_down(struct net_device *dev)
+static void sky2_hw_down(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 ctrl;
- u32 imask;
-
- /* Never really got started! */
- if (!sky2->tx_le)
- return 0;
-
- if (netif_msg_ifdown(sky2))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
/* Force flow control off */
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1941,15 +1941,6 @@ static int sky2_down(struct net_device *dev)
sky2_rx_stop(sky2);
- /* Disable port IRQ */
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~portirq_msk[port];
- sky2_write32(hw, B0_IMSK, imask);
- sky2_read32(hw, B0_IMSK);
-
- synchronize_irq(hw->pdev->irq);
- napi_synchronize(&hw->napi);
-
spin_lock_bh(&sky2->phy_lock);
sky2_phy_power_down(hw, port);
spin_unlock_bh(&sky2->phy_lock);
@@ -1958,8 +1949,30 @@ static int sky2_down(struct net_device *dev)
/* Free any pending frames stuck in HW queue */
sky2_tx_complete(sky2, sky2->tx_prod);
+}
- sky2_rx_clean(sky2);
+/* Network shutdown */
+static int sky2_down(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
+ if (netif_msg_ifdown(sky2))
+ printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+
+ /* Disable port IRQ */
+ sky2_write32(hw, B0_IMSK,
+ sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
+ sky2_read32(hw, B0_IMSK);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+
+ sky2_hw_down(sky2);
sky2_free_buffers(sky2);
@@ -2208,14 +2221,20 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u16 ctl, mode;
u32 imask;
+ /* MTU size outside the spec */
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
+ /* MTU > 1500 on yukon FE and FE+ not allowed */
if (new_mtu > ETH_DATA_LEN &&
(hw->chip_id == CHIP_ID_YUKON_FE ||
hw->chip_id == CHIP_ID_YUKON_FE_P))
return -EINVAL;
+ /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
+ if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
+ dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+
if (!netif_running(dev)) {
dev->mtu = new_mtu;
return 0;
@@ -2250,7 +2269,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
- err = sky2_rx_start(sky2);
+ err = sky2_alloc_rx_skbs(sky2);
+ if (!err)
+ sky2_rx_start(sky2);
+ else
+ sky2_rx_clean(sky2);
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2447,8 +2470,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_running(dev))
+ if (netif_running(dev)) {
sky2_tx_complete(sky2, last);
+
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ netif_wake_queue(dev);
+ }
}
static inline void sky2_skb_rx(const struct sky2_port *sky2,
@@ -2484,6 +2512,32 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
}
}
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+ /* If this happens then driver assuming wrong format for chip type */
+ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+ /* Both checksum counters are programmed to start at
+ * the same offset, so unless there is a problem they
+ * should match. This failure is an early indication that
+ * hardware receive checksumming won't work.
+ */
+ if (likely((u16)(status >> 16) == (u16)status)) {
+ struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(status);
+ } else {
+ dev_notice(&sky2->hw->pdev->dev,
+ "%s: receive checksum problem (status = %#x)\n",
+ sky2->netdev->name, status);
+
+ /* Disable checksum offload */
+ sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_CHKSUM);
+ }
+}
+
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2552,37 +2606,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
/* fall through */
#endif
case OP_RXCHKS:
- if (!(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
- break;
-
- /* If this happens then driver assuming wrong format */
- if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
- if (net_ratelimit())
- printk(KERN_NOTICE "%s: unexpected"
- " checksum status\n",
- dev->name);
- break;
- }
-
- /* Both checksum counters are programmed to start at
- * the same offset, so unless there is a problem they
- * should match. This failure is an early indication that
- * hardware receive checksumming won't work.
- */
- if (likely(status >> 16 == (status & 0xffff))) {
- skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = le16_to_cpu(status);
- } else {
- printk(KERN_NOTICE PFX "%s: hardware receive "
- "checksum problem (status = %#x)\n",
- dev->name, status);
- sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
-
- sky2_write32(sky2->hw,
- Q_ADDR(rxqaddr[port], Q_CSR),
- BMU_DIS_RX_CHKSUM);
- }
+ if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
+ sky2_rx_checksum(sky2, status);
break;
case OP_TXINDEXLE:
@@ -3035,11 +3060,20 @@ static void sky2_reset(struct sky2_hw *hw)
u32 hwe_mask = Y2_HWE_ALL_MASK;
/* disable ASF */
- if (hw->chip_id == CHIP_ID_YUKON_EX) {
+ if (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ sky2_write32(hw, CPU_WDOG, 0);
status = sky2_read16(hw, HCU_CCSR);
status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
HCU_CCSR_UC_STATE_MSK);
+ /*
+ * CPU clock divider shouldn't be used because
+ * - ASF firmware may malfunction
+ * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+ */
+ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
sky2_write16(hw, HCU_CCSR, status);
+ sky2_write32(hw, CPU_WDOG, 0);
} else
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
@@ -3122,7 +3156,7 @@ static void sky2_reset(struct sky2_hw *hw)
/* check if PSMv2 was running before */
reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
if (reg & PCI_EXP_LNKCTL_ASPMC) {
- int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
/* restore the PCIe Link Control register */
sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
}
@@ -3243,48 +3277,53 @@ static int sky2_reattach(struct net_device *dev)
static void sky2_restart(struct work_struct *work)
{
struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+ u32 imask;
int i;
rtnl_lock();
- for (i = 0; i < hw->ports; i++)
- sky2_detach(hw->dev[i]);
napi_disable(&hw->napi);
+ synchronize_irq(hw->pdev->irq);
+ imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
- sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
- for (i = 0; i < hw->ports; i++)
- sky2_reattach(hw->dev[i]);
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
- rtnl_unlock();
-}
+ if (!netif_running(dev))
+ continue;
-static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
-{
- return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
-}
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ sky2_hw_down(sky2);
+ }
-static void sky2_hw_set_wol(struct sky2_hw *hw)
-{
- int wol = 0;
- int i;
+ sky2_reset(hw);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
- if (sky2->wol)
- wol = 1;
+ if (!netif_running(dev))
+ continue;
+
+ sky2_hw_up(sky2);
+ netif_wake_queue(dev);
}
- if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
- device_set_wakeup_enable(&hw->pdev->dev, wol);
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
+
+ rtnl_unlock();
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+ return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3305,11 +3344,6 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
sky2->wol = wol->wolopts;
-
- sky2_hw_set_wol(hw);
-
- if (!netif_running(dev))
- sky2_wol_init(sky2);
return 0;
}
@@ -3620,7 +3654,7 @@ static void sky2_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI)
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)
+ else if (netdev_mc_empty(dev) && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
int i;
@@ -3629,7 +3663,7 @@ static void sky2_set_multicast(struct net_device *dev)
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ for (i = 0; list && i < netdev_mc_count(dev); i++, list = list->next)
sky2_add_filter(filter, list->dmi_addr);
}
@@ -4803,7 +4837,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4828,6 +4861,8 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
wol |= sky2->wol;
}
+ device_set_wakeup_enable(&pdev->dev, wol != 0);
+
sky2_write32(hw, B0_IMSK, 0);
napi_disable(&hw->napi);
sky2_power_aux(hw);
@@ -4840,6 +4875,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
+#ifdef CONFIG_PM
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4859,10 +4895,11 @@ static int sky2_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
/* Re-enable all clocks */
- if (hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto out;
+ }
sky2_reset(hw);
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
@@ -4888,34 +4925,7 @@ out:
static void sky2_shutdown(struct pci_dev *pdev)
{
- struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, wol = 0;
-
- if (!hw)
- return;
-
- rtnl_lock();
- del_timer_sync(&hw->watchdog_timer);
-
- for (i = 0; i < hw->ports; i++) {
- struct net_device *dev = hw->dev[i];
- struct sky2_port *sky2 = netdev_priv(dev);
-
- if (sky2->wol) {
- wol = 1;
- sky2_wol_init(sky2);
- }
- }
-
- if (wol)
- sky2_power_aux(hw);
- rtnl_unlock();
-
- pci_enable_wake(pdev, PCI_D3hot, wol);
- pci_enable_wake(pdev, PCI_D3cold, wol);
-
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+ sky2_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver sky2_driver = {
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 54cb303443e..a5e182dd981 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1895,14 +1895,14 @@ enum {
/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
enum {
- TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */
- TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */
+ TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
+ TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
- TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
- TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+ TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
+ TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */
GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 3c5a4f52345..ef9674c6713 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1323,7 +1323,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
mcr |= MAC_CR_MCPAS_;
}
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* the number of the 32 bit register, while the low 5 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *cur_addr;
@@ -1351,7 +1351,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
memset(multicast_table, 0, sizeof(multicast_table));
cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
u32 position;
/* do we have a pointer here? */
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 8371b82323a..41c3dddeab5 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1542,7 +1542,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* We just get all multicast packets even if we only want them
. from one source. This will be changed at some future
. point. */
- else if (dev->mc_count ) {
+ else if (!netdev_mc_empty(dev)) {
/* support hardware multicasting */
/* be sure I get rid of flags I might have set */
@@ -1550,7 +1550,7 @@ static void smc_set_multicast_list(struct net_device *dev)
ioaddr + RCR );
/* NOTE: this has to set the bank, so make sure it is the
last thing called. The bank is set to zero at the top */
- smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ smc_setmulticast(ioaddr, netdev_mc_count(dev), dev->mc_list);
}
else {
outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index ea4fae79d6e..66450127c5a 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1395,7 +1395,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(2, "%s: RCR_ALMUL\n", dev->name);
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* the number of the 8 bit register, while the low 3 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *cur_addr;
@@ -1423,7 +1423,7 @@ static void smc_set_multicast_list(struct net_device *dev)
memset(multicast_table, 0, sizeof(multicast_table));
cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
int position;
/* do we have a pointer here? */
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 494cd91ea39..3c1f9aa84cf 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1383,7 +1383,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
pdata->hashhi = 0;
pdata->hashlo = 0;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
@@ -1408,7 +1408,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
}
mc_list = mc_list->next;
}
- if (count != (unsigned int)dev->mc_count)
+ if (count != (unsigned int)netdev_mc_count(dev))
SMSC_WARNING(DRV, "mc_count != dev->mc_count");
pdata->hashhi = hash_high;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 1495a5dd4b4..2bd3c986559 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1062,7 +1062,7 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mc_list = dev->mc_list;
u32 hash_lo = 0, hash_hi = 0;
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 9599ce77ef8..bd8bc66f2e0 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -541,13 +541,15 @@ static void sonic_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
rcr |= SONIC_RCR_PRO;
} else {
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
if (sonic_debug > 2)
- printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
+ printk("sonic_multicast_list: mc_count %d\n",
+ netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
- for (i = 1; i <= dev->mc_count; i++) {
+ for (i = 1; i <= netdev_mc_count(dev); i++) {
addr = dmi->dmi_addr;
dmi = dmi->next;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index d0556a9b456..58bc7ac086c 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1796,15 +1796,15 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode |= AcceptAll;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
- } else if (dev->mc_count <= 14) {
+ } else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
+ for (i = 2, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev) + 2;
i++, mclist = mclist->next) {
eaddrs = (__be16 *)mclist->dmi_addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
@@ -1825,7 +1825,7 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
index ac48ed78704..576b256ee38 100644
--- a/drivers/net/stmmac/dwmac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -305,13 +305,13 @@ static void dwmac100_set_filter(struct net_device *dev)
value |= MAC_CONTROL_PR;
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
MAC_CONTROL_HP);
- } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value |= MAC_CONTROL_PM;
value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (dev->mc_count == 0) { /* no multicast */
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
MAC_CONTROL_HO | MAC_CONTROL_HP);
} else {
@@ -327,7 +327,7 @@ static void dwmac100_set_filter(struct net_device *dev)
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
/* The upper 6 bits of the calculated CRC are used to
* index the contens of the hash table */
int bit_nr =
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index d812e9cdb3d..90dbb4f41ef 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -83,16 +83,16 @@ static void dwmac1000_set_filter(struct net_device *dev)
unsigned int value = 0;
DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, netdev_uc_count(dev));
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
if (dev->flags & IFF_PROMISC)
value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
+ else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value = GMAC_FRAME_FILTER_PM; /* pass all multi */
writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
int i;
u32 mc_filter[2];
struct dev_mc_list *mclist;
@@ -102,7 +102,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ mclist && i < netdev_mc_count(dev); i++, mclist = mclist->next) {
/* The upper 6 bits of the calculated CRC are used to
index the contens of the hash table */
int bit_nr =
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index b447a871942..efedc252e4b 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -414,7 +414,7 @@ static int init586(struct net_device *dev)
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
struct dev_mc_list *dmi=dev->mc_list;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 0ca4241b4f6..99998862c22 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -917,7 +917,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 25e81ebd9cd..dfea56fa39e 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -1013,7 +1013,7 @@ static void bigmac_set_multicast(struct net_device *dev)
while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
udelay(20);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writel(0xffff, bregs + BMAC_HTABLE0);
sbus_writel(0xffff, bregs + BMAC_HTABLE1);
sbus_writel(0xffff, bregs + BMAC_HTABLE2);
@@ -1028,7 +1028,7 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 0c972e560cf..4171259590b 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1517,18 +1517,18 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b55ceb88d93..d497ec05395 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1837,7 +1837,7 @@ static u32 gem_setup_multicast(struct gem *gp)
int i;
if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
+ (netdev_mc_count(gp->dev) > 256)) {
for (i=0; i<16; i++)
writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
rxcfg |= MAC_RXCFG_HFE;
@@ -1852,7 +1852,7 @@ static u32 gem_setup_multicast(struct gem *gp)
for (i = 0; i < 16; i++)
hash_table[i] = 0;
- for (i = 0; i < gp->dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(gp->dev); i++) {
char *addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 76ccd31cbf5..905df35ff78 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1516,7 +1516,7 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
- (hp->dev->mc_count > 64)) {
+ (netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -1531,7 +1531,7 @@ static int happy_meal_init(struct happy_meal *hp)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < hp->dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(hp->dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
@@ -2373,7 +2373,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
spin_lock_irq(&hp->happy_lock);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -2387,7 +2387,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 64e7d08c878..cf9d5bb9e1e 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1196,7 +1196,7 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 45c383f285e..3bc35d86ed6 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -636,7 +636,7 @@ static void qe_set_multicast(struct net_device *dev)
/* Lock out others. */
netif_stop_queue(dev);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
@@ -653,7 +653,7 @@ static void qe_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs = dmi->dmi_addr;
dmi = dmi->next;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 033408f589f..d838d4015c6 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1941,18 +1941,18 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > CAM_ENTRY_MAX - 3) {
+ netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
/* CAM 0, 1, 20 are reserved. */
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr = dev->mc_list;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ for (i = 0; i < netdev_mc_count(dev); i++, cur_addr = cur_addr->next) {
if (!cur_addr)
break;
/* entry 0,1 is reserved. */
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index b907bee31fd..ab9b0280317 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
/* set IMF to accept all multicast frames */
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
u8 hash;
struct dev_mc_list *mclist;
u32 reg, val;
@@ -840,7 +840,7 @@ static void bdx_setmulti(struct net_device *ndev)
}
} else {
- DBG("only own mac %d\n", ndev->mc_count);
+ DBG("only own mac %d\n", netdev_mc_count(ndev));
rxf_val |= GMAC_RX_FILTER_AB;
}
WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7195bdec17f..385434ff396 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.106"
-#define DRV_MODULE_RELDATE "January 12, 2010"
+#define DRV_MODULE_VERSION "3.107"
+#define DRV_MODULE_RELDATE "February 12, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -642,7 +642,6 @@ static void tg3_disable_ints(struct tg3 *tp)
static void tg3_enable_ints(struct tg3 *tp)
{
int i;
- u32 coal_now = 0;
tp->irq_sync = 0;
wmb();
@@ -650,13 +649,14 @@ static void tg3_enable_ints(struct tg3 *tp)
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- coal_now |= tnapi->coal_now;
+ tp->coal_now |= tnapi->coal_now;
}
/* Force an initial interrupt */
@@ -664,8 +664,9 @@ static void tg3_enable_ints(struct tg3 *tp)
(tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
- tw32(HOSTCC_MODE, tp->coalesce_mode |
- HOSTCC_MODE_ENABLE | coal_now);
+ tw32(HOSTCC_MODE, tp->coal_now);
+
+ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
@@ -4552,6 +4553,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
pci_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
+
+ /* Ensure that the update to the skb happens after the physical
+ * addresses have been transferred to the new BD location.
+ */
+ smp_wmb();
+
src_map->skb = NULL;
}
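
The smp_wmb() added in tg3_recycle_rx() pairs with the smp_rmb() added below
in tg3_rx_prodring_xfer(): the writer publishes the transferred descriptor
before updating the skb pointer that the reader tests, and the reader must not
let the dependent reads run ahead of that test. A rough userspace analogy of
this publish/observe pairing using C11 fences; the struct, field names, and
values are placeholders, not the driver's ring layout:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical two-field slot standing in for a ring entry. */
struct slot {
	uint64_t	addr;		/* "descriptor" payload */
	_Atomic int	in_use;		/* plays the role of the skb pointer */
};

static void producer(struct slot *s, uint64_t new_addr)
{
	s->addr = new_addr;				/* move the mapping first */
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&s->in_use, 0, memory_order_relaxed);
}

static int consumer(struct slot *s, uint64_t *out)
{
	if (atomic_load_explicit(&s->in_use, memory_order_relaxed))
		return -1;				/* slot still owned, back off */
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	*out = s->addr;					/* safe to read the payload now */
	return 0;
}

int main(void)
{
	struct slot s = { .addr = 0, .in_use = 1 };
	uint64_t v;

	producer(&s, 0xdeadbeef);
	if (!consumer(&s, &v))
		printf("consumed %#llx\n", (unsigned long long)v);
	return 0;
}
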
@@ -4733,7 +4740,7 @@ next_pkt_nopost:
tw32_rx_mbox(tnapi->consmbox, sw_idx);
/* Refill RX ring(s). */
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
if (work_mask & RXD_OPAQUE_RING_STD) {
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
@@ -4755,7 +4762,8 @@ next_pkt_nopost:
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
- napi_schedule(&tp->napi[1].napi);
+ if (tnapi != &tp->napi[1])
+ napi_schedule(&tp->napi[1].napi);
}
return received;
@@ -4787,12 +4795,12 @@ static void tg3_poll_link(struct tg3 *tp)
}
}
-static void tg3_rx_prodring_xfer(struct tg3 *tp,
- struct tg3_rx_prodring_set *dpr,
- struct tg3_rx_prodring_set *spr)
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+ struct tg3_rx_prodring_set *dpr,
+ struct tg3_rx_prodring_set *spr)
{
u32 si, di, cpycnt, src_prod_idx;
- int i;
+ int i, err = 0;
while (1) {
src_prod_idx = spr->rx_std_prod_idx;
@@ -4815,6 +4823,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_std_cons_idx;
di = dpr->rx_std_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_std_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_std_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_std_buffers[di],
&spr->rx_std_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4855,6 +4880,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_jmb_cons_idx;
di = dpr->rx_jmb_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_jmb_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_jmb_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_jmb_buffers[di],
&spr->rx_jmb_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4872,6 +4914,8 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
TG3_RX_JUMBO_RING_SIZE;
}
+
+ return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
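
tg3_rx_prodring_xfer() now trims the copy at the first destination slot that
still holds an skb, returns -ENOSPC, and lets the caller nudge the coalescing
engine to retry later. A stripped-down sketch of that trim-then-copy step on
plain arrays; the ring size, entry type, and sample data are placeholders
rather than tg3 structures, and the per-entry modulo stands in for the
driver's explicit wrap handling:

#include <stdio.h>

#define RING_SIZE	8		/* placeholder; tg3 uses TG3_RX_RING_SIZE */

struct entry {
	void	*skb;			/* non-NULL means the slot is occupied */
	int	 payload;
};

/* Copy up to want entries from src (starting at si) into dst (starting at di),
 * stopping early if a destination slot is still in use.  Returns how many
 * entries were actually moved; *enospc is set when the copy was cut short. */
static int xfer(struct entry *dst, int di, const struct entry *src, int si,
		int want, int *enospc)
{
	int i, cpycnt = want;

	for (i = di; i < di + cpycnt; i++) {
		if (dst[i % RING_SIZE].skb) {
			cpycnt = i - di;	/* trim at the first busy slot */
			*enospc = 1;
			break;
		}
	}

	for (i = 0; i < cpycnt; i++)
		dst[(di + i) % RING_SIZE] = src[(si + i) % RING_SIZE];

	return cpycnt;
}

int main(void)
{
	struct entry src[RING_SIZE] = { { 0, 1 }, { 0, 2 }, { 0, 3 }, { 0, 4 } };
	struct entry dst[RING_SIZE] = { 0 };
	int enospc = 0, moved;

	dst[2].skb = (void *)1;		/* pretend slot 2 is still owned by the hw */
	moved = xfer(dst, 0, src, 0, 4, &enospc);
	printf("moved %d entries, enospc=%d\n", moved, enospc);
	return 0;
}
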
@@ -4893,27 +4937,29 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
work_done += tg3_rx(tnapi, budget - work_done);
if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
- int i;
- u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
- u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+ struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
+ int i, err = 0;
+ u32 std_prod_idx = dpr->rx_std_prod_idx;
+ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
- for (i = 2; i < tp->irq_cnt; i++)
- tg3_rx_prodring_xfer(tp, tnapi->prodring,
- tp->napi[i].prodring);
+ for (i = 1; i < tp->irq_cnt; i++)
+ err |= tg3_rx_prodring_xfer(tp, dpr,
+ tp->napi[i].prodring);
wmb();
- if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
- u32 mbox = TG3_RX_STD_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
- }
+ if (std_prod_idx != dpr->rx_std_prod_idx)
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ dpr->rx_std_prod_idx);
- if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
- u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
- }
+ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ dpr->rx_jmb_prod_idx);
mmiowb();
+
+ if (err)
+ tw32_f(HOSTCC_MODE, tp->coal_now);
}
return work_done;
@@ -6173,8 +6219,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
- if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
+ tg3_rx_prodring_free(tp, &tp->prodring[j]);
}
}
@@ -6210,9 +6255,10 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
- tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+ if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
+ tg3_free_rings(tp);
return -ENOMEM;
+ }
}
return 0;
@@ -6259,7 +6305,7 @@ static void tg3_free_consistent(struct tg3 *tp)
tp->hw_stats = NULL;
}
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+ for (i = 0; i < tp->irq_cnt; i++)
tg3_rx_prodring_fini(tp, &tp->prodring[i]);
}
@@ -6271,7 +6317,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+ for (i = 0; i < tp->irq_cnt; i++) {
if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
goto err_out;
}
@@ -6336,10 +6382,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
break;
}
- if (tp->irq_cnt == 1)
- tnapi->prodring = &tp->prodring[0];
- else if (i)
- tnapi->prodring = &tp->prodring[i - 1];
+ tnapi->prodring = &tp->prodring[i];
/*
* If multivector RSS is enabled, vector 0 does not handle
@@ -6678,6 +6721,13 @@ static int tg3_poll_fw(struct tg3 *tp)
tp->dev->name);
}
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
+ }
+
return 0;
}
@@ -7545,8 +7595,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_abort_hw(tp, 1);
}
- if (reset_phy &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
+ if (reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -7865,6 +7914,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
@@ -9430,7 +9482,7 @@ static void __tg3_set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
tg3_set_multi (tp, 1);
- } else if (dev->mc_count < 1) {
+ } else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
tg3_set_multi (tp, 0);
} else {
@@ -9442,7 +9494,7 @@ static void __tg3_set_rx_mode(struct net_device *dev)
u32 bit;
u32 crc;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
@@ -10728,12 +10780,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
struct tg3_napi *tnapi, *rnapi;
struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
if (tp->irq_cnt > 1) {
- tnapi = &tp->napi[1];
rnapi = &tp->napi[1];
- } else {
- tnapi = &tp->napi[0];
- rnapi = &tp->napi[0];
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tnapi = &tp->napi[1];
}
coal_now = tnapi->coal_now | rnapi->coal_now;
@@ -10770,8 +10822,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
+ tg3_writephy(tp, MII_TG3_FET_PTEST,
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+ /* The write needs to be flushed for the AC131 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
mac_mode |= MAC_MODE_PORT_MODE_MII;
} else
mac_mode |= MAC_MODE_PORT_MODE_GMII;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index e7f6214a168..b4fd59623cf 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -110,6 +110,7 @@
#define CHIPREV_ID_57780_A0 0x57780000
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_57765_A0 0x57785000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1257,6 +1258,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
#define RDMAC_STATUS 0x00004804
@@ -2110,6 +2112,9 @@
/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
+#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2699,6 +2704,7 @@ struct tg3 {
struct net_device *dev;
struct pci_dev *pdev;
+ u32 coal_now;
u32 msg_enable;
/* begin "tx thread" cacheline section */
@@ -2717,7 +2723,7 @@ struct tg3 {
struct vlan_group *vlgrp;
#endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
+ struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
/* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3ec31dce99f..e44d5a074c6 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1335,7 +1335,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
- for ( i = 0; i < dev->mc_count; i++ ) {
+ for ( i = 0; i < netdev_mc_count(dev); i++ ) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
(char *) &dmi->dmi_addr );
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index b0d7db9d8bb..eff68e1d107 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1408,7 +1408,7 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 66272f2a075..1ce8f85a89a 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -996,7 +996,7 @@ static void tok_set_multicast_list(struct net_device *dev)
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
mclist = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(dev); i++) {
address[0] |= mclist->dmi_addr[2];
address[1] |= mclist->dmi_addr[3];
address[2] |= mclist->dmi_addr[4];
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 3f9d5a25562..26d84daf660 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1303,7 +1303,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next)
{
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index f010a4dc5f1..a242d125b34 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1178,7 +1178,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ for (i=0,dmi=dev->mc_list;i < netdev_mc_count(dev); i++,dmi = dmi->next) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index e3c42f5ac4a..6b8868959b8 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1214,7 +1214,7 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
{
int i;
struct dev_mc_list *mclist = dev->mc_list;
- for (i=0; i< dev->mc_count; i++)
+ for (i=0; i< netdev_mc_count(dev); i++)
{
((char *)(&tp->ocpl.FunctAddr))[0] |=
mclist->dmi_addr[2];
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a69c4a48bab..f4b30c4826f 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1184,7 +1184,7 @@ static void tsi108_set_rx_mode(struct net_device *dev)
rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
- if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
struct dev_mc_list *mc = dev->mc_list;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 29330209ad8..a4cff23dcdf 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -677,7 +677,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
@@ -706,7 +706,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -741,7 +741,7 @@ static void __de_set_rx_mode (struct net_device *dev)
goto out;
}
- if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
macmode |= AcceptAllMulticast;
goto out;
@@ -749,7 +749,7 @@ static void __de_set_rx_mode (struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
build_setup_frame_hash (de->setup_frame, dev);
else
build_setup_frame_perfect (de->setup_frame, dev);
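
build_setup_frame_perfect() stores each address as three 16-bit words, each
written twice, which is why a setup frame holds at most 16 perfect-filter
entries and the hash form takes over above 14 addresses. A small sketch of
that word-doubling layout; the buffer size, byte order within each word, and
the sample address are illustrative assumptions:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical station address, purely for illustration */
	uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t setup_frm[12];		/* room for one entry: 3 words, doubled */
	uint16_t *p = setup_frm;
	int i;

	/* mirror the driver's "*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;" */
	for (i = 0; i < 3; i++) {
		uint16_t w = (uint16_t)(addr[2 * i] | (addr[2 * i + 1] << 8));

		*p++ = w;		/* each 16-bit chunk of the MAC ... */
		*p++ = w;		/* ... appears twice in the setup frame */
	}

	for (i = 0; i < 12; i++)
		printf("%04x%c", setup_frm[i], i == 11 ? '\n' : ' ');
	return 0;
}
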
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index a8349b7200b..0b6a9731091 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1963,10 +1963,10 @@ SetMulticastFilter(struct net_device *dev)
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
+ for (i = 0; i < netdev_mc_count(dev); i++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
@@ -1984,7 +1984,7 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- for (j=0; j<dev->mc_count; j++) {
+ for (j=0; j<netdev_mc_count(dev); j++) {
addrs=dmi->dmi_addr;
dmi=dmi->next;
for (i=0; i<ETH_ALEN; i++) {
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 5fc61c1012e..534afbdb9c9 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -658,9 +658,9 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
/* Send setup frame */
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev, netdev_mc_count(dev)); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* DM9102/DM9102A */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -1052,6 +1052,7 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int mc_count = netdev_mc_count(dev);
DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
spin_lock_irqsave(&db->lock, flags);
@@ -1064,19 +1065,19 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
- DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", mc_count);
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- DMFE_DBUG(0, "Set multicast address", dev->mc_count);
+ DMFE_DBUG(0, "Set multicast address", mc_count);
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev, mc_count); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev, mc_count); /* DM9102/DM9102A */
spin_unlock_irqrestore(&db->lock, flags);
}
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index e1a5f03a49c..cce2ada0795 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -997,7 +997,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
@@ -1026,7 +1026,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1057,7 +1057,8 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
csr6 |= AcceptAllMulticast | AcceptAllPhys;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
@@ -1066,14 +1067,16 @@ static void set_rx_mode(struct net_device *dev)
/* Should verify correctness on big-endian/__powerpc__ */
struct dev_mc_list *mclist;
int i;
- if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ if (netdev_mc_count(dev) > 64) {
+ /* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
if (tp->flags & COMET_MAC_ADDR)
filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
else
@@ -1107,7 +1110,8 @@ static void set_rx_mode(struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) {
+ /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index dc3335d906f..216ceb322ed 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -557,7 +557,7 @@ static void uli526x_init(struct net_device *dev)
update_cr6(db->cr6_data, ioaddr);
/* Send setup frame */
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -906,16 +906,18 @@ static void uli526x_set_filter_mode(struct net_device * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
- ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
+ ULI526X_DBUG(0, "Pass all multicast address",
+ netdev_mc_count(dev));
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
spin_unlock_irqrestore(&db->lock, flags);
}
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 9fb89afccf7..98711a9f35a 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1361,7 +1361,7 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
@@ -1370,8 +1370,9 @@ static u32 __set_rx_mode(struct net_device *dev)
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 6e4f754c4ba..edabc49a49b 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -924,17 +924,18 @@ typhoon_set_rx_mode(struct net_device *dev)
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
- } else if((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
- } else if(dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 225f65812f2..a05720289c7 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2031,7 +2031,8 @@ static void ucc_geth_set_multi(struct net_device *dev)
dmi = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
+ for (i = 0; i < netdev_mc_count(dev);
+ i++, dmi = dmi->next) {
/* Only support group multicast for now.
*/
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index a516185cbc9..f02551713b1 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -542,9 +542,9 @@ static void asix_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= AX_RX_CTL_PRO;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= AX_RX_CTL_AMALL;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
@@ -558,7 +558,7 @@ static void asix_set_multicast(struct net_device *net)
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
@@ -754,9 +754,9 @@ static void ax88172_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x01;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= 0x02;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
@@ -770,7 +770,7 @@ static void ax88172_set_multicast(struct net_device *net)
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 7d3fa06980c..5a13660ebd1 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -648,7 +648,9 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) {
+ for (i = 0, mc = netdev->mc_list;
+ mc && i < netdev_mc_count(netdev);
+ i++, mc = mc->next) {
u32 crc = ether_crc_le(6, mc->dmi_addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 55cf7081de1..9ab5c1983a7 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -139,7 +139,7 @@ static void int51x1_set_multicast(struct net_device *netdev)
/* do not expect to see traffic of other PLCs */
filter |= PACKET_TYPE_PROMISCUOUS;
devinfo(dev, "promiscuous mode enabled");
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
filter |= PACKET_TYPE_ALL_MULTICAST;
devdbg(dev, "receive all multicast enabled");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1d64ef67ef..52671ea043a 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -881,7 +881,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
if (net->flags & IFF_PROMISC) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
}
- else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) {
+ else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 6fc098fe9ff..34665137f2c 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -443,9 +443,9 @@ static void mcs7830_data_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
data->config |= HIF_REG_CONFIG_PROMISCUOUS;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > MCS7830_MAX_MCAST) {
+ netdev_mc_count(net) > MCS7830_MAX_MCAST) {
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
@@ -457,7 +457,7 @@ static void mcs7830_data_set_multicast(struct net_device *net)
int i;
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(net); i++) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
mc_list = mc_list->next;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ed4a508ef26..44ae8f6d313 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1232,7 +1232,7 @@ static void pegasus_set_multicast(struct net_device *net)
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
if (netif_msg_link(pegasus))
pr_info("%s: Promiscuous mode enabled.\n", net->name);
- } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
if (netif_msg_link(pegasus))
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 21ac103fbb7..e85c89c6706 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -711,7 +711,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
dev->rx_creg |= cpu_to_le16(0x0001);
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
dev->rx_creg &= cpu_to_le16(0xfffe);
dev->rx_creg |= cpu_to_le16(0x0002);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 0c3c738d741..48555d0e374 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -384,7 +384,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
devdbg(dev, "receive all multicast enabled");
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
- } else if (dev->net->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev->net)) {
struct dev_mc_list *mc_list = dev->net->mc_list;
int count = 0;
@@ -406,7 +406,7 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
mc_list = mc_list->next;
}
- if (count != ((u32)dev->net->mc_count))
+ if (count != ((u32) netdev_mc_count(dev->net)))
devwarn(dev, "mc_count != dev->mc_count");
if (netif_msg_drv(dev))
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index a7e0c84426e..85df7ac636b 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1697,7 +1697,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
rx_mode = 0x1C;
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
@@ -1707,7 +1707,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
struct dev_mc_list *mclist;
int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f15485efe40..cd4e866321f 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1132,7 +1132,7 @@ static void velocity_set_multi(struct net_device *dev)
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
- } else if ((dev->mc_count > vptr->multicast_limit) ||
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
(dev->flags & IFF_ALLMULTI)) {
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
@@ -1141,7 +1141,9 @@ static void velocity_set_multi(struct net_device *dev)
int offset = MCAM_SIZE - vptr->multicast_limit;
mac_get_cam_mask(regs, vptr->mCAMmask);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < netdev_mc_count(dev);
+ i++, mclist = mclist->next) {
mac_set_cam(regs, i + offset, mclist->dmi_addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
}
@@ -1877,13 +1879,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
/**
* tx_srv - transmit interrupt service
* @vptr: velocity
- * @status:
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
* necessary.
*/
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
{
struct tx_desc *td;
int qnum;
@@ -2090,14 +2091,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
- * @status: adapter status (unused)
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
* slots back to the adapter for reuse.
*/
-static int velocity_rx_srv(struct velocity_info *vptr, int status,
- int budget_left)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
struct net_device_stats *stats = &vptr->dev->stats;
int rd_curr = vptr->rx.curr;
@@ -2151,32 +2150,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
struct velocity_info *vptr = container_of(napi,
struct velocity_info, napi);
unsigned int rx_done;
- u32 isr_status;
-
- spin_lock(&vptr->lock);
- isr_status = mac_read_isr(vptr->mac_regs);
-
- /* Ack the interrupt */
- mac_write_isr(vptr->mac_regs, isr_status);
- if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
- velocity_error(vptr, isr_status);
+ unsigned long flags;
+ spin_lock_irqsave(&vptr->lock, flags);
/*
* Do rx and tx twice for performance (taken from the VIA
* out-of-tree driver).
*/
- rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
- velocity_tx_srv(vptr, isr_status);
- rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
- velocity_tx_srv(vptr, isr_status);
-
- spin_unlock(&vptr->lock);
+ rx_done = velocity_rx_srv(vptr, budget / 2);
+ velocity_tx_srv(vptr);
+ rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ velocity_tx_srv(vptr);
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
napi_complete(napi);
mac_enable_int(vptr->mac_regs);
}
+ spin_unlock_irqrestore(&vptr->lock, flags);
return rx_done;
}
@@ -2206,10 +2197,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
return IRQ_NONE;
}
+ /* Ack the interrupt */
+ mac_write_isr(vptr->mac_regs, isr_status);
+
if (likely(napi_schedule_prep(&vptr->napi))) {
mac_disable_int(vptr->mac_regs);
__napi_schedule(&vptr->napi);
}
+
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+
spin_unlock(&vptr->lock);
return IRQ_HANDLED;
@@ -3098,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
velocity_init_registers(vptr, VELOCITY_INIT_WOL);
mac_disable_int(vptr->mac_regs);
- velocity_tx_srv(vptr, 0);
+ velocity_tx_srv(vptr);
for (i = 0; i < vptr->tx.numq; i++) {
if (vptr->tx.used[i])
@@ -3342,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
{
struct velocity_info *vptr = netdev_priv(dev);
int max_us = 0x3f * 64;
+ unsigned long flags;
/* 6 bits of timer resolution, in units of 64 usec */
if (ecmd->tx_coalesce_usecs > max_us)
@@ -3363,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
ecmd->tx_coalesce_usecs);
/* Setup the interrupt suppression and queue timers */
+ spin_lock_irqsave(&vptr->lock, flags);
mac_disable_int(vptr->mac_regs);
setup_adaptive_interrupts(vptr);
setup_queue_timers(vptr);
@@ -3370,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
mac_clear_isr(vptr->mac_regs);
mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
return 0;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9d8984a3741..ce35b42cc2c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,9 +56,6 @@ struct virtnet_info
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
- /* Send queue. */
- struct sk_buff_head send;
-
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -505,7 +502,6 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- __skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
tot_sgs += skb_vnet_hdr(skb)->num_sg;
@@ -588,15 +584,6 @@ again:
}
vi->svq->vq_ops->kick(vi->svq);
- /*
- * Put new one in send queue. You'd expect we'd need this before
- * xmit_skb calls add_buf(), since the callback can be triggered
- * immediately after that. But since the callback just triggers
- * another call back here, normal network xmit locking prevents the
- * race.
- */
- __skb_queue_head(&vi->send, skb);
-
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
nf_reset(skb);
@@ -735,6 +722,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
int uc_count;
+ int mc_count;
void *buf;
int i;
@@ -762,9 +750,11 @@ static void virtnet_set_rx_mode(struct net_device *dev)
allmulti ? "en" : "dis");
uc_count = netdev_uc_count(dev);
+ mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((uc_count + dev->mc_count) * ETH_ALEN) +
- (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+ (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ mac_data = buf;
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
return;
@@ -784,13 +774,13 @@ static void virtnet_set_rx_mode(struct net_device *dev)
/* multicast list and count fill the end */
mac_data = (void *)&mac_data->macs[uc_count][0];
- mac_data->entries = dev->mc_count;
+ mac_data->entries = mc_count;
addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, addr = addr->next)
+ for (i = 0; i < mc_count; i++, addr = addr->next)
memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
- sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+ sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
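
The control-queue update above packs both MAC tables into a single allocation:
a count followed by the unicast addresses, then a second count followed by the
multicast addresses, so the buffer size is (uc_count + mc_count) * ETH_ALEN
plus two counts. A small sketch of that size and offset arithmetic; the 32-bit
count and the struct below are assumptions for illustration, not the virtio
control header definition:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ETH_ALEN 6

/* Assumed shape of one MAC table: a count followed by packed addresses. */
struct mac_table {
	uint32_t entries;
	uint8_t  macs[][ETH_ALEN];
};

int main(void)
{
	int uc_count = 2, mc_count = 3;		/* hypothetical list sizes */
	size_t sz = (uc_count + mc_count) * ETH_ALEN + 2 * sizeof(uint32_t);
	void *buf = calloc(1, sz);
	struct mac_table *uc = buf;
	struct mac_table *mc;

	uc->entries = uc_count;
	/* the multicast table starts right after the last unicast address */
	mc = (struct mac_table *)&uc->macs[uc_count][0];
	mc->entries = mc_count;

	printf("buffer %zu bytes, mc table at offset %td\n",
	       sz, (uint8_t *)mc - (uint8_t *)buf);
	free(buf);
	return 0;
}
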
@@ -977,9 +967,6 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_VLAN_FILTER;
}
- /* Initialize our empty send queue. */
- skb_queue_head_init(&vi->send);
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -1016,6 +1003,12 @@ static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
while (1) {
+ buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ if (!buf)
+ break;
+ dev_kfree_skb(buf);
+ }
+ while (1) {
buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
if (!buf)
break;
@@ -1035,11 +1028,11 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- /* Free our skbs in send queue, if any. */
- __skb_queue_purge(&vi->send);
unregister_netdev(vi->dev);
cancel_delayed_work_sync(&vi->refill);
+
+ /* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
vdev->config->del_vqs(vi->vdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b896f938611..ee1b397417f 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1668,7 +1668,7 @@ static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
- u32 sz = netdev->mc_count * ETH_ALEN;
+ u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
@@ -1678,7 +1678,7 @@ vmxnet3_copy_mc(struct net_device *netdev)
int i;
struct dev_mc_list *mc = netdev->mc_list;
- for (i = 0; i < netdev->mc_count; i++) {
+ for (i = 0; i < netdev_mc_count(netdev); i++) {
BUG_ON(!mc);
memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
ETH_ALEN);
@@ -1708,12 +1708,12 @@ vmxnet3_set_mc(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
- netdev->mc_count * ETH_ALEN);
+ netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a6606b8948e..c248b01218a 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -1178,11 +1178,11 @@ static void vxge_set_multicast(struct net_device *dev)
memset(&mac_info, 0, sizeof(struct macInfo));
/* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && dev->mc_count) {
+ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
list_head = &vdev->vpaths[0].mac_addr_list;
- if ((dev->mc_count +
+ if ((netdev_mc_count(dev) +
(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
vdev->vpaths[0].max_mac_addr_cnt)
goto _set_all_mcast;
@@ -1217,7 +1217,7 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index e6ca3eb4c0d..547912e6843 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -302,18 +302,6 @@ static int adm8211_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct adm8211_priv *priv = dev->priv;
-
- stats[0].len = priv->cur_tx - priv->dirty_tx;
- stats[0].limit = priv->tx_ring_size - 2;
- stats[0].count = priv->dirty_tx;
-
- return 0;
-}
-
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
@@ -1773,7 +1761,6 @@ static const struct ieee80211_ops adm8211_ops = {
.prepare_multicast = adm8211_prepare_multicast,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
- .get_tx_stats = adm8211_get_tx_stats,
.get_tsf = adm8211_get_tsft
};
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ef6b78da370..c22a34c7639 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2310,7 +2310,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
airo_set_promisc(ai);
}
- if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* Turn on multicast. (Should be already setup...) */
}
}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index b99a8c2053d..8c8ce67971e 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -144,6 +144,12 @@ struct ar9170_sta_tid {
bool active;
};
+struct ar9170_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
+};
+
#define AR9170_QUEUE_TIMEOUT 64
#define AR9170_TX_TIMEOUT 8
#define AR9170_BA_TIMEOUT 4
@@ -211,7 +217,7 @@ struct ar9170 {
/* qos queue settings */
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[5];
+ struct ar9170_tx_queue_stats tx_stats[5];
struct ieee80211_tx_queue_params edcf[5];
spinlock_t cmdlock;
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index 4d27f7f67c7..91797cb6e0e 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -2396,18 +2396,6 @@ static int ar9170_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *tx_stats)
-{
- struct ar9170 *ar = hw->priv;
-
- spin_lock_bh(&ar->tx_stats_lock);
- memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
- spin_unlock_bh(&ar->tx_stats_lock);
-
- return 0;
-}
-
static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
@@ -2509,7 +2497,6 @@ static const struct ieee80211_ops ar9170_ops = {
.set_key = ar9170_set_key,
.sta_notify = ar9170_sta_notify,
.get_stats = ar9170_get_stats,
- .get_tx_stats = ar9170_get_tx_stats,
.ampdu_action = ar9170_ampdu_action,
};
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ad4d446f026..ac67f02e26d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -541,7 +541,6 @@ struct ath5k_txq_info {
/*
* Transmit packet types.
* used on tx control descriptor
- * TODO: Use them inside base.c corectly
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index edb6c90e376..8dce0077b02 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -241,8 +241,6 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -269,7 +267,6 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
.conf_tx = NULL,
- .get_tx_stats = ath5k_get_tx_stats,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
.reset_tsf = ath5k_reset_tsf,
@@ -1249,6 +1246,29 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
return 0;
}
+static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
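
get_hw_packet_type() keys the descriptor type off the 802.11 frame-control
field instead of always passing AR5K_PKT_TYPE_NORMAL. A self-contained sketch
of the same classification on a raw frame-control word; the type/subtype masks
follow the standard IEEE 802.11 encoding, while the enum values are
placeholders rather than the AR5K_PKT_TYPE_* values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder packet types standing in for the AR5K_PKT_TYPE_* enum. */
enum pkt_type { PKT_NORMAL, PKT_BEACON, PKT_PROBE_RESP, PKT_ATIM, PKT_PSPOLL };

/* Frame control is little-endian on the air; assume fc is already host order. */
static enum pkt_type classify(uint16_t fc)
{
	uint16_t type    = (fc >> 2) & 0x3;	/* 0=mgmt, 1=ctrl, 2=data */
	uint16_t subtype = (fc >> 4) & 0xf;

	if (type == 0 && subtype == 8)		/* management / beacon */
		return PKT_BEACON;
	if (type == 0 && subtype == 5)		/* management / probe response */
		return PKT_PROBE_RESP;
	if (type == 0 && subtype == 9)		/* management / ATIM */
		return PKT_ATIM;
	if (type == 1 && subtype == 10)		/* control / PS-Poll */
		return PKT_PSPOLL;
	return PKT_NORMAL;
}

int main(void)
{
	printf("beacon fc -> %d\n", classify(0x0080));	/* type 0, subtype 8 */
	printf("ps-poll fc -> %d\n", classify(0x00a4));	/* type 1, subtype 10 */
	return 0;
}
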
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
struct ath5k_txq *txq)
@@ -1303,7 +1323,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
+ ieee80211_get_hdrlen_from_skb(skb),
+ get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
@@ -1332,7 +1353,6 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
spin_lock_bh(&txq->lock);
list_add_tail(&bf->list, &txq->q);
- sc->tx_stats[txq->qnum].len++;
if (txq->link == NULL) /* is this first packet? */
ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
else /* no, so only link it */
@@ -1581,7 +1601,6 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
ath5k_txbuf_free(sc, bf);
spin_lock_bh(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock_bh(&sc->txbuflock);
@@ -2011,10 +2030,8 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
}
ieee80211_tx_status(sc->hw, skb);
- sc->tx_stats[txq->qnum].count++;
spin_lock(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock(&sc->txbuflock);
@@ -3116,17 +3133,6 @@ ath5k_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int
-ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct ath5k_softc *sc = hw->priv;
-
- memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats));
-
- return 0;
-}
-
static u64
ath5k_get_tsf(struct ieee80211_hw *hw)
{
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 952b3a21bbc..7e1a88a5abd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -117,7 +117,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 0ea340fd071..83c7ea4c007 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+void ath9k_enable_ps(struct ath_softc *sc);
/********/
/* VIFs */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 422454fe4ff..d088ebfe63a 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -577,6 +577,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
u64 tsf;
int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ /* No need to configure beacon if we are not associated */
+ if (!common->curaid) {
+ ath_print(common, ATH_DBG_BEACON,
+ "STA is not yet associated..skipping beacon config\n");
+ return;
+ }
+
memset(&bs, 0, sizeof(bs));
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
@@ -739,7 +746,6 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
enum nl80211_iftype iftype;
/* Setup the beacon configuration parameters */
-
if (vif) {
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index f15fee76a4e..f00f5c744f4 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1217,6 +1217,17 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
/* As defined by IEEE 802.11-2007 17.3.8.6 */
slottime = ah->slottime + 3 * ah->coverage_class;
acktimeout = slottime + sifstime;
+
+ /*
+ * Workaround for early ACK timeouts: add an offset to match the
+ * initval's 64us ack timeout value.
+ * This was initially only meant to work around an issue with delayed
+ * BA frames in some implementations, but it has been found to fix ACK
+ * timeout issues in other cases as well.
+ */
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
+ acktimeout += 64 - sifstime - ah->slottime;
+
ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
ath9k_hw_set_cts_timeout(ah, acktimeout);
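For the 2.4 GHz case the added offset cancels the configured SIFS and slot values, so a quick expansion of the hunk above shows the programmed ACK/CTS timeout reduces to a constant plus the coverage-class term:
	acktimeout = (ah->slottime + 3 * ah->coverage_class) + sifstime
	             + (64 - sifstime - ah->slottime)
	           = 64 + 3 * ah->coverage_class    /* microseconds, 2 GHz band */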
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 4b5e5484868..623c2f88498 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -622,7 +622,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT;
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6796d5cdc29..9c8f925c209 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -809,6 +809,7 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
clear_bit(key->hw_key_idx + 64, common->keymap);
if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
}
@@ -1492,6 +1493,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&sc->mutex);
}
+void ath9k_enable_ps(struct ath_softc *sc)
+{
+ sc->ps_enabled = true;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ sc->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ }
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+}
+
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_wiphy *aphy = hw->priv;
@@ -1546,22 +1560,13 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
sc->ps_flags |= PS_ENABLED;
- if (!(ah->caps.hw_caps &
- ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
- }
- }
/*
* At this point we know hardware has received an ACK
* of a previously sent null data frame.
*/
if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ ath9k_enable_ps(sc);
}
} else {
sc->ps_enabled = false;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3c790a4f38f..47294f90bbe 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1610,7 +1610,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf) && !is_pae(skb))
+ if (conf_is_ht(&hw->conf))
bf->bf_state.bf_type |= BUF_HT;
bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1696,7 +1696,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
goto tx_done;
}
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
/*
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
@@ -2048,10 +2048,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->ps_flags & PS_ENABLED)) {
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- } else
+ if ((sc->ps_flags & PS_ENABLED))
+ ath9k_enable_ps(sc);
+ else
sc->ps_flags |= PS_NULLFUNC_COMPLETED;
}
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 039ac490465..04abd1f556b 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -110,8 +110,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
static inline bool is_wwr_sku(u16 regd)
{
- return ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
- (regd == WORLD);
+ return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
+ (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
+ (regd == WORLD));
}
static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 54d6085a887..6a6ab0f630e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
#define B43_MMIO_RNG 0x65A
+#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 615af22c49f..be7abf8916a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1369,7 +1369,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "DMA tx mapping failure\n");
goto out;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1500,22 +1499,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_dmaring *ring;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = select_ring_by_priority(dev, i);
-
- stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
- stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
- stats[i].count = ring->nr_tx_packets;
- }
-}
-
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index f7ab37c4cdb..dc91944d602 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -228,8 +228,6 @@ struct b43_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -278,9 +276,6 @@ void b43_dma_free(struct b43_wldev *dev);
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43_dma_tx(struct b43_wldev *dev,
struct sk_buff *skb);
void b43_dma_handle_txstatus(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 316a913860d..aa33d741e5e 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -637,10 +637,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
{
/* slot_time is in usec. */
- if (dev->phy.type != B43_PHYTYPE_G)
+ /* This test used to exit for all but a G PHY. */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
return;
- b43_write16(dev, 0x684, 510 + slot_time);
- b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
+ /* Shared memory location 0x0010 is the slot time and should be
+ * set to slot_time; however, this register is initially 0 and changing
+ * the value adversely affects the transmit rate for BCM4311
+ * devices. Until this behavior is understood, delete this step.
+ *
+ * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ */
}
static void b43_short_slot_timing_enable(struct b43_wldev *dev)
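For reference, with the usual 802.11 slot values (9 us short, 20 us long; figures not taken from this patch) the write above programs 510 + 9 = 519 into B43_MMIO_IFSSLOT for short-slot operation and 510 + 20 = 530 for long slots, while the shared-memory slot-time update stays disabled for the BCM4311 reason given in the comment.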
@@ -3349,27 +3356,6 @@ out_unlock:
return err;
}
-static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43_wl *wl = hw_to_b43_wl(hw);
- struct b43_wldev *dev;
- int err = -ENODEV;
-
- mutex_lock(&wl->mutex);
- dev = wl->current_dev;
- if (dev && b43_status(dev) >= B43_STAT_STARTED) {
- if (b43_using_pio_transfers(dev))
- b43_pio_get_tx_stats(dev, stats);
- else
- b43_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- mutex_unlock(&wl->mutex);
-
- return err;
-}
-
static int b43_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -3980,6 +3966,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_STARTED);
/* Start data flow (TX/RX). */
@@ -4389,8 +4376,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ieee80211_wake_queues(dev->wl->hw);
- ieee80211_wake_queues(dev->wl->hw);
-
b43_set_status(dev, B43_STAT_INITIALIZED);
out:
@@ -4596,7 +4581,6 @@ static const struct ieee80211_ops b43_hw_ops = {
.set_key = b43_op_set_key,
.update_tkip_key = b43_op_update_tkip_key,
.get_stats = b43_op_get_stats,
- .get_tx_stats = b43_op_get_tx_stats,
.get_tsf = b43_op_get_tsf,
.set_tsf = b43_op_set_tsf,
.start = b43_op_start,
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 6392da25efe..795bb1e3345 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -68,6 +68,10 @@ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
u8 *events, u8 *delays, u8 length);
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
enum b43_nphy_rf_sequence seq);
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off);
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core);
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -498,8 +502,8 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
}
- /* TODO: Call N PHY RF Ctrl Intc Override with 2, 0, 3 as arguments */
- /* TODO: Call N PHY RF Intc Override with 8, 0, 3, 0 as arguments */
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
if (core == 0) {
@@ -509,9 +513,8 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
rxval = 4;
txval = 2;
}
-
- /* TODO: Call N PHY RF Ctrl Intc Override with 1, rxval, (core + 1) */
- /* TODO: Call N PHY RF Ctrl Intc Override with 1, txval, (2 - core) */
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
@@ -714,6 +717,67 @@ static void b43_nphy_stop_playback(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ unsigned int channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ /* FIXME: channel = radio_chanspec */
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
{
@@ -953,6 +1017,33 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 0);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
+static int b43_nphy_load_samples(struct b43_wldev *dev,
+ struct b43_c32 *samples, u16 len) {
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 i;
+ u32 *data;
+
+ data = kzalloc(len * sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ b43err(dev->wl, "allocation for samples loading failed\n");
+ return -ENOMEM;
+ }
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ for (i = 0; i < len; i++) {
+ data[i] = (samples[i].i & 0x3FF) << 10;
+ data[i] |= samples[i].q & 0x3FF;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data);
+
+ kfree(data);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+ return 0;
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
bool test)
@@ -978,6 +1069,10 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
}
samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ if (!samples) {
+ b43err(dev->wl, "allocation for samples generation failed\n");
+ return 0;
+ }
rot = (((freq * 36) / bw) << 16) / 100;
angle = 0;
@@ -988,9 +1083,9 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
samples[i].i = CORDIC_CONVERT(samples[i].i * max);
}
- /* TODO: Call N PHY Load Sample Table with buffer, len as arguments */
+ i = b43_nphy_load_samples(dev, samples, len);
kfree(samples);
- return len;
+ return (i < 0) ? 0 : len;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
@@ -1264,6 +1359,104 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
}
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
+{
+ u8 i, j;
+ u16 reg, tmp, val;
+
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
+
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
+
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
+}
+
static void b43_nphy_bphy_init(struct b43_wldev *dev)
{
unsigned int i;
@@ -2161,9 +2354,9 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- /* TODO: Call N PHY RF Ctrl Intc Override with 2, 1, 3 */
- /* TODO: Call N PHY RF Ctrl Intc Override with 1, 2, 1 */
- /* TODO: Call N PHY RF Ctrl Intc Override with 1, 8, 2 */
+ b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
+ b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
+ b43_nphy_rf_control_intc_override(dev, 1, 8, 2);
regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
@@ -2194,6 +2387,55 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
}
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
+static void b43_nphy_save_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+ u16 *txcal_radio_regs = NULL;
+ u8 *iqcal_chanspec;
+ u16 *table = NULL;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_2G;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ } else {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_5G;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ }
+
+ b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
+ txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
+ txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
+ txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
+ txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
+ } else {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
+ }
+ *iqcal_chanspec = nphy->radio_chanspec;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
static void b43_nphy_restore_cal(struct b43_wldev *dev)
{
@@ -2486,6 +2728,39 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
return error;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
+static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 buffer[7];
+ bool equal = true;
+
+ if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ return;
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+ for (i = 0; i < 4; i++) {
+ if (buffer[i] != nphy->txiqlocal_bestc[i]) {
+ equal = false;
+ break;
+ }
+ }
+
+ if (!equal) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4,
+ nphy->txiqlocal_bestc);
+ for (i = 0; i < 4; i++)
+ buffer[i] = 0;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ &nphy->txiqlocal_bestc[5]);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ &nphy->txiqlocal_bestc[5]);
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
struct nphy_txgains target, u8 type, bool debug)
@@ -2516,7 +2791,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
b43_nphy_stay_in_carrier_search(dev, 1);
if (dev->phy.rev < 2)
- ;/* TODO: Call N PHY Reapply TX Cal Coeffs */
+ b43_nphy_reapply_tx_cal_coeffs(dev);
b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
for (i = 0; i < 2; i++) {
b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
@@ -2858,7 +3133,7 @@ int b43_phy_initn(struct b43_wldev *dev)
if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
- ;/* Call N PHY Save Cal */
+ b43_nphy_save_cal(dev);
else if (nphy->mphase_cal_phase_id == 0)
;/* N PHY Periodic Calibration with argument 3 */
} else {
@@ -2872,7 +3147,8 @@ int b43_phy_initn(struct b43_wldev *dev)
if (phy->rev >= 3 && phy->rev <= 6)
b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
b43_nphy_tx_lp_fbw(dev);
- /* TODO N PHY Spur Workaround */
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index ae82f0fc209..403aad3f894 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -975,6 +975,7 @@ struct b43_phy_n {
u16 papd_epsilon_offset[2];
s32 preamble_override;
u32 bb_mult_save;
+ u16 radio_chanspec;
bool gain_boost;
bool elna_gain_config;
@@ -1001,6 +1002,9 @@ struct b43_phy_n {
u16 classifier_state;
u16 clip_state[2];
+ bool aband_spurwar_en;
+ bool gband_spurwar_en;
+
bool ipa2g_on;
u8 iqcal_chanspec_2G;
u8 rssical_chanspec_2G;
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index c01b8e02412..a6062c3e89a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -559,7 +559,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "PIO transmission failure\n");
goto out;
}
- q->nr_tx_packets++;
B43_WARN_ON(q->buffer_used > q->buffer_size);
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
@@ -605,22 +604,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_pio_txqueue *q;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- q = select_queue_by_priority(dev, i);
-
- stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
- stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
- stats[i].count = q->nr_tx_packets;
- }
-}
-
/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7b3c42f93a1..1e516147424 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -90,9 +90,6 @@ struct b43_pio_txqueue {
struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
struct list_head packets_list;
- /* Total number of transmitted packets. */
- unsigned int nr_tx_packets;
-
/* Shortcut to the 802.11 core revision. This is to
* avoid horrible pointer dereferencing in the fastpaths. */
u8 rev;
@@ -160,8 +157,6 @@ void b43_pio_free(struct b43_wldev *dev);
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
void b43_pio_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43_pio_rx(struct b43_pio_rxqueue *q);
void b43_pio_tx_suspend(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 0a86bdf5315..8b9387c6ff3 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1411,7 +1411,6 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1527,25 +1526,6 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
spin_unlock(&ring->lock);
}
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43legacy_dmaring *ring;
- unsigned long flags;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = priority_to_txring(dev, i);
-
- spin_lock_irqsave(&ring->lock, flags);
- stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
- stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
- stats[i].count = ring->nr_tx_packets;
- spin_unlock_irqrestore(&ring->lock, flags);
- }
-}
-
static void dma_rx(struct b43legacy_dmaring *ring,
int *slot)
{
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2f186003c31..f9681041c2d 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -243,8 +243,6 @@ struct b43legacy_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -292,9 +290,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
@@ -315,11 +310,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
}
static inline
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 874a64a6c61..1d070be5a67 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2446,29 +2446,6 @@ static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
}
-static int b43legacy_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- unsigned long flags;
- int err = -ENODEV;
-
- if (!dev)
- goto out;
- spin_lock_irqsave(&wl->irq_lock, flags);
- if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) {
- if (b43legacy_using_pio(dev))
- b43legacy_pio_get_tx_stats(dev, stats);
- else
- b43legacy_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- spin_unlock_irqrestore(&wl->irq_lock, flags);
-out:
- return err;
-}
-
static int b43legacy_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2923,6 +2900,7 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
goto out;
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_STARTED);
/* Start data flow (TX/RX) */
@@ -3343,6 +3321,7 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
b43legacy_leds_init(dev);
@@ -3511,7 +3490,6 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.bss_info_changed = b43legacy_op_bss_info_changed,
.configure_filter = b43legacy_op_configure_filter,
.get_stats = b43legacy_op_get_stats,
- .get_tx_stats = b43legacy_op_get_tx_stats,
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 51866c9a276..017c0e9c37e 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -477,7 +477,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
list_move_tail(&packet->list, &queue->txqueue);
queue->nr_txfree--;
- queue->nr_tx_packets++;
B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
tasklet_schedule(&queue->txtask);
@@ -546,18 +545,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
tasklet_schedule(&queue->txtask);
}
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_pio *pio = &dev->pio;
- struct b43legacy_pioqueue *queue;
-
- queue = pio->queue1;
- stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
- stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
- stats[0].count = queue->nr_tx_packets;
-}
-
static void pio_rx_error(struct b43legacy_pioqueue *queue,
int clear_buffers,
const char *error)
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 464fec05a06..8e6773ea6e7 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -74,10 +74,6 @@ struct b43legacy_pioqueue {
* posted to the device. We are waiting for the txstatus.
*/
struct list_head txrunning;
- /* Total number or packets sent.
- * (This counter can obviously wrap).
- */
- unsigned int nr_tx_packets;
struct tasklet_struct txtask;
struct b43legacy_pio_txpacket
tx_packets_cache[B43legacy_PIO_MAXTXPACKETS];
@@ -106,8 +102,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status);
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
/* Suspend TX queue in hardware. */
@@ -140,11 +134,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
{
}
static inline
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 9d1820676f3..694ceef8859 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -247,6 +247,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl1000_bg_cfg = {
@@ -274,6 +275,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6d598890134..f3d662c8cbc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -179,14 +179,24 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
data->delta_gain_code[i] = 0;
continue;
}
- delta_g = (1000 * ((s32)average_noise[default_chain] -
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
+
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
- /* set negative sign */
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
data->delta_gain_code[i] |= (1 << 2);
}
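A short illustration of what the new per-device chain_noise_scale does (the noise delta below is made up): configs that keep the old behavior use 1000 and so retain the previous 1000/1500 weighting, while the 6050 configs updated later in this patch use 1500 and apply the measured chain noise difference at full weight.
	/* hypothetical average_noise[default_chain] - average_noise[i] == 30 */
	delta_g = (1000 * 30) / 1500;	/* = 20, 5000/6000-series configs */
	delta_g = (1500 * 30) / 1500;	/* = 30, 6050 configs             */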
@@ -1587,6 +1597,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1612,6 +1623,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_abg_cfg = {
@@ -1635,6 +1647,7 @@ struct iwl_cfg iwl5100_abg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_agn_cfg = {
@@ -1660,6 +1673,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5350_agn_cfg = {
@@ -1685,6 +1699,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_agn_cfg = {
@@ -1710,6 +1725,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_abg_cfg = {
@@ -1733,6 +1749,7 @@ struct iwl_cfg iwl5150_abg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index a9f8551e0e4..782e23a2698 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -277,21 +277,6 @@ static const struct iwl_ops iwl6000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
-};
-
-static const struct iwl_ops iwl6050_ops = {
- .ucode = &iwl5000_ucode,
- .lib = &iwl6000_lib,
- .hcmd = &iwl5000_hcmd,
- .utils = &iwl6050_hcmd_utils,
- .led = &iwlagn_led_ops,
-};
-
/*
* "i": Internal configuration, use internal Power Amplifier
*/
@@ -324,6 +309,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -354,6 +340,7 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -384,6 +371,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6050_2agn_cfg = {
@@ -392,7 +380,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .ops = &iwl6050_ops,
+ .ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
@@ -415,6 +403,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -423,7 +412,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G,
- .ops = &iwl6050_ops,
+ .ops = &iwl6000_ops,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.eeprom_ver = EEPROM_6050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
@@ -445,6 +434,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@@ -476,6 +466,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index d0268280d67..1854c720b5e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -2631,7 +2631,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
*/
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX + 1;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
@@ -3440,7 +3440,6 @@ static struct ieee80211_ops iwl_hw_ops = {
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
.get_stats = iwl_mac_get_stats,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 02bf17ecaf5..d390eef2efe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2787,6 +2787,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
priv->staging_rxon.flags = 0;
+ iwl_set_rxon_ht(priv, ht_conf);
iwl_set_rxon_channel(priv, conf->channel);
iwl_set_flags_for_band(priv, conf->channel->band);
@@ -2850,42 +2851,6 @@ out:
}
EXPORT_SYMBOL(iwl_mac_config);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
- int i, avail;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < AC_NUM; i++) {
- txq = &priv->txq[i];
- q = &txq->q;
- avail = iwl_queue_space(q);
-
- stats[i].len = q->n_window - avail;
- stats[i].limit = q->n_window - q->high_mark;
- stats[i].count = q->n_window;
-
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_mac_get_tx_stats);
-
void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index ec1fe1d7cc9..8f0c564e68b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -293,6 +293,7 @@ struct iwl_cfg {
bool support_ct_kill_exit;
const bool support_wimax_coexist;
u8 plcp_delta_threshold;
+ s32 chain_noise_scale;
};
/***************************
@@ -341,8 +342,6 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwl_config_ap(struct iwl_priv *priv);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 5df66382d92..0f718f6df5f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1012,7 +1012,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
if (ieee80211_is_mgmt(fc) ||
ieee80211_has_protected(fc) ||
ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+ le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
+ (ieee80211_is_data_qos(fc) &&
+ *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
ret = skb_linearize(skb);
else
ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 08faafae849..f786a407638 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -651,9 +651,20 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
+ if (!priv->is_internal_short_scan &&
+ priv->scan_request->n_ssids) {
+ struct cfg80211_ssid *ssid =
+ priv->scan_request->ssids;
+
+ /* Broadcast if ssid_len is 0 */
+ *pos++ = ssid->ssid_len;
+ memcpy(pos, ssid->ssid, ssid->ssid_len);
+ pos += ssid->ssid_len;
+ len += 2 + ssid->ssid_len;
+ } else {
+ *pos++ = 0;
+ len += 2;
+ }
if (WARN_ON(left < ie_len))
return len;
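The bytes emitted here follow the standard SSID information element layout (element ID 0, length, payload); a sketch of the two paths with a hypothetical two-character SSID "ap":
	/* directed scan, SSID "ap":       00 02 61 70   (len += 2 + ssid_len) */
	/* internal scan / no SSID given:  00 00         (len += 2, wildcard)  */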
@@ -782,20 +793,26 @@ static void iwl_bg_request_scan(struct work_struct *data)
if (priv->is_internal_short_scan) {
IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
} else if (priv->scan_request->n_ssids) {
- int i, p = 0;
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
+ /*
+ * The first SSID to scan is stuffed into the probe request
+ * template and the remaining ones are handled through the
+ * direct_scan array.
+ */
+ if (priv->scan_request->n_ssids > 1) {
+ int i, p = 0;
+ for (i = 1; i < priv->scan_request->n_ssids; i++) {
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
}
is_active = true;
} else
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 119da54116d..eac2b9a9571 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -3835,7 +3835,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.config = iwl_mac_config,
.configure_filter = iwl_configure_filter,
.set_key = iwl3945_mac_set_key,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index d32adeab68a..ad8f7eabb5a 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
}
bss->bss = kzalloc(bss_len, GFP_KERNEL);
- if (!bss) {
+ if (!bss->bss) {
kfree(bss);
IWM_ERR(iwm, "Couldn't allocate bss\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 0334a58820e..e7470442f76 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -240,11 +240,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
/* Now we got response from FW, cancel the command timer */
del_timer(&priv->command_timer);
priv->cmd_timed_out = 0;
- if (priv->nr_retries) {
- lbs_pr_info("Received result %x to command %x after %d retries\n",
- result, curcmd, priv->nr_retries);
- priv->nr_retries = 0;
- }
/* Store the response code to cur_cmd_retcode. */
priv->cur_cmd_retcode = result;
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index c348aff8f30..6977ee82021 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -109,7 +109,6 @@ struct lbs_private {
struct list_head cmdpendingq; /* pending command buffers */
wait_queue_head_t cmd_pending;
struct timer_list command_timer;
- int nr_retries;
int cmd_timed_out;
/* Command responses sent from the hardware to the driver */
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index bf4bfbae622..3ea03f259ee 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/semaphore.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 60bde1233a3..cd8ed7fdafa 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -536,31 +536,14 @@ static int lbs_thread(void *data)
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
- if (++priv->nr_retries > 3) {
- lbs_pr_info("Excessive timeouts submitting "
- "command 0x%04x\n",
- le16_to_cpu(cmdnode->cmdbuf->command));
- lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
- priv->nr_retries = 0;
- if (priv->reset_card)
- priv->reset_card(priv);
- } else {
- priv->cur_cmd = NULL;
- priv->dnld_sent = DNLD_RES_RECEIVED;
- lbs_pr_info("requeueing command 0x%04x due "
- "to timeout (#%d)\n",
- le16_to_cpu(cmdnode->cmdbuf->command),
- priv->nr_retries);
-
- /* Stick it back at the _top_ of the pending queue
- for immediate resubmission */
- list_add(&cmdnode->list, &priv->cmdpendingq);
- }
+ lbs_pr_info("Timeout submitting command 0x%04x\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+ lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
+ if (priv->reset_card)
+ priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
-
-
if (!priv->fw_ready)
continue;
@@ -732,7 +715,7 @@ done:
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
-static void command_timer_fn(unsigned long data)
+static void lbs_cmd_timeout_handler(unsigned long data)
{
struct lbs_private *priv = (struct lbs_private *)data;
unsigned long flags;
@@ -851,7 +834,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
mutex_init(&priv->lock);
- setup_timer(&priv->command_timer, command_timer_fn,
+ setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
(unsigned long)priv);
setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
(unsigned long)priv);
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ba3eb0101d5..6ab30033c26 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -555,6 +555,9 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
SET_IEEE80211_DEV(hw, dmdev);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0dbda8dfbd9..00ffe6dd435 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,10 @@ static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
+static bool fake_hw_scan;
+module_param(fake_hw_scan, bool, 0444);
+MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -908,8 +912,43 @@ static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
*/
}
+struct hw_scan_done {
+ struct delayed_work w;
+ struct ieee80211_hw *hw;
+};
-static const struct ieee80211_ops mac80211_hwsim_ops =
+static void hw_scan_done(struct work_struct *work)
+{
+ struct hw_scan_done *hsd =
+ container_of(work, struct hw_scan_done, w.work);
+
+ ieee80211_scan_completed(hsd->hw, false);
+ kfree(hsd);
+}
+
+static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
+{
+ struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
+ int i;
+
+ if (!hsd)
+ return -ENOMEM;
+
+ hsd->hw = hw;
+ INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
+
+ printk(KERN_DEBUG "hwsim scan request\n");
+ for (i = 0; i < req->n_channels; i++)
+ printk(KERN_DEBUG "hwsim scan freq %d\n",
+ req->channels[i]->center_freq);
+
+ ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
+
+ return 0;
+}
+
+static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
@@ -1119,6 +1158,9 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
+ if (fake_hw_scan)
+ mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
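Since fake_hw_scan is declared with mode 0444 it can only be chosen at module load time, for example with modprobe mac80211_hwsim fake_hw_scan=1; the installed handler simply logs each requested center frequency and then reports the scan as complete roughly two seconds later (2 * HZ) via ieee80211_scan_completed().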
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f0f08f3919c..0cfdb9db66f 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -119,7 +119,7 @@ struct mwl8k_tx_queue {
/* sw appends here */
int tail;
- struct ieee80211_tx_queue_stats stats;
+ unsigned int len;
struct mwl8k_tx_desc *txd;
dma_addr_t txd_dma;
struct sk_buff **skb;
@@ -1136,8 +1136,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
int size;
int i;
- memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
- txq->stats.limit = MWL8K_TX_DESCS;
+ txq->len = 0;
txq->head = 0;
txq->tail = 0;
@@ -1213,7 +1212,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d "
"fw_owned=%d drv_owned=%d unused=%d\n",
wiphy_name(hw->wiphy), i,
- txq->stats.len, txq->head, txq->tail,
+ txq->len, txq->head, txq->tail,
fw_owned, drv_owned, unused);
}
}
@@ -1299,7 +1298,7 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
int processed;
processed = 0;
- while (txq->stats.len > 0 && limit--) {
+ while (txq->len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
@@ -1321,8 +1320,8 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
}
txq->head = (tx + 1) % MWL8K_TX_DESCS;
- BUG_ON(txq->stats.len == 0);
- txq->stats.len--;
+ BUG_ON(txq->len == 0);
+ txq->len--;
priv->pending_tx_pkts--;
addr = le32_to_cpu(tx_desc->pkt_phys_addr);
@@ -1454,8 +1453,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
- txq->stats.count++;
- txq->stats.len++;
+ txq->len++;
priv->pending_tx_pkts++;
txq->tail++;
@@ -3818,24 +3816,6 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
return rc;
}
-static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_tx_queue *txq;
- int index;
-
- spin_lock_bh(&priv->tx_lock);
- for (index = 0; index < MWL8K_TX_QUEUES; index++) {
- txq = priv->txq + index;
- memcpy(&stats[index], &txq->stats,
- sizeof(struct ieee80211_tx_queue_stats));
- }
- spin_unlock_bh(&priv->tx_lock);
-
- return 0;
-}
-
static int mwl8k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -3871,7 +3851,6 @@ static const struct ieee80211_ops mwl8k_ops = {
.set_rts_threshold = mwl8k_set_rts_threshold,
.sta_notify = mwl8k_sta_notify,
.conf_tx = mwl8k_conf_tx,
- .get_tx_stats = mwl8k_get_tx_stats,
.get_stats = mwl8k_get_stats,
.ampdu_action = mwl8k_ampdu_action,
};
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 753a1804eee..a9e9cea2d76 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1668,12 +1668,12 @@ __orinoco_set_multicast_list(struct net_device *dev)
/* The Hermes doesn't seem to have an allmulti mode, so we go
* into promiscuous mode and let the upper levels deal. */
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > MAX_MULTICAST(priv))) {
+ (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
promisc = 1;
mc_count = 0;
} else {
promisc = 0;
- mc_count = dev->mc_count;
+ mc_count = netdev_mc_count(dev);
}
err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count,
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 26428e4c9c6..3fe6366e567 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -358,16 +358,6 @@ static int p54_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int p54_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct p54_common *priv = dev->priv;
-
- memcpy(stats, &priv->tx_stats[P54_QUEUE_DATA],
- sizeof(stats[0]) * dev->queues);
- return 0;
-}
-
static void p54_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -522,7 +512,6 @@ static const struct ieee80211_ops p54_ops = {
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
.get_stats = p54_get_stats,
- .get_tx_stats = p54_get_tx_stats
};
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 1afc39410e8..43a3b2ead81 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -157,6 +157,12 @@ struct p54_led_dev {
#endif /* CONFIG_P54_LEDS */
+struct p54_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
+};
+
struct p54_common {
struct ieee80211_hw *hw;
struct ieee80211_vif *vif;
@@ -183,7 +189,7 @@ struct p54_common {
/* (e)DCF / QOS state */
bool use_short_slot;
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[8];
+ struct p54_tx_queue_stats tx_stats[8];
struct p54_edcf_queue_param qos_params[8];
/* Radio data */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index b6dda2b27fb..0e8f69461ff 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -183,7 +183,7 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
struct sk_buff *skb,
const u16 p54_queue)
{
- struct ieee80211_tx_queue_stats *queue;
+ struct p54_tx_queue_stats *queue;
unsigned long flags;
if (WARN_ON(p54_queue > P54_QUEUE_NUM))
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e1e4e32b2..85905cab4f1 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1950,7 +1950,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
ray_update_multi_list(dev, 1);
else {
- if (local->num_multi != dev->mc_count)
+ if (local->num_multi != netdev_mc_count(dev))
ray_update_multi_list(dev, 0);
}
} /* end set_multicast_list */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 305c106fdc1..14692bc51b5 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1492,10 +1492,10 @@ static void set_multicast_list(struct usbnet *usbdev)
filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
RNDIS_PACKET_TYPE_ALL_LOCAL;
} else if (usbdev->net->flags & IFF_ALLMULTI ||
- usbdev->net->mc_count > priv->multicast_size) {
+ netdev_mc_count(usbdev->net) > priv->multicast_size) {
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- } else if (usbdev->net->mc_count > 0) {
- size = min(priv->multicast_size, usbdev->net->mc_count);
+ } else if (!netdev_mc_empty(usbdev->net)) {
+ size = min(priv->multicast_size, netdev_mc_count(usbdev->net));
buf = kmalloc(size * ETH_ALEN, GFP_KERNEL);
if (!buf) {
devwarn(usbdev,
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index aa579eb8723..108982762d4 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1561,7 +1561,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2400pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2400pci_get_tsf,
.tx_last_beacon = rt2400pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 77ee1df7933..f6440bb0e5f 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1859,7 +1859,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2500pci_get_tsf,
.tx_last_beacon = rt2500pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9e6f865c57f..81ca4ec068d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1761,7 +1761,6 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 529a37364eb..a45e027f2d1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -2297,7 +2297,6 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
.set_rts_threshold = rt2800_set_rts_threshold,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 096da85a66f..43b70c6e4e9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1044,8 +1044,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 00f1f939f1b..abbd857ec75 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -555,22 +555,6 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
- unsigned int i;
-
- for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
- stats[i].len = rt2x00dev->tx[i].length;
- stats[i].limit = rt2x00dev->tx[i].limit;
- stats[i].count = rt2x00dev->tx[i].count;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats);
-
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 1f97a797bc4..74de53e68b4 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2730,7 +2730,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt61pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt61pci_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index a0269129439..3781eb7b4aa 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2245,7 +2245,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt73usb_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt73usb_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index a0538255778..0fb850e0c65 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
/* Sitecom */
{USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+ {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
/* Sphairon Access Systems GmbH */
{USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
/* Dick Smith Electronics */
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index a717dde4822..24ae6a360ac 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -1144,9 +1144,10 @@ static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
if (ret < 0)
goto out;
+ /* mac80211 uses units of 32 usec */
ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
params->cw_min, params->cw_max,
- params->aifs, params->txop);
+ params->aifs, params->txop * 32);
if (ret < 0)
goto out_sleep;
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 33c8be7ec8e..5d2b52f4717 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -879,16 +879,15 @@ static void zd1201_set_multicast(struct net_device *dev)
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
- if (dev->mc_count > ZD1201_MAXMULTI)
+ if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
return;
- for (i=0; i<dev->mc_count; i++) {
+ for (i=0; i<netdev_mc_count(dev); i++) {
memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN);
mc = mc->next;
}
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
- dev->mc_count*ETH_ALEN, 0);
-
+ netdev_mc_count(dev) * ETH_ALEN, 0);
}
static int zd1201_config_commit(struct net_device *dev,
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index f7fe1aa03b4..1a74594224b 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -22,11 +22,17 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
+#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
+#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
+#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
#define XEL_TSR_OFFSET 0x07FC /* Tx status */
#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
@@ -37,6 +43,22 @@
#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
+/* MDIO Address Register Bit Masks */
+#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
+#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
+#define XEL_MDIOADDR_PHYADR_SHIFT 5
+#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
+
+/* MDIO Write Data Register Bit Masks */
+#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
+
+/* MDIO Read Data Register Bit Masks */
+#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
+
+/* MDIO Control Register Bit Masks */
+#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
+#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
+
/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
@@ -87,6 +109,12 @@
* @reset_lock: lock used for synchronization
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
+ * @phy_dev: pointer to the PHY device
+ * @phy_node: pointer to the PHY device node
+ * @mii_bus: pointer to the MII bus
+ * @mdio_irqs: IRQs table for MDIO bus
+ * @last_link: last link status
+ * @has_mdio: indicates whether MDIO is included in the HW
*/
struct net_local {
@@ -100,6 +128,15 @@ struct net_local {
spinlock_t reset_lock;
struct sk_buff *deferred_skb;
+
+ struct phy_device *phy_dev;
+ struct device_node *phy_node;
+
+ struct mii_bus *mii_bus;
+ int mdio_irqs[PHY_MAX_ADDR];
+
+ int last_link;
+ bool has_mdio;
};
@@ -431,7 +468,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
}
/**
- * xemaclite_set_mac_address - Set the MAC address for this device
+ * xemaclite_update_address - Update the MAC address in the device
* @drvdata: Pointer to the Emaclite device private data
* @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
*
@@ -441,8 +478,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
* The MAC address can be programmed using any of the two transmit
* buffers (if configured).
*/
-static void xemaclite_set_mac_address(struct net_local *drvdata,
- u8 *address_ptr)
+static void xemaclite_update_address(struct net_local *drvdata,
+ u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -465,6 +502,30 @@ static void xemaclite_set_mac_address(struct net_local *drvdata,
}
/**
+ * xemaclite_set_mac_address - Set the MAC address for this device
+ * @dev: Pointer to the network device instance
+ * @addr: Void pointer to the sockaddr structure
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * Return: Error if the net device is busy or 0 if the addr is set
+ * successfully
+ */
+static int xemaclite_set_mac_address(struct net_device *dev, void *address)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct sockaddr *addr = address;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ xemaclite_update_address(lp, dev->dev_addr);
+ return 0;
+}
+
+/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
*
@@ -641,12 +702,219 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/**********************/
+/* MDIO Bus functions */
+/**********************/
+
+/**
+ * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
+ * @lp: Pointer to the Emaclite device private data
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request.
+ *
+ * Return: 0 for success or ETIMEDOUT for a timeout
+ */
+
+static int xemaclite_mdio_wait(struct net_local *lp)
+{
+ long end = jiffies + 2;
+
+ /* wait for the MDIO interface to not be busy or timeout
+ after some time.
+ */
+ while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ XEL_MDIOCTRL_MDIOSTS_MASK) {
+ if (end - jiffies <= 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_read - Read from a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to read from
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request and then writes the phy address to the MDIO Address register
+ * and reads data from the MDIO Read Data register, when it is available.
+ *
+ * Return: Value read from the MII management register
+ */
+static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+ u32 rc;
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and set the OP bit in the
+ * MDIO Address register. Set the Status bit in the MDIO Control
+ * register to start a MDIO read transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+/**
+ * xemaclite_mdio_write - Write to a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to write to
+ * @val: value to write to the register number specified by reg
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request and then writes the val to the MDIO Write Data register.
+ */
+static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and clear the OP bit in the
+ * MDIO Address register and then write the value into the MDIO Write
+ * Data register. Finally, set the Status bit in the MDIO Control
+ * register to start a MDIO write transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ ~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_reset - Reset the mdio bus.
+ * @bus: Pointer to the MII bus
+ *
+ * This function is required(?) as per Documentation/networking/phy.txt.
+ * There is no reset in this device; this function always returns 0.
+ */
+static int xemaclite_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
+ * @lp: Pointer to the Emaclite device private data
+ * @ofdev: Pointer to OF device structure
+ *
+ * This function enables MDIO bus in the Emaclite device and registers a
+ * mii_bus.
+ *
+ * Return: 0 upon success or a negative error upon failure
+ */
+static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
+{
+ struct mii_bus *bus;
+ int rc;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+
+ /* Don't register the MDIO bus if the phy_node or its parent node
+ * can't be found.
+ */
+ if (!np)
+ return -ENODEV;
+
+ /* Enable the MDIO bus by asserting the enable bit in MDIO Control
+ * register.
+ */
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ XEL_MDIOCTRL_MDIOEN_MASK);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx Emaclite MDIO";
+ bus->read = xemaclite_mdio_read;
+ bus->write = xemaclite_mdio_write;
+ bus->reset = xemaclite_mdio_reset;
+ bus->parent = dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+/**
+ * xemaclite_adjust_link - Link state callback for the Emaclite device
+ * @ndev: pointer to net_device struct
+ *
+ * There's nothing in the Emaclite device to be configured when the link
+ * state changes. We just print the status.
+ */
+void xemaclite_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ if (lp->last_link != link_state) {
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+}
+
/**
* xemaclite_open - Open the network device
* @dev: Pointer to the network device
*
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
+ * It also connects to the phy device, if MDIO is included in the Emaclite device.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -656,14 +924,47 @@ static int xemaclite_open(struct net_device *dev)
/* Just to be safe, stop the device first */
xemaclite_disable_interrupts(lp);
+ if (lp->phy_node) {
+ u32 bmcr;
+
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ xemaclite_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (!lp->phy_dev) {
+ dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ /* EmacLite doesn't support giga-bit speeds */
+ lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
+ lp->phy_dev->advertising = lp->phy_dev->supported;
+
+ /* Don't advertise 1000BASE-T Full/Half duplex speeds */
+ phy_write(lp->phy_dev, MII_CTRL1000, 0);
+
+ /* Advertise only 10 and 100mbps full/half duplex speeds */
+ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+
+ /* Restart auto negotiation */
+ bmcr = phy_read(lp->phy_dev, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(lp->phy_dev, MII_BMCR, bmcr);
+
+ phy_start(lp->phy_dev);
+ }
+
/* Set the MAC address each time opened */
- xemaclite_set_mac_address(lp, dev->dev_addr);
+ xemaclite_update_address(lp, dev->dev_addr);
/* Grab the IRQ */
retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
if (retval) {
dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
dev->irq);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return retval;
}
@@ -682,6 +983,7 @@ static int xemaclite_open(struct net_device *dev)
*
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
+ * It also disconnects the phy device associated with the Emaclite device.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -691,6 +993,10 @@ static int xemaclite_close(struct net_device *dev)
xemaclite_disable_interrupts(lp);
free_irq(dev->irq, dev);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return 0;
}
@@ -754,42 +1060,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
}
/**
- * xemaclite_ioctl - Perform IO Control operations on the network device
- * @dev: Pointer to the network device
- * @rq: Pointer to the interface request structure
- * @cmd: IOCTL command
- *
- * The only IOCTL operation supported by this function is setting the MAC
- * address. An error is reported if any other operations are requested.
- *
- * Return: 0 to indicate success, or a negative error for failure.
- */
-static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
- struct hw_addr_data *hw_addr = (struct hw_addr_data *) &rq->ifr_hwaddr;
-
- switch (cmd) {
- case SIOCETHTOOL:
- return -EIO;
-
- case SIOCSIFHWADDR:
- dev_err(&lp->ndev->dev, "SIOCSIFHWADDR\n");
-
- /* Copy MAC address in from user space */
- copy_from_user((void __force *) dev->dev_addr,
- (void __user __force *) hw_addr,
- IFHWADDRLEN);
- xemaclite_set_mac_address(lp, dev->dev_addr);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-/**
* xemaclite_remove_ndev - Free the network device
* @ndev: Pointer to the network device to be freed
*
@@ -840,6 +1110,8 @@ static struct net_device_ops xemaclite_netdev_ops;
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
* address and registers the network device.
+ * It also registers a mii_bus for the Emaclite device, if MDIO is included
+ * in the device.
*
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
@@ -880,6 +1152,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
dev_set_drvdata(dev, ndev);
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
ndev->irq = r_irq.start;
ndev->mem_start = r_mem.start;
@@ -923,7 +1196,12 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
/* Set the MAC address in the EmacLite device */
- xemaclite_set_mac_address(lp, ndev->dev_addr);
+ xemaclite_update_address(lp, ndev->dev_addr);
+
+ lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+ rc = xemaclite_mdio_setup(lp, &ofdev->dev);
+ if (rc)
+ dev_warn(&ofdev->dev, "error registering MDIO bus\n");
dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
@@ -968,12 +1246,25 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+
+ /* Un-register the mii_bus, if configured */
+ if (lp->has_mdio) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+ }
+
unregister_netdev(ndev);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+
release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
xemaclite_remove_ndev(ndev);
-
dev_set_drvdata(dev, NULL);
return 0;
@@ -983,7 +1274,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
- .ndo_do_ioctl = xemaclite_ioctl,
+ .ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
.ndo_get_stats = xemaclite_get_stats,
};
@@ -995,6 +1286,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
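The busy-wait in xemaclite_mdio_wait() above compares jiffies by hand; below is a sketch of the same loop written with the wrap-safe helpers from <linux/jiffies.h>. The 2 ms timeout is an assumption for illustration, not a value taken from the patch:

static int my_mdio_wait(struct net_local *lp)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(2);

	/* Poll the MDIO status bit until it clears or the deadline passes. */
	while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
	       XEL_MDIOCTRL_MDIOSTS_MASK) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(1);
	}
	return 0;
}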
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 8b231b30fd1..5c880240a64 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1301,15 +1301,16 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
iowrite16(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 64) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
struct dev_mc_list *mclist;
u16 hash_table[4];
int i;
memset(hash_table, 0, sizeof(hash_table));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
unsigned int bit;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b2a448e19fe..3e5ab2bf6a5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -706,6 +706,21 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+/**
+ * pci_num_vf - return the number of VFs associated with a PF
+ * @dev: the PCI device
+ *
+ * Returns number of VFs, or 0 if SR-IOV is not enabled.
+ */
+int pci_num_vf(struct pci_dev *dev)
+{
+ if (!dev || !dev->is_physfn)
+ return 0;
+ else
+ return dev->sriov->nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_num_vf);
+
static int ats_alloc_one(struct pci_dev *dev, int ps)
{
int pos;
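A short sketch of how a caller might use the new pci_num_vf() export; the reporting function and its message are illustrative only:

static void my_report_vfs(struct pci_dev *pdev)
{
	int num_vfs = pci_num_vf(pdev);	/* 0 unless SR-IOV is enabled on a PF */

	if (num_vfs)
		dev_info(&pdev->dev, "%d virtual function(s) enabled\n",
			 num_vfs);
}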
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 3c6feed46f6..97efce184a8 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -270,7 +270,6 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
}
break;
- /* fallthrough */
case SSB_DEV_PCI:
case SSB_DEV_ETHERNET:
case SSB_DEV_ETHERNET_GBIT:
@@ -281,6 +280,10 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
break;
}
+ /* fallthrough */
+ case SSB_DEV_EXTIF:
+ set_irq(dev, 0);
+ break;
}
}
ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebed9c6..03dfd27c4bf 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
#endif
break;
case SSB_BUSTYPE_SDIO:
-#ifdef CONFIG_SSB_SDIO
- sdev->irq = bus->host_sdio->dev.irq;
+#ifdef CONFIG_SSB_SDIOHOST
dev->parent = &bus->host_sdio->dev;
#endif
break;
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index 24d97b4fa6f..bc1fad24895 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -444,11 +444,11 @@ void et131x_multicast(struct net_device *netdev)
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count > NIC_MAX_MCAST_LIST) {
+ if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) {
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count < 1) {
+ if (netdev_mc_count(netdev) < 1) {
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
} else {
@@ -456,10 +456,10 @@ void et131x_multicast(struct net_device *netdev)
}
/* Set values in the private adapter struct */
- adapter->MCAddressCount = netdev->mc_count;
+ adapter->MCAddressCount = netdev_mc_count(netdev);
- if (netdev->mc_count) {
- count = netdev->mc_count - 1;
+ if (!netdev_mc_empty(netdev)) {
+ count = netdev_mc_count(netdev) - 1;
memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN);
}
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index e61e6b9440a..e936717d1f4 100644
--- a/drivers/staging/netwave/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
@@ -1341,15 +1341,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
xstatic int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
+ if (old != netdev_mc_count(dev)) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
}
}
#endif
- if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
/* Multicast Mode */
rcvMode = rxConfRxEna + rxConfAMP + rxConfBcast;
} else if (dev->flags & IFF_PROMISC) {
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 5b191afc144..8c9d5e5c770 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1365,7 +1365,7 @@ static void slic_mcast_set_list(struct net_device *dev)
int i;
char *addresses;
struct dev_mc_list *mc_list = dev->mc_list;
- int mc_count = dev->mc_count;
+ int mc_count = netdev_mc_count(dev);
ASSERT(adapter);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 0db8d7b6e79..82b3a6e0b15 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3093,7 +3093,7 @@ static void device_set_multi(struct net_device *dev) {
/* Unconditionally log net taps. */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit)
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
|| (dev->flags & IFF_ALLMULTI)) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
@@ -3103,7 +3103,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ for (i = 0, mclist = dev->mc_list; mclist && i < netdev_mc_count(dev);
i++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ef17c4958c6..2c6a5350547 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1619,7 +1619,8 @@ static void device_set_multi(struct net_device *dev) {
// Unconditionally log net taps.
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit) || (dev->flags & IFF_ALLMULTI)) {
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
MAC_REG_MAR0,
@@ -1631,7 +1632,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (ii = 0, mclist = dev->mc_list; mclist && ii < dev->mc_count;
+ for (ii = 0, mclist = dev->mc_list; mclist && ii < netdev_mc_count(dev);
ii++, mclist = mclist->next) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index d634b2da3b8..961f1417fb5 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -1367,7 +1367,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG
"%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
/* Are we asking for promiscuous mode,
@@ -1375,7 +1375,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
* or too many multicast addresses for the hardware filter? */
if ((dev->flags & IFF_PROMISC) ||
(dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(dev) > I82586_MAX_MULTICAST_ADDRESSES)) {
/*
* Enable promiscuous mode: receive all packets.
*/
@@ -1393,11 +1393,11 @@ static void wavelan_set_multicast_list(struct net_device * dev)
* in multicast list
*/
#ifdef MULTICAST_AVOID
- if (lp->promiscuous || (dev->mc_count != lp->mc_count))
+ if (lp->promiscuous || (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82586_reconfig(dev);
}
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 10c702b5be4..08fcb226d7d 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -1373,7 +1373,7 @@ wavelan_set_multicast_list(struct net_device * dev)
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
if(dev->flags & IFF_PROMISC)
@@ -1394,7 +1394,7 @@ wavelan_set_multicast_list(struct net_device * dev)
/* If all multicast addresses
* or too much multicast addresses for the hardware filter */
if((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82593_MAX_MULTICAST_ADDRESSES))
+ (netdev_mc_count(dev) > I82593_MAX_MULTICAST_ADDRESSES))
{
/*
* Disable promiscuous mode, but active the all multicast mode
@@ -1418,12 +1418,12 @@ wavelan_set_multicast_list(struct net_device * dev)
*/
#ifdef MULTICAST_AVOID
if(lp->promiscuous || lp->allmulticast ||
- (dev->mc_count != lp->mc_count))
+ (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
lp->allmulticast = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82593_reconfig(dev);
}
@@ -3622,7 +3622,8 @@ wv_82593_config(struct net_device * dev)
if(!wv_82593_cmd(dev, "wv_82593_config(): mc-setup",
OP0_MC_SETUP, SR0_MC_SETUP_DONE))
ret = FALSE;
- lp->mc_count = dev->mc_count; /* remember to avoid repeated reset */
+ /* remember to avoid repeated reset */
+ lp->mc_count = netdev_mc_count(dev);
}
/* Job done, clear the flag */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 0d22e3692fe..a95ebf881fc 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1070,9 +1070,9 @@ void wl_multicast( struct net_device *dev )
( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
- DBG_PRINT( " mc_count: %d\n", dev->mc_count );
+ DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- for( x = 0, mclist = dev->mc_list; mclist && x < dev->mc_count;
+ for( x = 0, mclist = dev->mc_list; mclist && x < netdev_mc_count(dev);
x++, mclist = mclist->next ) {
DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
mclist->dmi_addrlen );
@@ -1103,7 +1103,7 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
- else if(( dev->mc_count > HCF_MAX_MULTICAST ) ||
+ else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
( dev->flags & IFF_ALLMULTI )) {
/* Shutting off this filter will enable all multicast frames to
be sent up from the device; however, this is a static RID, so
@@ -1115,13 +1115,13 @@ void wl_multicast( struct net_device *dev )
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
- else if( dev->mc_count != 0 ) {
+ else if (!netdev_mc_empty(dev)) {
/* Set the multicast addresses */
- lp->ltvRecord.len = ( dev->mc_count * 3 ) + 1;
+ lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
for( x = 0, mclist = dev->mc_list;
- ( x < dev->mc_count ) && ( mclist != NULL );
+ ( x < netdev_mc_count(dev)) && ( mclist != NULL );
x++, mclist = mclist->next ) {
memcpy( &( lp->ltvRecord.u.u8[x * ETH_ALEN] ),
mclist->dmi_addr, ETH_ALEN );
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c8c25dbc585..6eb15259f5a 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -685,7 +685,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
int i, r;
/* Make sure data written is seen before log. */
- wmb();
+ smp_wmb();
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
@@ -884,7 +884,7 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
return vq->num;
/* Only get avail ring entries after they have been exposed by guest. */
- rmb();
+ smp_rmb();
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
@@ -996,14 +996,14 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
return -EFAULT;
}
/* Make sure buffer is written before we update index. */
- wmb();
+ smp_wmb();
if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Make sure data is seen before log. */
- wmb();
+ smp_wmb();
log_write(vq->log_base, vq->log_addr + sizeof *vq->used->ring *
(vq->last_used_idx % vq->num),
sizeof *vq->used->ring);
@@ -1060,7 +1060,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
- mb();
+ smp_mb();
r = get_user(avail_idx, &vq->avail->idx);
if (r) {
vq_err(vq, "Failed to check avail idx at %p: %d\n",
diff --git a/fs/seq_file.c b/fs/seq_file.c
index eae7d9dbf3f..f65b16f02da 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -674,7 +674,6 @@ struct list_head *seq_list_start(struct list_head *head, loff_t pos)
return NULL;
}
-
EXPORT_SYMBOL(seq_list_start);
struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
@@ -684,7 +683,6 @@ struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
return seq_list_start(head, pos - 1);
}
-
EXPORT_SYMBOL(seq_list_start_head);
struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
@@ -695,5 +693,60 @@ struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
++*ppos;
return lh == head ? NULL : lh;
}
-
EXPORT_SYMBOL(seq_list_next);
+
+/**
+ * seq_hlist_start - start an iteration of a hlist
+ * @head: the head of the hlist
+ * @pos: the start position of the sequence
+ *
+ * Called at seq_file->op->start().
+ */
+struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)
+{
+ struct hlist_node *node;
+
+ hlist_for_each(node, head)
+ if (pos-- == 0)
+ return node;
+ return NULL;
+}
+EXPORT_SYMBOL(seq_hlist_start);
+
+/**
+ * seq_hlist_start_head - start an iteration of a hlist
+ * @head: the head of the hlist
+ * @pos: the start position of the sequence
+ *
+ * Called at seq_file->op->start(). Call this function if you want to
+ * print a header at the top of the output.
+ */
+struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos)
+{
+ if (!pos)
+ return SEQ_START_TOKEN;
+
+ return seq_hlist_start(head, pos - 1);
+}
+EXPORT_SYMBOL(seq_hlist_start_head);
+
+/**
+ * seq_hlist_next - move to the next position of the hlist
+ * @v: the current iterator
+ * @head: the head of the hlist
+ * @ppos: the current position
+ *
+ * Called at seq_file->op->next().
+ */
+struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
+ loff_t *ppos)
+{
+ struct hlist_node *node = v;
+
+ ++*ppos;
+ if (v == SEQ_START_TOKEN)
+ return head->first;
+ else
+ return node->next;
+}
+EXPORT_SYMBOL(seq_hlist_next);
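A sketch of how the new seq_hlist_* helpers slot into a seq_file's start/next/stop callbacks; my_hlist and my_lock are hypothetical, and a real user must hold whatever lock (or RCU) protects the list across the walk:

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
{
	spin_lock(&my_lock);			/* hypothetical list lock */
	return seq_hlist_start_head(&my_hlist, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &my_hlist, pos);
}

static void my_seq_stop(struct seq_file *seq, void *v)
{
	spin_unlock(&my_lock);
}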
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index ef4a2d84d92..cca1c3de140 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -242,6 +242,7 @@ enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
+ ETH_SS_NTUPLE_FILTERS,
};
/* for passing string sets for data tagging */
@@ -290,6 +291,7 @@ struct ethtool_perm_addr {
*/
enum ethtool_flags {
ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
+ ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */
};
/* The following structures are for supporting RX network flow
@@ -363,6 +365,35 @@ struct ethtool_rxnfc {
__u32 rule_locs[0];
};
+struct ethtool_rx_ntuple_flow_spec {
+ __u32 flow_type;
+ union {
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
+ struct ethtool_tcpip4_spec udp_ip4_spec;
+ struct ethtool_tcpip4_spec sctp_ip4_spec;
+ struct ethtool_ah_espip4_spec ah_ip4_spec;
+ struct ethtool_ah_espip4_spec esp_ip4_spec;
+ struct ethtool_rawip4_spec raw_ip4_spec;
+ struct ethtool_ether_spec ether_spec;
+ struct ethtool_usrip4_spec usr_ip4_spec;
+ __u8 hdata[64];
+ } h_u, m_u; /* entry, mask */
+
+ __u16 vlan_tag;
+ __u16 vlan_tag_mask;
+ __u64 data; /* user-defined flow spec data */
+ __u64 data_mask; /* user-defined flow spec mask */
+
+ /* signed to distinguish between queue and actions (DROP) */
+ __s32 action;
+#define ETHTOOL_RXNTUPLE_ACTION_DROP -1
+};
+
+struct ethtool_rx_ntuple {
+ __u32 cmd;
+ struct ethtool_rx_ntuple_flow_spec fs;
+};
+
#define ETHTOOL_FLASH_MAX_FILENAME 128
enum ethtool_flash_op_type {
ETHTOOL_FLASH_ALL_REGIONS = 0,
@@ -377,6 +408,20 @@ struct ethtool_flash {
#ifdef __KERNEL__
+#include <linux/rculist.h>
+
+struct ethtool_rx_ntuple_flow_spec_container {
+ struct ethtool_rx_ntuple_flow_spec fs;
+ struct list_head list;
+};
+
+struct ethtool_rx_ntuple_list {
+#define ETHTOOL_MAX_NTUPLE_LIST_ENTRY 1024
+#define ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY 14
+ struct list_head list;
+ unsigned int count;
+};
+
struct net_device;
/* Some generic methods drivers may use in their ethtool_ops */
@@ -394,6 +439,7 @@ u32 ethtool_op_get_ufo(struct net_device *dev);
int ethtool_op_set_ufo(struct net_device *dev, u32 data);
u32 ethtool_op_get_flags(struct net_device *dev);
int ethtool_op_set_flags(struct net_device *dev, u32 data);
+void ethtool_ntuple_flush(struct net_device *dev);
/**
* &ethtool_ops - Alter and report network device settings
@@ -500,6 +546,8 @@ struct ethtool_ops {
int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
int (*flash_device)(struct net_device *, struct ethtool_flash *);
int (*reset)(struct net_device *, u32 *);
+ int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *);
+ int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
};
#endif /* __KERNEL__ */
@@ -559,6 +607,9 @@ struct ethtool_ops {
#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
#define ETHTOOL_RESET 0x00000034 /* Reset hardware */
+#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
+#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
+
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
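A sketch of the driver side of the new ETHTOOL_SRXNTUPLE command; the hardware-programming helpers are hypothetical, and a negative action is read as a drop rule per ETHTOOL_RXNTUPLE_ACTION_DROP above:

static int my_set_rx_ntuple(struct net_device *dev,
			    struct ethtool_rx_ntuple *cmd)
{
	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;

	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
		return my_hw_add_drop_filter(dev, fs);		/* hypothetical */

	if (fs->action < 0)
		return -EINVAL;

	/* Otherwise the action names the RX queue the flow is steered to. */
	return my_hw_add_queue_filter(dev, fs, fs->action);	/* hypothetical */
}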
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 6674791622c..c9bf92cd765 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -78,6 +78,11 @@ enum {
#define IFLA_LINKINFO IFLA_LINKINFO
IFLA_NET_NS_PID,
IFLA_IFALIAS,
+ IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */
+ IFLA_VF_MAC, /* Hardware queue specific attributes */
+ IFLA_VF_VLAN,
+ IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */
+ IFLA_VFINFO,
__IFLA_MAX
};
@@ -196,4 +201,29 @@ enum macvlan_mode {
MACVLAN_MODE_BRIDGE = 4, /* talk to bridge ports directly */
};
+/* SR-IOV virtual function management section */
+
+struct ifla_vf_mac {
+ __u32 vf;
+ __u8 mac[32]; /* MAX_ADDR_LEN */
+};
+
+struct ifla_vf_vlan {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+};
+
+struct ifla_vf_tx_rate {
+ __u32 vf;
+ __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
+};
+
+struct ifla_vf_info {
+ __u32 vf;
+ __u8 mac[32];
+ __u32 vlan;
+ __u32 qos;
+ __u32 tx_rate;
+};
#endif /* _LINUX_IF_LINK_H */
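A sketch of a PF driver filling struct ifla_vf_info for the new rtnetlink VF attributes; the adapter bookkeeping fields (num_vfs, vf_mac, vf_vlan, vf_qos, vf_rate) are hypothetical:

static int my_get_vf_config(struct net_device *dev, int vf,
			    struct ifla_vf_info *ivi)
{
	struct my_adapter *adapter = netdev_priv(dev);

	if (vf >= adapter->num_vfs)		/* hypothetical bookkeeping */
		return -EINVAL;

	ivi->vf = vf;
	memcpy(ivi->mac, adapter->vf_mac[vf], ETH_ALEN);
	ivi->vlan = adapter->vf_vlan[vf];
	ivi->qos = adapter->vf_qos[vf];
	ivi->tx_rate = adapter->vf_rate[vf];
	return 0;
}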
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e535700a3b7..682d02521bb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -28,6 +28,7 @@
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
+#include <linux/if_link.h>
#ifdef __KERNEL__
#include <linux/timer.h>
@@ -621,6 +622,13 @@ struct netdev_queue {
* this function is called when a VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
+ *
+ * SR-IOV management functions.
+ * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
+ * int (*ndo_get_vf_config)(struct net_device *dev,
+ * int vf, struct ifla_vf_info *ivf);
*/
#define HAVE_NET_DEVICE_OPS
struct net_device_ops {
@@ -660,6 +668,15 @@ struct net_device_ops {
#ifdef CONFIG_NET_POLL_CONTROLLER
void (*ndo_poll_controller)(struct net_device *dev);
#endif
+ int (*ndo_set_vf_mac)(struct net_device *dev,
+ int queue, u8 *mac);
+ int (*ndo_set_vf_vlan)(struct net_device *dev,
+ int queue, u16 vlan, u8 qos);
+ int (*ndo_set_vf_tx_rate)(struct net_device *dev,
+ int vf, int rate);
+ int (*ndo_get_vf_config)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_info *ivf);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
int (*ndo_fcoe_enable)(struct net_device *dev);
int (*ndo_fcoe_disable)(struct net_device *dev);
@@ -746,6 +763,7 @@ struct net_device {
#define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
+#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT 16
@@ -954,6 +972,8 @@ struct net_device {
/* max exchange id for FCoE LRO by ddp */
unsigned int fcoe_ddp_xid;
#endif
+ /* n-tuple filter list attached to this device */
+ struct ethtool_rx_ntuple_list ethtool_ntuple_list;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2092,6 +2112,130 @@ static inline u32 dev_ethtool_get_flags(struct net_device *dev)
return 0;
return dev->ethtool_ops->get_flags(dev);
}
+
+/* Logging, debugging and troubleshooting/diagnostic helpers. */
+
+/* netdev_printk helpers, similar to dev_printk */
+
+static inline const char *netdev_name(const struct net_device *dev)
+{
+ if (dev->reg_state != NETREG_REGISTERED)
+ return "(unregistered net_device)";
+ return dev->name;
+}
+
+#define netdev_printk(level, netdev, format, args...) \
+ dev_printk(level, (netdev)->dev.parent, \
+ "%s: " format, \
+ netdev_name(netdev), ##args)
+
+#define netdev_emerg(dev, format, args...) \
+ netdev_printk(KERN_EMERG, dev, format, ##args)
+#define netdev_alert(dev, format, args...) \
+ netdev_printk(KERN_ALERT, dev, format, ##args)
+#define netdev_crit(dev, format, args...) \
+ netdev_printk(KERN_CRIT, dev, format, ##args)
+#define netdev_err(dev, format, args...) \
+ netdev_printk(KERN_ERR, dev, format, ##args)
+#define netdev_warn(dev, format, args...) \
+ netdev_printk(KERN_WARNING, dev, format, ##args)
+#define netdev_notice(dev, format, args...) \
+ netdev_printk(KERN_NOTICE, dev, format, ##args)
+#define netdev_info(dev, format, args...) \
+ netdev_printk(KERN_INFO, dev, format, ##args)
+
+#if defined(DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netdev_dbg(__dev, format, args...) \
+do { \
+ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
+ netdev_name(__dev), ##args); \
+} while (0)
+#else
+#define netdev_dbg(__dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, __dev, format, ##args); \
+ 0; \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netdev_vdbg netdev_dbg
+#else
+
+#define netdev_vdbg(dev, format, args...) \
+({ \
+ if (0) \
+ netdev_printk(KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+/*
+ * netdev_WARN() acts like dev_printk(), but with the key difference
+ * of using a WARN/WARN_ON to get the message out, including the
+ * file/line information and a backtrace.
+ */
+#define netdev_WARN(dev, format, args...) \
+ WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+
+/* netif printk helpers, similar to netdev_printk */
+
+#define netif_printk(priv, type, level, dev, fmt, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ netdev_printk(level, (dev), fmt, ##args); \
+} while (0)
+
+#define netif_emerg(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
+#define netif_alert(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
+#define netif_crit(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
+#define netif_err(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
+#define netif_warn(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
+#define netif_notice(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
+#define netif_info(priv, type, dev, fmt, args...) \
+ netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
+
+#if defined(DEBUG)
+#define netif_dbg(priv, type, dev, format, args...) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
+#elif defined(CONFIG_DYNAMIC_DEBUG)
+#define netif_dbg(priv, type, netdev, format, args...) \
+do { \
+ if (netif_msg_##type(priv)) \
+ dynamic_dev_dbg((netdev)->dev.parent, \
+ "%s: " format, \
+ netdev_name(netdev), ##args); \
+} while (0)
+#else
+#define netif_dbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
+#if defined(VERBOSE_DEBUG)
+#define netif_vdbg netdev_dbg
+#else
+#define netif_vdbg(priv, type, dev, format, args...) \
+({ \
+ if (0) \
+ netif_printk(KERN_DEBUG, dev, format, ##args); \
+ 0; \
+})
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_NETDEVICE_H */
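A sketch of the new logging helpers in use; the driver's private struct is assumed to carry the usual msg_enable bitmap consulted by netif_msg_link():

static void my_link_report(struct my_priv *priv, struct net_device *dev)
{
	if (netif_carrier_ok(dev))
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");

	/* Only emitted when the 'link' bit is set in priv->msg_enable. */
	netif_dbg(priv, link, dev, "msg_enable=%#x\n", priv->msg_enable);
}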
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 174e5392e51..59a98e2ee2c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -612,6 +612,9 @@ extern void pci_remove_bus_device(struct pci_dev *dev);
extern void pci_stop_bus_device(struct pci_dev *dev);
void pci_setup_cardbus(struct pci_bus *bus);
extern void pci_sort_breadthfirst(void);
+#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
+#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
+#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
/* Generic PCI functions exported to card drivers */
@@ -1129,6 +1132,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
unsigned int devfn)
{ return NULL; }
+#define dev_is_pci(d) (false)
+#define dev_is_pf(d) (false)
+#define dev_num_vf(d) (0)
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
@@ -1286,6 +1292,7 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
extern void pci_disable_sriov(struct pci_dev *dev);
extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
+extern int pci_num_vf(struct pci_dev *dev);
#else
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
@@ -1298,6 +1305,10 @@ static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
return IRQ_NONE;
}
+static inline int pci_num_vf(struct pci_dev *dev)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
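A sketch of the dev_is_pf()/dev_num_vf() wrappers used from generic, bus-agnostic code; they evaluate to false/0 for non-PCI parents or when CONFIG_PCI is disabled:

static int my_count_parent_vfs(struct net_device *netdev)
{
	struct device *parent = netdev->dev.parent;

	if (!parent || !dev_is_pf(parent))
		return 0;
	return dev_num_vf(parent);
}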
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 8366d8f12e5..c95bcdc18f4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -135,4 +135,15 @@ extern struct list_head *seq_list_start_head(struct list_head *head,
extern struct list_head *seq_list_next(void *v, struct list_head *head,
loff_t *ppos);
+/*
+ * Helpers for iteration over hlist_head-s in seq_files
+ */
+
+extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
+ loff_t pos);
+extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
+ loff_t pos);
+extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
+ loff_t *ppos);
+
#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ae836fded53..ba0f8e3a9cd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -738,7 +738,7 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
}
/**
- * skb_peek
+ * skb_peek - peek at the head of an &sk_buff_head
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
@@ -759,7 +759,7 @@ static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
}
/**
- * skb_peek_tail
+ * skb_peek_tail - peek at the tail of an &sk_buff_head
* @list_: list to peek at
*
* Peek an &sk_buff. Unlike most other operations you _MUST_
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a3f0a7ed31a..5b3569b2a74 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1561,37 +1561,82 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
* Documentation in Documentation/networking/radiotap-headers.txt
*/
+struct radiotap_align_size {
+ uint8_t align:4, size:4;
+};
+
+struct ieee80211_radiotap_namespace {
+ const struct radiotap_align_size *align_size;
+ int n_bits;
+ uint32_t oui;
+ uint8_t subns;
+};
+
+struct ieee80211_radiotap_vendor_namespaces {
+ const struct ieee80211_radiotap_namespace *ns;
+ int n_ns;
+};
+
/**
* struct ieee80211_radiotap_iterator - tracks walk thru present radiotap args
- * @rtheader: pointer to the radiotap header we are walking through
- * @max_length: length of radiotap header in cpu byte ordering
- * @this_arg_index: IEEE80211_RADIOTAP_... index of current arg
- * @this_arg: pointer to current radiotap arg
- * @arg_index: internal next argument index
- * @arg: internal next argument pointer
- * @next_bitmap: internal pointer to next present u32
- * @bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present
+ * @this_arg_index: index of current arg, valid after each successful call
+ * to ieee80211_radiotap_iterator_next()
+ * @this_arg: pointer to current radiotap arg; it is valid after each
+ * call to ieee80211_radiotap_iterator_next() but also after
+ * ieee80211_radiotap_iterator_init() where it will point to
+ * the beginning of the actual data portion
+ * @this_arg_size: length of the current arg, for convenience
+ * @current_namespace: pointer to the current namespace definition
+ * (or internally %NULL if the current namespace is unknown)
+ * @is_radiotap_ns: indicates whether the current namespace is the default
+ * radiotap namespace or not
+ *
+ * @overrides: override standard radiotap fields
+ * @n_overrides: number of overrides
+ *
+ * @_rtheader: pointer to the radiotap header we are walking through
+ * @_max_length: length of radiotap header in cpu byte ordering
+ * @_arg_index: next argument index
+ * @_arg: next argument pointer
+ * @_next_bitmap: internal pointer to next present u32
+ * @_bitmap_shifter: internal shifter for curr u32 bitmap, b0 set == arg present
+ * @_vns: vendor namespace definitions
+ * @_next_ns_data: beginning of the next namespace's data
+ * @_reset_on_ext: internal; reset the arg index to 0 when going to the
+ * next bitmap word
+ *
+ * Describes the radiotap parser state. Fields prefixed with an underscore
+ * must not be used by users of the parser, only by the parser internally.
*/
struct ieee80211_radiotap_iterator {
- struct ieee80211_radiotap_header *rtheader;
- int max_length;
+ struct ieee80211_radiotap_header *_rtheader;
+ const struct ieee80211_radiotap_vendor_namespaces *_vns;
+ const struct ieee80211_radiotap_namespace *current_namespace;
+
+ unsigned char *_arg, *_next_ns_data;
+ uint32_t *_next_bitmap;
+
+ unsigned char *this_arg;
int this_arg_index;
- u8 *this_arg;
+ int this_arg_size;
- int arg_index;
- u8 *arg;
- __le32 *next_bitmap;
- u32 bitmap_shifter;
+ int is_radiotap_ns;
+
+ int _max_length;
+ int _arg_index;
+ uint32_t _bitmap_shifter;
+ int _reset_on_ext;
};
extern int ieee80211_radiotap_iterator_init(
- struct ieee80211_radiotap_iterator *iterator,
- struct ieee80211_radiotap_header *radiotap_header,
- int max_length);
+ struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns);
extern int ieee80211_radiotap_iterator_next(
- struct ieee80211_radiotap_iterator *iterator);
+ struct ieee80211_radiotap_iterator *iterator);
+
extern const unsigned char rfc1042_header[6];
extern const unsigned char bridge_tunnel_header[6];
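A sketch of walking radiotap fields with the reworked iterator, passing NULL for the vendor namespace table. Error-code details (e.g. the exact value returned at end of arguments) are assumptions; consult the iterator implementation before relying on them, and my_handle_flags() is hypothetical:

static void my_parse_radiotap(struct sk_buff *skb)
{
	struct ieee80211_radiotap_iterator it;
	int ret;

	ret = ieee80211_radiotap_iterator_init(&it,
			(struct ieee80211_radiotap_header *)skb->data,
			skb->len, NULL);

	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&it);
		if (ret)
			break;		/* end of arguments or malformed header */
		if (!it.is_radiotap_ns)
			continue;	/* skip vendor namespaces */
		if (it.this_arg_index == IEEE80211_RADIOTAP_FLAGS)
			my_handle_flags(*it.this_arg);	/* hypothetical */
	}
}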
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 9d3d86aaccb..af49f8ab7f8 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -198,6 +198,10 @@ enum ieee80211_radiotap_type {
IEEE80211_RADIOTAP_TX_FLAGS = 15,
IEEE80211_RADIOTAP_RTS_RETRIES = 16,
IEEE80211_RADIOTAP_DATA_RETRIES = 17,
+
+ /* valid in every it_present bitmap, even vendor namespaces */
+ IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE = 30,
IEEE80211_RADIOTAP_EXT = 31
};
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 257808188ad..8f279ddb359 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -129,6 +129,8 @@ struct fib6_walker_t {
struct rt6_info *leaf;
unsigned char state;
unsigned char prune;
+ unsigned int skip;
+ unsigned int count;
int (*func)(struct fib6_walker_t *);
void *args;
};
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 74ccf30fdf8..314e9817316 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -117,19 +117,6 @@ struct ieee80211_tx_queue_params {
bool uapsd;
};
-/**
- * struct ieee80211_tx_queue_stats - transmit queue statistics
- *
- * @len: number of packets in queue
- * @limit: queue length limit
- * @count: number of frames sent
- */
-struct ieee80211_tx_queue_stats {
- unsigned int len;
- unsigned int limit;
- unsigned int count;
-};
-
struct ieee80211_low_level_stats {
unsigned int dot11ACKFailureCount;
unsigned int dot11RTSFailureCount;
@@ -814,7 +801,7 @@ enum set_key_cmd {
* mac80211, any ieee80211_sta pointer you get access to must
* either be protected by rcu_read_lock() explicitly or implicitly,
* or you must take good care to not use such a pointer after a
- * call to your sta_notify callback that removed it.
+ * call to your sta_remove callback that removed it.
*
* @addr: MAC address
* @aid: AID we assigned to the station if we're an AP
@@ -840,8 +827,8 @@ struct ieee80211_sta {
* indicates addition and removal of a station to the station table,
* or if an associated station made a power state transition.
*
- * @STA_NOTIFY_ADD: a station was added to the station table
- * @STA_NOTIFY_REMOVE: a station being removed from the station table
+ * @STA_NOTIFY_ADD: (DEPRECATED) a station was added to the station table
+ * @STA_NOTIFY_REMOVE: (DEPRECATED) a station being removed from the station table
* @STA_NOTIFY_SLEEP: a station is now sleeping
* @STA_NOTIFY_AWAKE: a sleeping station woke up
*/
@@ -958,6 +945,11 @@ enum ieee80211_tkip_key_type {
* Hardware supports Unscheduled Automatic Power Save Delivery
* (U-APSD) in managed mode. The mode is configured with
* conf_tx() operation.
+ *
+ * @IEEE80211_HW_REPORTS_TX_ACK_STATUS:
+ * Hardware can provide ack status reports of Tx frames to
+ * the stack.
+ *
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -978,6 +970,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
};
/**
@@ -1534,22 +1527,20 @@ enum ieee80211_ampdu_mlme_action {
* @set_rts_threshold: Configuration of RTS threshold (if device needs it)
* The callback can sleep.
*
- * @sta_notify: Notifies low level driver about addition, removal or power
- * state transition of an associated station, AP, IBSS/WDS/mesh peer etc.
- * Must be atomic.
+ * @sta_add: Notifies low level driver about addition of an associated station,
+ * AP, IBSS/WDS/mesh peer etc. This callback can sleep.
+ *
+ * @sta_remove: Notifies low level driver about removal of an associated
+ * station, AP, IBSS/WDS/mesh peer etc. This callback can sleep.
+ *
+ * @sta_notify: Notifies low level driver about power state transition of an
+ * associated station, AP, IBSS/WDS/mesh peer etc. Must be atomic.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
* The callback can sleep.
*
- * @get_tx_stats: Get statistics of the current TX queue status. This is used
- * to get number of currently queued packets (queue length), maximum queue
- * size (limit), and total number of packets sent using each TX queue
- * (count). The 'stats' pointer points to an array that has hw->queues
- * items.
- * The callback must be atomic.
- *
* @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
* this is only used for IBSS mode BSSID merging and debugging. Is not a
* required function.
@@ -1635,12 +1626,14 @@ struct ieee80211_ops {
void (*get_tkip_seq)(struct ieee80211_hw *hw, u8 hw_key_idx,
u32 *iv32, u16 *iv16);
int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
+ int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+ int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
- int (*get_tx_stats)(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
u64 (*get_tsf)(struct ieee80211_hw *hw);
void (*set_tsf)(struct ieee80211_hw *hw, u64 tsf);
void (*reset_tsf)(struct ieee80211_hw *hw);
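To make the sta_notify split concrete, a driver conversion might look like the sketch below. Every mydrv_* name is hypothetical; the only contract taken from the hunks above is the callback signatures and that sta_add/sta_remove may sleep while sta_notify stays atomic and now only covers the sleep/awake transitions.

#include <net/mac80211.h>

static int mydrv_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta)
{
	/* allocate firmware/hardware state for the new station; may sleep */
	return 0;
}

static int mydrv_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta)
{
	/* tear the station state down again; may sleep */
	return 0;
}

static const struct ieee80211_ops mydrv_ops = {
	/* ... tx/start/stop/config and the other usual callbacks ... */
	.sta_add	= mydrv_sta_add,
	.sta_remove	= mydrv_sta_remove,
	/* .sta_notify would now only handle STA_NOTIFY_SLEEP/AWAKE */
};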
diff --git a/include/net/sock.h b/include/net/sock.h
index 3f1a4804bb3..580d51fa28e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -317,6 +317,11 @@ struct sock {
/*
* Hashed lists helper routines
*/
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+ return hlist_entry(node, struct sock, sk_node);
+}
+
static inline struct sock *__sk_head(const struct hlist_head *head)
{
return hlist_entry(head->first, struct sock, sk_node);
@@ -1044,7 +1049,7 @@ extern void sk_common_release(struct sock *sk);
extern void sock_init_data(struct socket *sock, struct sock *sk);
/**
- * sk_filter_release: Release a socket filter
+ * sk_filter_release - release a socket filter
* @fp: filter to remove
*
* Remove a filter from a socket and release its resources.
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fcee547ca7e..0beb413c01c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -317,8 +317,8 @@ extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
struct xfrm_type {
char *description;
struct module *owner;
- __u8 proto;
- __u8 flags;
+ u8 proto;
+ u8 flags;
#define XFRM_TYPE_NON_FRAGMENT 1
#define XFRM_TYPE_REPLAY_PROT 2
#define XFRM_TYPE_LOCAL_COADDR 4
@@ -434,24 +434,24 @@ struct xfrm_tmpl {
unsigned short encap_family;
- __u32 reqid;
+ u32 reqid;
/* Mode: transport, tunnel etc. */
- __u8 mode;
+ u8 mode;
/* Sharing mode: unique, this session only, this user only etc. */
- __u8 share;
+ u8 share;
/* May skip this transformation if no SA is found */
- __u8 optional;
+ u8 optional;
/* Skip aalgos/ealgos/calgos checks. */
- __u8 allalgs;
+ u8 allalgs;
/* Bit mask of algos allowed for acquisition */
- __u32 aalgos;
- __u32 ealgos;
- __u32 calgos;
+ u32 aalgos;
+ u32 ealgos;
+ u32 calgos;
};
#define XFRM_MAX_DEPTH 6
@@ -770,7 +770,7 @@ static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
int pdw;
int pbi;
- pdw = prefixlen >> 5; /* num of whole __u32 in prefix */
+ pdw = prefixlen >> 5; /* num of whole u32 in prefix */
pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
if (pdw)
@@ -1259,7 +1259,7 @@ struct xfrm_algo_desc {
/* XFRM tunnel handlers. */
struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
- int (*err_handler)(struct sk_buff *skb, __u32 info);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
struct xfrm_tunnel *next;
int priority;
@@ -1500,7 +1500,7 @@ static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
switch (family) {
default:
case AF_INET:
- return (__force __u32)a->a4 - (__force __u32)b->a4;
+ return (__force u32)a->a4 - (__force u32)b->a4;
case AF_INET6:
return ipv6_addr_cmp((struct in6_addr *)a,
(struct in6_addr *)b);
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index 80caad1a31a..6ef0e761e5d 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -144,40 +144,16 @@ out:
return 0;
}
-static __inline__ struct sock *atalk_get_socket_idx(loff_t pos)
-{
- struct sock *s;
- struct hlist_node *node;
-
- sk_for_each(s, node, &atalk_sockets)
- if (!pos--)
- goto found;
- s = NULL;
-found:
- return s;
-}
-
static void *atalk_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(atalk_sockets_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&atalk_sockets_lock);
- return l ? atalk_get_socket_idx(--l) : SEQ_START_TOKEN;
+ return seq_hlist_start_head(&atalk_sockets, *pos);
}
static void *atalk_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct sock *i;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- i = sk_head(&atalk_sockets);
- goto out;
- }
- i = sk_next(v);
-out:
- return i;
+ return seq_hlist_next(v, &atalk_sockets, pos);
}
static void atalk_seq_socket_stop(struct seq_file *seq, void *v)
@@ -197,7 +173,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
goto out;
}
- s = v;
+ s = sk_entry(v);
at = at_sk(s);
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
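The conversion applied to the AppleTalk socket table (and to af_key further down) follows a single pattern, sketched here against a made-up my_list/my_lock table: seq_hlist_start_head() returns SEQ_START_TOKEN at position 0 so the show callback can print its banner, seq_hlist_next() replaces the hand-rolled cursor, and the new sk_entry() helper converts the hlist node handed to show back into the socket.

#include <linux/seq_file.h>
#include <net/sock.h>

static HLIST_HEAD(my_list);
static DEFINE_RWLOCK(my_lock);

static void *my_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(my_lock)
{
	read_lock_bh(&my_lock);
	return seq_hlist_start_head(&my_list, *pos);
}

static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &my_list, pos);
}

static void my_seq_stop(struct seq_file *seq, void *v)
	__releases(my_lock)
{
	read_unlock_bh(&my_lock);
}

static int my_seq_show(struct seq_file *seq, void *v)
{
	struct sock *sk;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt\n");
		return 0;
	}
	sk = sk_entry(v);	/* v is a struct hlist_node *, not the sock */
	seq_printf(seq, "%p %d\n", sk, atomic_read(&sk->sk_refcnt));
	return 0;
}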
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 476779d845e..7a96b2376bd 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -238,7 +238,7 @@ static int atm_dev_seq_show(struct seq_file *seq, void *v)
"Itf Type ESI/\"MAC\"addr "
"AAL(TX,err,RX,err,drop) ... [refcnt]\n";
- if (v == SEQ_START_TOKEN)
+ if (v == &atm_devs)
seq_puts(seq, atm_dev_banner);
else {
struct atm_dev *dev = list_entry(v, struct atm_dev, dev_list);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 447ed89205d..90082904f20 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -444,21 +444,10 @@ done:
return error;
}
-static inline void *dev_get_idx(loff_t left)
-{
- struct list_head *p;
-
- list_for_each(p, &atm_devs) {
- if (!--left)
- break;
- }
- return (p != &atm_devs) ? p : NULL;
-}
-
void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
mutex_lock(&atm_dev_mutex);
- return *pos ? dev_get_idx(*pos) : SEQ_START_TOKEN;
+ return seq_list_start_head(&atm_devs, *pos);
}
void atm_dev_seq_stop(struct seq_file *seq, void *v)
@@ -468,8 +457,5 @@ void atm_dev_seq_stop(struct seq_file *seq, void *v)
void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
- v = (v == SEQ_START_TOKEN)
- ? atm_devs.next : ((struct list_head *)v)->next;
- return (v == &atm_devs) ? NULL : v;
+ return seq_list_next(v, &atm_devs, pos);
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 5588ba69c46..a5beedf43e2 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1863,25 +1863,13 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static void *ax25_info_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_list_lock)
{
- struct ax25_cb *ax25;
- struct hlist_node *node;
- int i = 0;
-
spin_lock_bh(&ax25_list_lock);
- ax25_for_each(ax25, node, &ax25_list) {
- if (i == *pos)
- return ax25;
- ++i;
- }
- return NULL;
+ return seq_hlist_start(&ax25_list, *pos);
}
static void *ax25_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
-
- return hlist_entry( ((struct ax25_cb *)v)->ax25_node.next,
- struct ax25_cb, ax25_node);
+ return seq_hlist_next(v, &ax25_list, pos);
}
static void ax25_info_stop(struct seq_file *seq, void *v)
@@ -1892,7 +1880,7 @@ static void ax25_info_stop(struct seq_file *seq, void *v)
static int ax25_info_show(struct seq_file *seq, void *v)
{
- ax25_cb *ax25 = v;
+ ax25_cb *ax25 = hlist_entry(v, struct ax25_cb, ax25_node);
char buf[11];
int k;
diff --git a/net/ax25/ax25_uid.c b/net/ax25/ax25_uid.c
index 832bcf092a0..9f13f6eefcb 100644
--- a/net/ax25/ax25_uid.c
+++ b/net/ax25/ax25_uid.c
@@ -146,31 +146,13 @@ int ax25_uid_ioctl(int cmd, struct sockaddr_ax25 *sax)
static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(ax25_uid_lock)
{
- struct ax25_uid_assoc *pt;
- struct hlist_node *node;
- int i = 1;
-
read_lock(&ax25_uid_lock);
-
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- ax25_uid_for_each(pt, node, &ax25_uid_list) {
- if (i == *pos)
- return pt;
- ++i;
- }
- return NULL;
+ return seq_hlist_start_head(&ax25_uid_list, *pos);
}
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
- if (v == SEQ_START_TOKEN)
- return ax25_uid_list.first;
- else
- return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
- ax25_uid_assoc, uid_node);
+ return seq_hlist_next(v, &ax25_uid_list, pos);
}
static void ax25_uid_seq_stop(struct seq_file *seq, void *v)
@@ -186,8 +168,9 @@ static int ax25_uid_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_printf(seq, "Policy: %d\n", ax25_uid_policy);
else {
- struct ax25_uid_assoc *pt = v;
+ struct ax25_uid_assoc *pt;
+ pt = hlist_entry(v, struct ax25_uid_assoc, uid_node);
seq_printf(seq, "%6d %s\n", pt->uid, ax2asc(buf, &pt->call));
}
return 0;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 26fb831ef7e..b6234b73c4c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -64,7 +64,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
struct sk_buff *skb;
int size;
- BT_DBG("%s mc_count %d", dev->name, dev->mc_count);
+ BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev));
size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2;
skb = alloc_skb(size, GFP_ATOMIC);
@@ -97,7 +97,9 @@ static void bnep_net_set_mc_list(struct net_device *dev)
/* FIXME: We should group addresses here. */
- for (i = 0; i < dev->mc_count && i < BNEP_MAX_MULTICAST_FILTERS; i++) {
+ for (i = 0;
+ i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS;
+ i++) {
memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN);
dmi = dmi->next;
diff --git a/net/core/dev.c b/net/core/dev.c
index 94c1eeed25e..d1cf53d0d59 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4263,7 +4263,7 @@ static void dev_addr_discard(struct net_device *dev)
netif_addr_lock_bh(dev);
__dev_addr_discard(&dev->mc_list);
- dev->mc_count = 0;
+ netdev_mc_count(dev) = 0;
netif_addr_unlock_bh(dev);
}
@@ -5419,6 +5419,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
netdev_init_queues(dev);
+ INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
+ dev->ethtool_ntuple_list.count = 0;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5455,6 +5457,9 @@ void free_netdev(struct net_device *dev)
/* Flush device addresses */
dev_addr_flush(dev);
+ /* Clear ethtool n-tuple list */
+ ethtool_ntuple_flush(dev);
+
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee584e8d..82cae3bca78 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -120,7 +120,7 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
* NETIF_F_xxx values in include/linux/netdevice.h
*/
static const u32 flags_dup_features =
- ETH_FLAG_LRO;
+ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
u32 ethtool_op_get_flags(struct net_device *dev)
{
@@ -134,19 +134,42 @@ u32 ethtool_op_get_flags(struct net_device *dev)
int ethtool_op_set_flags(struct net_device *dev, u32 data)
{
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
if (data & ETH_FLAG_LRO)
dev->features |= NETIF_F_LRO;
else
dev->features &= ~NETIF_F_LRO;
+ if (data & ETH_FLAG_NTUPLE) {
+ if (!ops->set_rx_ntuple)
+ return -EOPNOTSUPP;
+ dev->features |= NETIF_F_NTUPLE;
+ } else {
+ /* safe to clear regardless */
+ dev->features &= ~NETIF_F_NTUPLE;
+ }
+
return 0;
}
+void ethtool_ntuple_flush(struct net_device *dev)
+{
+ struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
+
+ list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
+ list_del(&fsc->list);
+ kfree(fsc);
+ }
+ dev->ethtool_ntuple_list.count = 0;
+}
+EXPORT_SYMBOL(ethtool_ntuple_flush);
+
/* Handlers for each ethtool command */
static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
int err;
if (!dev->ethtool_ops->get_settings)
@@ -174,7 +197,10 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_settings(dev, &cmd);
}
-static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
{
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -209,7 +235,10 @@ static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
return 0;
}
-static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc cmd;
@@ -222,7 +251,10 @@ static int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
return dev->ethtool_ops->set_rxnfc(dev, &cmd);
}
-static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
{
struct ethtool_rxnfc info;
const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -266,6 +298,315 @@ err_out:
return ret;
}
+static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
+ struct ethtool_rx_ntuple_flow_spec *spec,
+ struct ethtool_rx_ntuple_flow_spec_container *fsc)
+{
+
+ /* don't add filters forever */
+ if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
+ /* free the container */
+ kfree(fsc);
+ return;
+ }
+
+ /* Copy the whole filter over */
+ fsc->fs.flow_type = spec->flow_type;
+ memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
+ memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
+
+ fsc->fs.vlan_tag = spec->vlan_tag;
+ fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
+ fsc->fs.data = spec->data;
+ fsc->fs.data_mask = spec->data_mask;
+ fsc->fs.action = spec->action;
+
+ /* add to the list */
+ list_add_tail_rcu(&fsc->list, &list->list);
+ list->count++;
+}
+
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rx_ntuple cmd;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
+ int ret;
+
+ if (!(dev->features & NETIF_F_NTUPLE))
+ return -EINVAL;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ /*
+ * Cache filter in dev struct for GET operation only if
+ * the underlying driver doesn't have its own GET operation, and
+ * only if the filter was added successfully. First make sure we
+ * can allocate the filter, then continue if successful.
+ */
+ if (!ops->get_rx_ntuple) {
+ fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
+ if (!fsc)
+ return -ENOMEM;
+ }
+
+ ret = ops->set_rx_ntuple(dev, &cmd);
+ if (ret) {
+ kfree(fsc);
+ return ret;
+ }
+
+ if (!ops->get_rx_ntuple)
+ __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
+
+ return ret;
+}
+
+static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ struct ethtool_rx_ntuple_flow_spec_container *fsc;
+ u8 *data;
+ char *p;
+ int ret, i, num_strings = 0;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ ret = ops->get_sset_count(dev, gstrings.string_set);
+ if (ret < 0)
+ return ret;
+
+ gstrings.len = ret;
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ if (ops->get_rx_ntuple) {
+ /* driver-specific filter grab */
+ ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
+ goto copy;
+ }
+
+ /* default ethtool filter grab */
+ i = 0;
+ p = (char *)data;
+ list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
+ sprintf(p, "Filter %d:\n", i);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+
+ switch (fsc->fs.flow_type) {
+ case TCP_V4_FLOW:
+ sprintf(p, "\tFlow Type: TCP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case UDP_V4_FLOW:
+ sprintf(p, "\tFlow Type: UDP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case SCTP_V4_FLOW:
+ sprintf(p, "\tFlow Type: SCTP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case AH_ESP_V4_FLOW:
+ sprintf(p, "\tFlow Type: AH ESP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case ESP_V4_FLOW:
+ sprintf(p, "\tFlow Type: ESP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IP_USER_FLOW:
+ sprintf(p, "\tFlow Type: Raw IP\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IPV4_FLOW:
+ sprintf(p, "\tFlow Type: IPv4\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ default:
+ sprintf(p, "\tFlow Type: Unknown\n");
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ goto unknown_filter;
+ };
+
+ /* now the rest of the filters */
+ switch (fsc->fs.flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.tcp_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.tcp_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.psrc,
+ fsc->fs.m_u.tcp_ip4_spec.psrc);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.pdst,
+ fsc->fs.m_u.tcp_ip4_spec.pdst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.tcp_ip4_spec.tos,
+ fsc->fs.m_u.tcp_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case AH_ESP_V4_FLOW:
+ case ESP_V4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.ah_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.ah_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSPI: %d, mask: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.spi,
+ fsc->fs.m_u.ah_ip4_spec.spi);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.ah_ip4_spec.tos,
+ fsc->fs.m_u.ah_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IP_USER_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.raw_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.raw_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ case IPV4_FLOW:
+ sprintf(p, "\tSrc IP addr: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tSrc IP mask: 0x%x\n",
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP addr: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tDest IP mask: 0x%x\n",
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
+ fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tTOS: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.tos,
+ fsc->fs.m_u.usr_ip4_spec.tos);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.ip_ver,
+ fsc->fs.m_u.usr_ip4_spec.ip_ver);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
+ fsc->fs.h_u.usr_ip4_spec.proto,
+ fsc->fs.m_u.usr_ip4_spec.proto);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ break;
+ };
+ sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
+ fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+ if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ sprintf(p, "\tAction: Drop\n");
+ else
+ sprintf(p, "\tAction: Direct to queue %d\n",
+ fsc->fs.action);
+ p += ETH_GSTRING_LEN;
+ num_strings++;
+unknown_filter:
+ i++;
+ }
+copy:
+ /* indicate to userspace how many strings we actually have */
+ gstrings.len = num_strings;
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
{
struct ethtool_regs regs;
@@ -313,6 +654,9 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
if (copy_from_user(&reset, useraddr, sizeof(reset)))
return -EFAULT;
+ /* Clear ethtool n-tuple list */
+ ethtool_ntuple_flush(dev);
+
ret = dev->ethtool_ops->reset(dev, &reset.data);
if (ret)
return ret;
@@ -324,7 +668,7 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
{
- struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
if (!dev->ethtool_ops->get_wol)
return -EOPNOTSUPP;
@@ -456,9 +800,12 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
return ret;
}
-static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+ struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
if (!dev->ethtool_ops->get_coalesce)
return -EOPNOTSUPP;
@@ -470,7 +817,10 @@ static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
return 0;
}
-static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
{
struct ethtool_coalesce coalesce;
@@ -485,7 +835,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
{
- struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+ struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
if (!dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
@@ -839,7 +1189,7 @@ static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
u32 cmd, u32 (*actor)(struct net_device *))
{
- struct ethtool_value edata = { cmd };
+ struct ethtool_value edata = { .cmd = cmd };
if (!actor)
return -EOPNOTSUPP;
@@ -880,7 +1230,10 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
-static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+/*
+ * noinline attribute so that gcc doesn't use too much stack in dev_ethtool()
+ */
+static noinline int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
{
struct ethtool_flash efl;
@@ -1112,6 +1465,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_RESET:
rc = ethtool_reset(dev, useraddr);
break;
+ case ETHTOOL_SRXNTUPLE:
+ rc = ethtool_set_rx_ntuple(dev, useraddr);
+ break;
+ case ETHTOOL_GRXNTUPLE:
+ rc = ethtool_get_rx_ntuple(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
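On the driver side, opting into the new n-tuple path needs little glue. The sketch below is hypothetical (all mydrv_* names are invented, and the set_rx_ntuple signature is inferred from the call site above): ethtool_op_set_flags() refuses ETH_FLAG_NTUPLE unless set_rx_ntuple is provided, and accepted filters are cached in dev->ethtool_ntuple_list for ETHTOOL_GRXNTUPLE whenever the driver has no get_rx_ntuple of its own.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int mydrv_set_rx_ntuple(struct net_device *dev,
			       struct ethtool_rx_ntuple *cmd)
{
	/* program cmd->fs into the NIC's flow-steering hardware (sketch) */
	return 0;
}

static const struct ethtool_ops mydrv_ethtool_ops = {
	.get_flags	= ethtool_op_get_flags,
	.set_flags	= ethtool_op_set_flags,
	.set_rx_ntuple	= mydrv_set_rx_ntuple,
	/* no .get_rx_ntuple, so the core's cached-filter dump is used */
};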
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 62f3878a601..42da96a4eee 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,6 +35,7 @@
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
+#include <linux/pci.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -580,6 +581,15 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
a->tx_compressed = b->tx_compressed;
};
+static inline int rtnl_vfinfo_size(const struct net_device *dev)
+{
+ if (dev->dev.parent && dev_is_pci(dev->dev.parent))
+ return dev_num_vf(dev->dev.parent) *
+ sizeof(struct ifla_vf_info);
+ else
+ return 0;
+}
+
static inline size_t if_nlmsg_size(const struct net_device *dev)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -597,6 +607,8 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ + nla_total_size(4) /* IFLA_NUM_VF */
+ + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */
+ rtnl_link_get_size(dev); /* IFLA_LINKINFO */
}
@@ -665,6 +677,17 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
stats = dev_get_stats(dev);
copy_rtnl_link_stats(nla_data(attr), stats);
+ if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
+ int i;
+ struct ifla_vf_info ivi;
+
+ NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent));
+ for (i = 0; i < dev_num_vf(dev->dev.parent); i++) {
+ if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
+ break;
+ NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi);
+ }
+ }
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
@@ -725,6 +748,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
+ [IFLA_VF_MAC] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_mac) },
+ [IFLA_VF_VLAN] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_vlan) },
+ [IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct ifla_vf_tx_rate) },
};
EXPORT_SYMBOL(ifla_policy);
@@ -898,6 +927,44 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
write_unlock_bh(&dev_base_lock);
}
+ if (tb[IFLA_VF_MAC]) {
+ struct ifla_vf_mac *ivm;
+ ivm = nla_data(tb[IFLA_VF_MAC]);
+ write_lock_bh(&dev_base_lock);
+ if (ops->ndo_set_vf_mac)
+ err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac);
+ write_unlock_bh(&dev_base_lock);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_VF_VLAN]) {
+ struct ifla_vf_vlan *ivv;
+ ivv = nla_data(tb[IFLA_VF_VLAN]);
+ write_lock_bh(&dev_base_lock);
+ if (ops->ndo_set_vf_vlan)
+ err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+ (u16)ivv->vlan,
+ (u8)ivv->qos);
+ write_unlock_bh(&dev_base_lock);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+ err = 0;
+
+ if (tb[IFLA_VF_TX_RATE]) {
+ struct ifla_vf_tx_rate *ivt;
+ ivt = nla_data(tb[IFLA_VF_TX_RATE]);
+ write_lock_bh(&dev_base_lock);
+ if (ops->ndo_set_vf_tx_rate)
+ err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate);
+ write_unlock_bh(&dev_base_lock);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
err = 0;
errout:
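The rtnetlink plumbing above only dispatches into net_device_ops hooks, so a PF driver exposes SR-IOV configuration with something along the lines of the sketch below (mydrv_* names are invented and the hook signatures are inferred from the call sites above; a driver that also implements ndo_get_vf_config feeds the IFLA_NUM_VF/IFLA_VFINFO dump added to rtnl_fill_ifinfo()).

#include <linux/netdevice.h>

static int mydrv_ndo_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	/* range-check vf, then program mac into that VF's receive filter */
	return 0;
}

static int mydrv_ndo_set_vf_vlan(struct net_device *dev, int vf,
				 u16 vlan, u8 qos)
{
	/* apply a port VLAN and QoS setting to the given VF (sketch) */
	return 0;
}

static const struct net_device_ops mydrv_netdev_ops = {
	/* ... ndo_open/ndo_stop/ndo_start_xmit and friends ... */
	.ndo_set_vf_mac		= mydrv_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= mydrv_ndo_set_vf_vlan,
};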
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index ff16e9df196..49d27c556be 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -63,14 +63,13 @@ int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
u8 *ccid_array, array_len;
int err = 0;
- if (len < ARRAY_SIZE(ccids))
- return -EINVAL;
-
if (ccid_get_builtin_ccids(&ccid_array, &array_len))
return -ENOBUFS;
- if (put_user(array_len, optlen) ||
- copy_to_user(optval, ccid_array, array_len))
+ if (put_user(array_len, optlen))
+ err = -EFAULT;
+ else if (len > 0 && copy_to_user(optval, ccid_array,
+ len > array_len ? array_len : len))
err = -EFAULT;
kfree(ccid_array);
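From userspace the effect is visible through DCCP_SOCKOPT_AVAILABLE_CCIDS: a buffer smaller than the kernel's CCID table no longer fails with EINVAL but receives a truncated list, while the returned optlen still reports how many CCIDs the kernel actually has. A hedged usage sketch (fd is assumed to be an already created DCCP socket; the SOL_DCCP fallback value is taken from the kernel headers):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/dccp.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269		/* matches include/linux/socket.h */
#endif

static void print_available_ccids(int fd)
{
	unsigned char ccids[2];	/* deliberately small buffer */
	socklen_t len = sizeof(ccids);

	if (getsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_AVAILABLE_CCIDS,
		       ccids, &len) == 0)
		printf("kernel has %u built-in CCIDs\n", (unsigned int)len);
}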
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 671cd1413d5..85ec1cb7fd4 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -835,6 +835,8 @@ verify_sock_status:
len = -EFAULT;
break;
}
+ if (flags & MSG_TRUNC)
+ len = skb->len;
found_fin_ok:
if (!(flags & MSG_PEEK))
sk_eat_skb(sk, skb, 0);
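With this hunk dccp_recvmsg() mirrors UDP's MSG_TRUNC behaviour: when the flag is set, the return value is the full packet length even if the supplied buffer was shorter. Combined with MSG_PEEK that lets an application size its buffer without consuming data, roughly as in this hypothetical userspace sketch:

#include <sys/types.h>
#include <sys/socket.h>

/* fd is assumed to be a connected DCCP socket with data queued */
static ssize_t next_dccp_packet_len(int fd)
{
	char dummy;

	/* returns the real packet length, not the single byte offered */
	return recv(fd, &dummy, sizeof(dummy), MSG_PEEK | MSG_TRUNC);
}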
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d2836399874..63bf298ca10 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
break;
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
- case IGMPV3_HOST_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
if (skb_rtable(skb)->fl.iif == 0)
break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
in_dev_put(in_dev);
return pim_rcv_v1(skb);
#endif
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
case IGMP_DVMRP:
case IGMP_TRACE:
case IGMP_HOST_LEAVE_MESSAGE:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28e02963249..3fddc69cccc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5783,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* tcp_ack considers this ACK as duplicate
* and does not calculate rtt.
- * Fix it at least with timestamps.
+ * Force it here.
*/
- if (tp->rx_opt.saw_tstamp &&
- tp->rx_opt.rcv_tsecr && !tp->srtt)
- tcp_ack_saw_tstamp(sk, 0);
+ tcp_ack_update_rtt(sk, 0, 0);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4f7d2122d81..608a5446d05 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1117,7 +1117,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
- unsigned int ulen, copied;
+ unsigned int ulen;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -1138,10 +1138,9 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- copied = len;
- if (copied > ulen)
- copied = ulen;
- else if (copied < ulen)
+ if (len > ulen)
+ len = ulen;
+ else if (len < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
@@ -1150,14 +1149,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied);
+ msg->msg_iov, len);
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
@@ -1186,7 +1185,7 @@ try_again:
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- err = copied;
+ err = len;
if (flags & MSG_TRUNC)
err = ulen;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1593289155f..764ad37ca07 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2646,7 +2646,8 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&addrconf_hash_lock);
while ((ifa = *bifa) != NULL) {
- if (ifa->idev == idev) {
+ if (ifa->idev == idev &&
+ (how || !(ifa->flags&IFA_F_PERMANENT))) {
*bifa = ifa->lst_next;
ifa->lst_next = NULL;
addrconf_del_timer(ifa);
@@ -2686,18 +2687,30 @@ static int addrconf_ifdown(struct net_device *dev, int how)
write_lock_bh(&idev->lock);
}
#endif
- while ((ifa = idev->addr_list) != NULL) {
- idev->addr_list = ifa->if_next;
- ifa->if_next = NULL;
- ifa->dead = 1;
- addrconf_del_timer(ifa);
- write_unlock_bh(&idev->lock);
+ bifa = &idev->addr_list;
+ while ((ifa = *bifa) != NULL) {
+ if (how == 0 && (ifa->flags&IFA_F_PERMANENT)) {
+ /* Retain permanent address on admin down */
+ bifa = &ifa->if_next;
+
+ /* Restart DAD if needed when link comes back up */
+ if ( !((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
+ idev->cnf.accept_dad <= 0 ||
+ (ifa->flags & IFA_F_NODAD)))
+ ifa->flags |= IFA_F_TENTATIVE;
+ } else {
+ *bifa = ifa->if_next;
+ ifa->if_next = NULL;
- __ipv6_ifa_notify(RTM_DELADDR, ifa);
- atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
- in6_ifa_put(ifa);
+ ifa->dead = 1;
+ write_unlock_bh(&idev->lock);
- write_lock_bh(&idev->lock);
+ __ipv6_ifa_notify(RTM_DELADDR, ifa);
+ atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
+ in6_ifa_put(ifa);
+
+ write_lock_bh(&idev->lock);
+ }
}
write_unlock_bh(&idev->lock);
@@ -2789,14 +2802,14 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
read_lock_bh(&idev->lock);
if (ifp->dead)
goto out;
- spin_lock_bh(&ifp->lock);
+ spin_lock(&ifp->lock);
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@@ -2804,7 +2817,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
}
if (!(idev->if_flags & IF_READY)) {
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
/*
* If the device is not ready:
@@ -2824,7 +2837,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
ip6_ins_rt(ifp->rt);
addrconf_dad_kick(ifp);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
out:
read_unlock_bh(&idev->lock);
}
@@ -2840,14 +2853,15 @@ static void addrconf_dad_timer(unsigned long data)
read_unlock_bh(&idev->lock);
goto out;
}
- spin_lock_bh(&ifp->lock);
+
+ spin_lock(&ifp->lock);
if (ifp->probes == 0) {
/*
* DAD was successful
*/
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@@ -2857,7 +2871,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->probes--;
addrconf_mod_timer(ifp, AC_DAD, ifp->idev->nd_parms->retrans_time);
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
/* send a neighbour solicitation for our addr */
@@ -2905,12 +2919,12 @@ static void addrconf_dad_run(struct inet6_dev *idev) {
read_lock_bh(&idev->lock);
for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
- spin_lock_bh(&ifp->lock);
+ spin_lock(&ifp->lock);
if (!(ifp->flags & IFA_F_TENTATIVE)) {
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
continue;
}
- spin_unlock_bh(&ifp->lock);
+ spin_unlock(&ifp->lock);
addrconf_dad_kick(ifp);
}
read_unlock_bh(&idev->lock);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index f626ea2b304..77e122f53ea 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -319,12 +319,26 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
w->root = &table->tb6_root;
if (cb->args[4] == 0) {
+ w->count = 0;
+ w->skip = 0;
+
read_lock_bh(&table->tb6_lock);
res = fib6_walk(w);
read_unlock_bh(&table->tb6_lock);
- if (res > 0)
+ if (res > 0) {
cb->args[4] = 1;
+ cb->args[5] = w->root->fn_sernum;
+ }
} else {
+ if (cb->args[5] != w->root->fn_sernum) {
+ /* Begin at the root if the tree changed */
+ cb->args[5] = w->root->fn_sernum;
+ w->state = FWS_INIT;
+ w->node = w->root;
+ w->skip = w->count;
+ } else
+ w->skip = 0;
+
read_lock_bh(&table->tb6_lock);
res = fib6_walk_continue(w);
read_unlock_bh(&table->tb6_lock);
@@ -1250,9 +1264,18 @@ static int fib6_walk_continue(struct fib6_walker_t *w)
w->leaf = fn->leaf;
case FWS_C:
if (w->leaf && fn->fn_flags&RTN_RTINFO) {
- int err = w->func(w);
+ int err;
+
+ if (w->count < w->skip) {
+ w->count++;
+ continue;
+ }
+
+ err = w->func(w);
if (err)
return err;
+
+ w->count++;
continue;
}
w->state = FWS_U;
@@ -1346,6 +1369,8 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
c.w.root = root;
c.w.func = fib6_clean_node;
c.w.prune = prune;
+ c.w.count = 0;
+ c.w.skip = 0;
c.func = func;
c.arg = arg;
c.net = net;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index d93812d09c1..b2847ed6a7d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -237,8 +237,7 @@ out:
}
static __inline__ struct frag_queue *
-fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
- struct inet6_dev *idev)
+fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
struct inet_frag_queue *q;
struct ip6_create_arg arg;
@@ -254,13 +253,9 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
if (q == NULL)
- goto oom;
+ return NULL;
return container_of(q, struct frag_queue, q);
-
-oom:
- IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS);
- return NULL;
}
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
@@ -606,8 +601,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
- if ((fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
- ip6_dst_idev(skb_dst(skb)))) != NULL) {
+ fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
+ if (fq != NULL) {
int ret;
spin_lock(&fq->q.lock);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 34efb3589ff..a7af9d68cd6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -322,7 +322,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- unsigned int ulen, copied;
+ unsigned int ulen;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
@@ -341,10 +341,9 @@ try_again:
goto out;
ulen = skb->len - sizeof(struct udphdr);
- copied = len;
- if (copied > ulen)
- copied = ulen;
- else if (copied < ulen)
+ if (len > ulen)
+ len = ulen;
+ else if (len < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -355,14 +354,14 @@ try_again:
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied );
+ msg->msg_iov, len);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
@@ -411,7 +410,7 @@ try_again:
datagram_recv_ctl(sk, msg, skb);
}
- err = copied;
+ err = len;
if (flags & MSG_TRUNC)
err = ulen;
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 576178482f8..26b5bfcf1d0 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -13,45 +13,15 @@
#include <net/tcp_states.h>
#include <net/ipx.h>
-static __inline__ struct ipx_interface *ipx_get_interface_idx(loff_t pos)
-{
- struct ipx_interface *i;
-
- list_for_each_entry(i, &ipx_interfaces, node)
- if (!pos--)
- goto out;
- i = NULL;
-out:
- return i;
-}
-
-static struct ipx_interface *ipx_interfaces_next(struct ipx_interface *i)
-{
- struct ipx_interface *rc = NULL;
-
- if (i->node.next != &ipx_interfaces)
- rc = list_entry(i->node.next, struct ipx_interface, node);
- return rc;
-}
-
static void *ipx_seq_interface_start(struct seq_file *seq, loff_t *pos)
{
- loff_t l = *pos;
-
spin_lock_bh(&ipx_interfaces_lock);
- return l ? ipx_get_interface_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&ipx_interfaces, *pos);
}
static void *ipx_seq_interface_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct ipx_interface *i;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- i = ipx_interfaces_head();
- else
- i = ipx_interfaces_next(v);
- return i;
+ return seq_list_next(v, &ipx_interfaces, pos);
}
static void ipx_seq_interface_stop(struct seq_file *seq, void *v)
@@ -63,7 +33,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
{
struct ipx_interface *i;
- if (v == SEQ_START_TOKEN) {
+ if (v == &ipx_interfaces) {
seq_puts(seq, "Network Node_Address Primary Device "
"Frame_Type");
#ifdef IPX_REFCNT_DEBUG
@@ -73,7 +43,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v)
goto out;
}
- i = v;
+ i = list_entry(v, struct ipx_interface, node);
seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum));
seq_printf(seq, "%02X%02X%02X%02X%02X%02X ",
i->if_node[0], i->if_node[1], i->if_node[2],
@@ -89,53 +59,15 @@ out:
return 0;
}
-static struct ipx_route *ipx_routes_head(void)
-{
- struct ipx_route *rc = NULL;
-
- if (!list_empty(&ipx_routes))
- rc = list_entry(ipx_routes.next, struct ipx_route, node);
- return rc;
-}
-
-static struct ipx_route *ipx_routes_next(struct ipx_route *r)
-{
- struct ipx_route *rc = NULL;
-
- if (r->node.next != &ipx_routes)
- rc = list_entry(r->node.next, struct ipx_route, node);
- return rc;
-}
-
-static __inline__ struct ipx_route *ipx_get_route_idx(loff_t pos)
-{
- struct ipx_route *r;
-
- list_for_each_entry(r, &ipx_routes, node)
- if (!pos--)
- goto out;
- r = NULL;
-out:
- return r;
-}
-
static void *ipx_seq_route_start(struct seq_file *seq, loff_t *pos)
{
- loff_t l = *pos;
read_lock_bh(&ipx_routes_lock);
- return l ? ipx_get_route_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&ipx_routes, *pos);
}
static void *ipx_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct ipx_route *r;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- r = ipx_routes_head();
- else
- r = ipx_routes_next(v);
- return r;
+ return seq_list_next(v, &ipx_routes, pos);
}
static void ipx_seq_route_stop(struct seq_file *seq, void *v)
@@ -147,11 +79,13 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v)
{
struct ipx_route *rt;
- if (v == SEQ_START_TOKEN) {
+ if (v == &ipx_routes) {
seq_puts(seq, "Network Router_Net Router_Node\n");
goto out;
}
- rt = v;
+
+ rt = list_entry(v, struct ipx_route, node);
+
seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net));
if (rt->ir_routed)
seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n",
@@ -226,9 +160,9 @@ static void *ipx_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
spin_unlock_bh(&i->if_sklist_lock);
sk = NULL;
for (;;) {
- i = ipx_interfaces_next(i);
- if (!i)
+ if (i->node.next == &ipx_interfaces)
break;
+ i = list_entry(i->node.next, struct ipx_interface, node);
spin_lock_bh(&i->if_sklist_lock);
if (!hlist_empty(&i->if_sklist)) {
sk = sk_head(&i->if_sklist);
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 315ead3cb92..e486dc89ea5 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1128,34 +1128,14 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
*/
static void *irlan_seq_start(struct seq_file *seq, loff_t *pos)
{
- int i = 1;
- struct irlan_cb *self;
-
rcu_read_lock();
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- list_for_each_entry(self, &irlans, dev_list) {
- if (*pos == i)
- return self;
- ++i;
- }
- return NULL;
+ return seq_list_start_head(&irlans, *pos);
}
/* Return entry after v, and increment pos */
static void *irlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct list_head *nxt;
-
- ++*pos;
- if (v == SEQ_START_TOKEN)
- nxt = irlans.next;
- else
- nxt = ((struct irlan_cb *)v)->dev_list.next;
-
- return (nxt == &irlans) ? NULL
- : list_entry(nxt, struct irlan_cb, dev_list);
+ return seq_list_next(v, &irlans, pos);
}
/* End of reading /proc file */
@@ -1170,10 +1150,10 @@ static void irlan_seq_stop(struct seq_file *seq, void *v)
*/
static int irlan_seq_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
+ if (v == &irlans)
seq_puts(seq, "IrLAN instances:\n");
else {
- struct irlan_cb *self = v;
+ struct irlan_cb *self = list_entry(v, struct irlan_cb, dev_list);
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index d340110f5c0..9616c32d107 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -321,14 +321,15 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
}
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
+ else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > HW_MAX_ADDRS) {
/* Disable promiscuous mode, use normal mode. */
IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
/* hardware_set_filter(NULL); */
irlan_set_multicast_filter(self, TRUE);
}
- else if (dev->mc_count) {
+ else if (!netdev_mc_empty(dev)) {
IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
/* Walk the address list, and load the filter */
/* hardware_set_filter(dev->mc_list); */
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 41dd2cb07ef..8b8e26a9e40 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1751,7 +1751,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
audit_info.secid = 0;
err = xfrm_state_flush(net, proto, &audit_info);
if (err)
- return err;
+ return 0;
c.data.proto = proto;
c.seq = hdr->sadb_msg_seq;
c.pid = hdr->sadb_msg_pid;
@@ -2713,7 +2713,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
audit_info.secid = 0;
err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
if (err)
- return err;
+ return 0;
c.data.type = XFRM_POLICY_TYPE_MAIN;
c.event = XFRM_MSG_FLUSHPOLICY;
c.pid = hdr->sadb_msg_pid;
@@ -3654,9 +3654,8 @@ static const struct net_proto_family pfkey_family_ops = {
#ifdef CONFIG_PROC_FS
static int pfkey_seq_show(struct seq_file *f, void *v)
{
- struct sock *s;
+ struct sock *s = sk_entry(v);
- s = (struct sock *)v;
if (v == SEQ_START_TOKEN)
seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
else
@@ -3675,19 +3674,9 @@ static void *pfkey_seq_start(struct seq_file *f, loff_t *ppos)
{
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
- struct sock *s;
- struct hlist_node *node;
- loff_t pos = *ppos;
read_lock(&pfkey_table_lock);
- if (pos == 0)
- return SEQ_START_TOKEN;
-
- sk_for_each(s, node, &net_pfkey->table)
- if (pos-- == 1)
- return s;
-
- return NULL;
+ return seq_hlist_start_head(&net_pfkey->table, *ppos);
}
static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
@@ -3695,10 +3684,7 @@ static void *pfkey_seq_next(struct seq_file *f, void *v, loff_t *ppos)
struct net *net = seq_file_net(f);
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
- ++*ppos;
- return (v == SEQ_START_TOKEN) ?
- sk_head(&net_pfkey->table) :
- sk_next((struct sock *)v);
+ return seq_hlist_next(v, &net_pfkey->table, ppos);
}
static void pfkey_seq_stop(struct seq_file *f, void *v)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 718fbcff84d..5538e1b4a69 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -237,6 +237,14 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
sdata->vif.type != NL80211_IFTYPE_AP)
return -EINVAL;
+ if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG "Disassociation is in progress. "
+ "Denying BA session request\n");
+#endif
+ return -EINVAL;
+ }
+
if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Suspend in progress. "
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index facf233843e..e1731b7c252 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -515,6 +515,8 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
if (old)
memcpy(new->tail, old->tail, new_tail_len);
+ sdata->vif.bss_conf.dtim_period = new->dtim_period;
+
rcu_assign_pointer(sdata->u.ap.beacon, new);
synchronize_rcu();
@@ -747,9 +749,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_AP;
- rcu_read_lock();
-
- err = sta_info_insert(sta);
+ err = sta_info_insert_rcu(sta);
if (err) {
rcu_read_unlock();
return err;
@@ -768,26 +768,13 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
{
struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
- struct sta_info *sta;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (mac) {
- rcu_read_lock();
-
- sta = sta_info_get_bss(sdata, mac);
- if (!sta) {
- rcu_read_unlock();
- return -ENOENT;
- }
-
- sta_info_unlink(&sta);
- rcu_read_unlock();
-
- sta_info_destroy(sta);
- } else
- sta_info_flush(local, sdata);
+ if (mac)
+ return sta_info_destroy_addr_bss(sdata, mac);
+ sta_info_flush(local, sdata);
return 0;
}
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index b3bc32b62a5..637929b65cc 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -250,6 +250,38 @@ static const struct file_operations uapsd_max_sp_len_ops = {
.open = mac80211_open_file_generic
};
+static ssize_t channel_type_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ const char *buf;
+
+ switch (local->hw.conf.channel_type) {
+ case NL80211_CHAN_NO_HT:
+ buf = "no ht\n";
+ break;
+ case NL80211_CHAN_HT20:
+ buf = "ht20\n";
+ break;
+ case NL80211_CHAN_HT40MINUS:
+ buf = "ht40-\n";
+ break;
+ case NL80211_CHAN_HT40PLUS:
+ buf = "ht40+\n";
+ break;
+ default:
+ buf = "???";
+ break;
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static const struct file_operations channel_type_ops = {
+ .read = channel_type_read,
+ .open = mac80211_open_file_generic
+};
+
static ssize_t queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -408,6 +440,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(noack);
DEBUGFS_ADD(uapsd_queues);
DEBUGFS_ADD(uapsd_max_sp_len);
+ DEBUGFS_ADD(channel_type);
statsd = debugfs_create_dir("statistics", phyd);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 6c31f38ac7f..c3d844093a2 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -243,6 +243,40 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
trace_drv_sta_notify(local, sdata, cmd, sta);
}
+static inline int drv_sta_add(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta)
+{
+ int ret = 0;
+
+ might_sleep();
+
+ if (local->ops->sta_add)
+ ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
+ else if (local->ops->sta_notify)
+ local->ops->sta_notify(&local->hw, &sdata->vif,
+ STA_NOTIFY_ADD, sta);
+
+ trace_drv_sta_add(local, sdata, sta, ret);
+
+ return ret;
+}
+
+static inline void drv_sta_remove(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta)
+{
+ might_sleep();
+
+ if (local->ops->sta_remove)
+ local->ops->sta_remove(&local->hw, &sdata->vif, sta);
+ else if (local->ops->sta_notify)
+ local->ops->sta_notify(&local->hw, &sdata->vif,
+ STA_NOTIFY_REMOVE, sta);
+
+ trace_drv_sta_remove(local, sdata, sta);
+}
+
static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
@@ -256,14 +290,6 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
return ret;
}
-static inline int drv_get_tx_stats(struct ieee80211_local *local,
- struct ieee80211_tx_queue_stats *stats)
-{
- int ret = local->ops->get_tx_stats(&local->hw, stats);
- trace_drv_get_tx_stats(local, stats, ret);
- return ret;
-}
-
static inline u64 drv_get_tsf(struct ieee80211_local *local)
{
u64 ret = -1ULL;
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 502424b2538..41baf730a5c 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -545,59 +545,88 @@ TRACE_EVENT(drv_sta_notify,
)
);
-TRACE_EVENT(drv_conf_tx,
- TP_PROTO(struct ieee80211_local *local, u16 queue,
- const struct ieee80211_tx_queue_params *params,
- int ret),
+TRACE_EVENT(drv_sta_add,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, int ret),
- TP_ARGS(local, queue, params, ret),
+ TP_ARGS(local, sdata, sta, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(u16, queue)
- __field(u16, txop)
- __field(u16, cw_min)
- __field(u16, cw_max)
- __field(u8, aifs)
+ VIF_ENTRY
+ STA_ENTRY
__field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->queue = queue;
+ VIF_ASSIGN;
+ STA_ASSIGN;
__entry->ret = ret;
- __entry->txop = params->txop;
- __entry->cw_max = params->cw_max;
- __entry->cw_min = params->cw_min;
- __entry->aifs = params->aifs;
),
TP_printk(
- LOCAL_PR_FMT " queue:%d ret:%d",
- LOCAL_PR_ARG, __entry->queue, __entry->ret
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
)
);
-TRACE_EVENT(drv_get_tx_stats,
+TRACE_EVENT(drv_sta_remove,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_tx_queue_stats *stats,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, sdata, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_conf_tx,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ const struct ieee80211_tx_queue_params *params,
int ret),
- TP_ARGS(local, stats, ret),
+ TP_ARGS(local, queue, params, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u16, txop)
+ __field(u16, cw_min)
+ __field(u16, cw_max)
+ __field(u8, aifs)
__field(int, ret)
),
TP_fast_assign(
LOCAL_ASSIGN;
+ __entry->queue = queue;
__entry->ret = ret;
+ __entry->txop = params->txop;
+ __entry->cw_max = params->cw_max;
+ __entry->cw_min = params->cw_min;
+ __entry->aifs = params->aifs;
),
TP_printk(
- LOCAL_PR_FMT " ret:%d",
- LOCAL_PR_ARG, __entry->ret
+ LOCAL_PR_FMT " queue:%d ret:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->ret
)
);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f95750b423e..f3e94248674 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -275,10 +275,12 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
(unsigned long long) supp_rates,
(unsigned long long) sta->sta.supp_rates[band]);
#endif
- } else
- ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
-
- rcu_read_unlock();
+ rcu_read_unlock();
+ } else {
+ rcu_read_unlock();
+ ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
+ supp_rates, GFP_KERNEL);
+ }
}
bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
@@ -368,7 +370,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sdata->name, mgmt->bssid);
#endif
ieee80211_sta_join_ibss(sdata, bss);
- ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
+ ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
+ supp_rates, GFP_KERNEL);
}
put_bss:
@@ -381,7 +384,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
* must be callable in atomic context.
*/
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid,u8 *addr, u32 supp_rates)
+ u8 *bssid,u8 *addr, u32 supp_rates,
+ gfp_t gfp)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
@@ -410,7 +414,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
wiphy_name(local->hw.wiphy), addr, sdata->name);
#endif
- sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
+ sta = sta_info_alloc(sdata, addr, gfp);
if (!sta)
return NULL;
@@ -422,9 +426,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
rate_control_rate_init(sta);
+ /* If it fails, maybe we raced another insertion? */
if (sta_info_insert(sta))
- return NULL;
-
+ return sta_info_get(sdata, addr);
return sta;
}
@@ -652,7 +656,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
}
if (pos[1] != 0 &&
(pos[1] != ifibss->ssid_len ||
- !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
+ memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
/* Ignore ProbeReq for foreign SSID */
return;
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3067fbd69d6..9dd98b674cb 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -316,6 +316,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_CSA_RECEIVED = BIT(5),
IEEE80211_STA_MFP_ENABLED = BIT(6),
IEEE80211_STA_UAPSD_ENABLED = BIT(7),
+ IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
};
struct ieee80211_if_managed {
@@ -688,15 +689,18 @@ struct ieee80211_local {
/* Station data */
/*
- * The lock only protects the list, hash, timer and counter
- * against manipulation, reads are done in RCU. Additionally,
- * the lock protects each BSS's TIM bitmap.
+ * The mutex only protects the list and counter,
+ * reads are done in RCU.
+ * Additionally, the lock protects the hash table,
+ * the pending list and each BSS's TIM bitmap.
*/
+ struct mutex sta_mtx;
spinlock_t sta_lock;
unsigned long num_sta;
- struct list_head sta_list;
+ struct list_head sta_list, sta_pending_list;
struct sta_info *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup;
+ struct work_struct sta_finish_work;
int sta_generation;
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
@@ -770,10 +774,6 @@ struct ieee80211_local {
assoc_led_name[32], radio_led_name[32];
#endif
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct work_struct sta_debugfs_add;
-#endif
-
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
/* TX/RX handler statistics */
unsigned int tx_handlers_drop;
@@ -985,7 +985,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
ieee80211_rx_result
ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
- u8 *bssid, u8 *addr, u32 supp_rates);
+ u8 *bssid, u8 *addr, u32 supp_rates,
+ gfp_t gfp);
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params);
int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7985e515089..bc4e20e57ff 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -102,7 +102,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
if (local->num_sta >= MESH_MAX_PLINKS)
return NULL;
- sta = sta_info_alloc(sdata, hw_addr, GFP_ATOMIC);
+ sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
if (!sta)
return NULL;
@@ -236,12 +236,12 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
sta = sta_info_get(sdata, hw_addr);
if (!sta) {
+ rcu_read_unlock();
+
sta = mesh_plink_alloc(sdata, hw_addr, rates);
- if (!sta) {
- rcu_read_unlock();
+ if (!sta)
return;
- }
- if (sta_info_insert(sta)) {
+ if (sta_info_insert_rcu(sta)) {
rcu_read_unlock();
return;
}
@@ -485,9 +485,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (!sta) {
/* ftype == PLINK_OPEN */
u32 rates;
+
+ rcu_read_unlock();
+
if (!mesh_plink_free_count(sdata)) {
mpl_dbg("Mesh plink error: no more free plinks\n");
- rcu_read_unlock();
return;
}
@@ -495,10 +497,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
if (!sta) {
mpl_dbg("Mesh plink error: plink table full\n");
- rcu_read_unlock();
return;
}
- if (sta_info_insert(sta)) {
+ if (sta_info_insert_rcu(sta)) {
rcu_read_unlock();
return;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 86c6ad1b058..bfc4a507001 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -27,10 +27,6 @@
#include "rate.h"
#include "led.h"
-#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
-#define IEEE80211_AUTH_MAX_TRIES 3
-#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
-#define IEEE80211_ASSOC_MAX_TRIES 3
#define IEEE80211_MAX_PROBE_TRIES 5
/*
@@ -438,8 +434,11 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
} else {
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
ieee80211_send_nullfunc(local, sdata, 1);
- conf->flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+
+ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)) {
+ conf->flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
}
}
@@ -545,6 +544,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
struct ieee80211_sub_if_data *sdata = local->ps_sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
/* can only happen when PS was just disabled anyway */
if (!sdata)
@@ -553,11 +553,16 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
- if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
+ if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
+ (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)))
ieee80211_send_nullfunc(local, sdata, 1);
- local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ if (!(local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) ||
+ (ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED)) {
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
}
void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -792,8 +797,10 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
rcu_read_lock();
sta = sta_info_get(sdata, bssid);
- if (sta)
+ if (sta) {
+ set_sta_flags(sta, WLAN_STA_DISASSOC);
ieee80211_sta_tear_down_BA_sessions(sta);
+ }
rcu_read_unlock();
changed |= ieee80211_reset_erp_info(sdata);
@@ -826,19 +833,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
changed |= BSS_CHANGED_BSSID;
ieee80211_bss_info_change_notify(sdata, changed);
- rcu_read_lock();
-
- sta = sta_info_get(sdata, bssid);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
-
- sta_info_unlink(&sta);
-
- rcu_read_unlock();
-
- sta_info_destroy(sta);
+ sta_info_destroy_addr(sdata, bssid);
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1844,7 +1839,11 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
wk->probe_auth.algorithm = auth_alg;
wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
- wk->type = IEEE80211_WORK_DIRECT_PROBE;
+ /* if we already have a probe, don't probe again */
+ if (req->bss->proberesp_ies)
+ wk->type = IEEE80211_WORK_AUTH;
+ else
+ wk->type = IEEE80211_WORK_DIRECT_PROBE;
wk->chan = req->bss->channel;
wk->sdata = sdata;
wk->done = ieee80211_probe_auth_done;
@@ -1904,6 +1903,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
+ ifmgd->flags &= ~IEEE80211_STA_NULLFUNC_ACKED;
for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
@@ -2007,12 +2007,18 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->work_mtx);
list_for_each_entry(wk, &local->work_list, list) {
- if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
+ if (wk->sdata != sdata)
+ continue;
+
+ if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
+ wk->type != IEEE80211_WORK_AUTH)
continue;
+
if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
continue;
- not_auth_yet = true;
- list_del(&wk->list);
+
+ not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
+ list_del_rcu(&wk->list);
free_work(wk);
break;
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 47f818959ad..0e64484e861 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -11,7 +11,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
- unsigned long flags;
ieee80211_scan_cancel(local);
@@ -55,22 +54,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
rcu_read_unlock();
/* remove STAs */
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- if (local->ops->sta_notify) {
+ if (sta->uploaded) {
sdata = sta->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, sdata, STA_NOTIFY_REMOVE,
- &sta->sta);
+ drv_sta_remove(local, sdata, &sta->sta);
}
mesh_plink_quiesce(sta);
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
/* remove all interfaces */
list_for_each_entry(sdata, &local->interfaces, list) {
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index c74b7c85403..0b299d236fa 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -145,7 +145,7 @@ static const struct file_operations rcname_ops = {
};
#endif
-struct rate_control_ref *rate_control_alloc(const char *name,
+static struct rate_control_ref *rate_control_alloc(const char *name,
struct ieee80211_local *local)
{
struct dentry *debugfsdir = NULL;
@@ -303,6 +303,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
info->control.rates[i].count = 1;
}
+ if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ return;
+
ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
/*
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 998cf7a935b..b6108bca96d 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -26,10 +26,6 @@ struct rate_control_ref {
struct kref kref;
};
-/* Get a reference to the rate control algorithm. If `name' is NULL, get the
- * first available algorithm. */
-struct rate_control_ref *rate_control_alloc(const char *name,
- struct ieee80211_local *local);
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc);
@@ -116,7 +112,8 @@ static inline void rate_control_remove_sta_debugfs(struct sta_info *sta)
#endif
}
-/* functions for rate control related to a device */
+/* Get a reference to the rate control algorithm. If `name' is NULL, get the
+ * first available algorithm. */
int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
const char *name);
void rate_control_deinitialize(struct ieee80211_local *local);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5709307fcb9..c9755f3d986 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1719,6 +1719,7 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_local *local = rx->local;
struct net_device *dev = sdata->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
__le16 fc = hdr->frame_control;
@@ -1750,6 +1751,13 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
dev->stats.rx_packets++;
dev->stats.rx_bytes += rx->skb->len;
+ if (ieee80211_is_data(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1) &&
+ local->hw.conf.dynamic_ps_timeout > 0 && local->ps_sdata) {
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+ }
+
ieee80211_deliver_skb(rx);
return RX_QUEUED;
@@ -2244,8 +2252,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
rate_idx = 0; /* TODO: HT rates */
else
rate_idx = status->rate_idx;
- rx->sta = ieee80211_ibss_add_sta(sdata, bssid, hdr->addr2,
- BIT(rate_idx));
+ rx->sta = ieee80211_ibss_add_sta(sdata, bssid,
+ hdr->addr2, BIT(rate_idx), GFP_ATOMIC);
}
break;
case NL80211_IFTYPE_MESH_POINT:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index bc061f62967..b822dce9786 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -345,6 +345,13 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->scan_req)
return -EBUSY;
+ if (!list_empty(&local->work_list)) {
+ /* wait for the work to finish/time out */
+ local->scan_req = req;
+ local->scan_sdata = sdata;
+ return 0;
+ }
+
if (local->ops->hw_scan) {
u8 *ies;
@@ -364,29 +371,33 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->hw_scan_req->ie = ies;
local->hw_scan_band = 0;
+
+ /*
+ * After allocating local->hw_scan_req, we must
+ * go through until ieee80211_prep_hw_scan(), so
+ * anything that might be changed here and leave
+ * this function early must not go after this
+ * allocation.
+ */
}
local->scan_req = req;
local->scan_sdata = sdata;
- if (!list_empty(&local->work_list)) {
- /* wait for the work to finish/time out */
- return 0;
- }
-
if (local->ops->hw_scan)
__set_bit(SCAN_HW_SCANNING, &local->scanning);
else
__set_bit(SCAN_SW_SCANNING, &local->scanning);
+
/*
* Kicking off the scan need not be protected,
* only the scan variable stuff, since now
* local->scan_req is assigned and other callers
* will abort their scan attempts.
*
- * This avoids getting a scan_mtx -> iflist_mtx
- * dependency, so that the scan completed calls
- * have more locking freedom.
+ * This avoids too many locking dependencies
+ * so that the scan completed calls have more
+ * locking freedom.
*/
ieee80211_recalc_idle(local);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f735826f055..211c475f73c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -32,49 +32,33 @@
* for faster lookup and a list for iteration. They are managed using
* RCU, i.e. access to the list and hash table is protected by RCU.
*
- * Upon allocating a STA info structure with sta_info_alloc(), the caller owns
- * that structure. It must then either destroy it using sta_info_destroy()
- * (which is pretty useless) or insert it into the hash table using
- * sta_info_insert() which demotes the reference from ownership to a regular
- * RCU-protected reference; if the function is called without protection by an
- * RCU critical section the reference is instantly invalidated. Note that the
- * caller may not do much with the STA info before inserting it, in particular,
- * it may not start any mesh peer link management or add encryption keys.
+ * Upon allocating a STA info structure with sta_info_alloc(), the caller
+ * owns that structure. It must then insert it into the hash table using
+ * either sta_info_insert() or sta_info_insert_rcu(); only in the latter
+ * case (which acquires an rcu read section but must not be called from
+ * within one) will the pointer still be valid after the call. Note that
+ * the caller may not do much with the STA info before inserting it, in
+ * particular, it may not start any mesh peer link management or add
+ * encryption keys.
*
 * When the insertion fails (sta_info_insert() returns non-zero), the
* structure will have been freed by sta_info_insert()!
*
- * sta entries are added by mac80211 when you establish a link with a
+ * Station entries are added by mac80211 when you establish a link with a
 * peer. This means different things for the different types of interfaces
 * we support. For a regular station this means we add the AP sta when we
 * receive an association response from the AP. For IBSS this occurs when
- * we receive a probe response or a beacon from target IBSS network. For
- * WDS we add the sta for the peer imediately upon device open. When using
- * AP mode we add stations for each respective station upon request from
- * userspace through nl80211.
+ * we get to know about a peer on the same IBSS. For WDS we add the sta for
+ * the peer immediately upon device open. When using AP mode we add stations
+ * for each respective station upon request from userspace through nl80211.
*
- * Because there are debugfs entries for each station, and adding those
- * must be able to sleep, it is also possible to "pin" a station entry,
- * that means it can be removed from the hash table but not be freed.
- * See the comment in __sta_info_unlink() for more information, this is
- * an internal capability only.
+ * In order to remove a STA info structure, various sta_info_destroy_*()
+ * calls are available.
*
- * In order to remove a STA info structure, the caller needs to first
- * unlink it (sta_info_unlink()) from the list and hash tables and
- * then destroy it; sta_info_destroy() will wait for an RCU grace period
- * to elapse before actually freeing it. Due to the pinning and the
- * possibility of multiple callers trying to remove the same STA info at
- * the same time, sta_info_unlink() can clear the STA info pointer it is
- * passed to indicate that the STA info is owned by somebody else now.
- *
- * If sta_info_unlink() did not clear the pointer then the caller owns
- * the STA info structure now and is responsible of destroying it with
- * a call to sta_info_destroy().
- *
- * In all other cases, there is no concept of ownership on a STA entry,
- * each structure is owned by the global hash table/list until it is
- * removed. All users of the structure need to be RCU protected so that
- * the structure won't be freed before they are done using it.
+ * There is no concept of ownership on a STA entry, each structure is
+ * owned by the global hash table/list until it is removed. All users of
+ * the structure need to be RCU protected so that the structure won't be
+ * freed before they are done using it.
*/
/* Caller must hold local->sta_lock */
@@ -185,101 +169,6 @@ static void __sta_info_free(struct ieee80211_local *local,
kfree(sta);
}
-void sta_info_destroy(struct sta_info *sta)
-{
- struct ieee80211_local *local;
- struct sk_buff *skb;
- int i;
-
- might_sleep();
-
- if (!sta)
- return;
-
- local = sta->local;
-
- cancel_work_sync(&sta->drv_unblock_wk);
-
- rate_control_remove_sta_debugfs(sta);
- ieee80211_sta_debugfs_remove(sta);
-
-#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- mesh_plink_deactivate(sta);
-#endif
-
- /*
- * We have only unlinked the key, and actually destroying it
- * may mean it is removed from hardware which requires that
- * the key->sta pointer is still valid, so flush the key todo
- * list here.
- *
- * ieee80211_key_todo() will synchronize_rcu() so after this
- * nothing can reference this sta struct any more.
- */
- ieee80211_key_todo();
-
-#ifdef CONFIG_MAC80211_MESH
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- del_timer_sync(&sta->plink_timer);
-#endif
-
- while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
- local->total_ps_buffered--;
- dev_kfree_skb_any(skb);
- }
-
- while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
- dev_kfree_skb_any(skb);
-
- for (i = 0; i < STA_TID_NUM; i++) {
- struct tid_ampdu_rx *tid_rx;
- struct tid_ampdu_tx *tid_tx;
-
- spin_lock_bh(&sta->lock);
- tid_rx = sta->ampdu_mlme.tid_rx[i];
- /* Make sure timer won't free the tid_rx struct, see below */
- if (tid_rx)
- tid_rx->shutdown = true;
-
- spin_unlock_bh(&sta->lock);
-
- /*
- * Outside spinlock - shutdown is true now so that the timer
- * won't free tid_rx, we have to do that now. Can't let the
- * timer do it because we have to sync the timer outside the
- * lock that it takes itself.
- */
- if (tid_rx) {
- del_timer_sync(&tid_rx->session_timer);
- kfree(tid_rx);
- }
-
- /*
- * No need to do such complications for TX agg sessions, the
- * path leading to freeing the tid_tx struct goes via a call
- * from the driver, and thus needs to look up the sta struct
- * again, which cannot be found when we get here. Hence, we
- * just need to delete the timer and free the aggregation
- * info; we won't be telling the peer about it then but that
- * doesn't matter if we're not talking to it again anyway.
- */
- tid_tx = sta->ampdu_mlme.tid_tx[i];
- if (tid_tx) {
- del_timer_sync(&tid_tx->addba_resp_timer);
- /*
- * STA removed while aggregation session being
- * started? Bit odd, but purge frames anyway.
- */
- skb_queue_purge(&tid_tx->pending);
- kfree(tid_tx);
- }
- }
-
- __sta_info_free(local, sta);
-}
-
-
/* Caller must hold local->sta_lock */
static void sta_info_hash_add(struct ieee80211_local *local,
struct sta_info *sta)
@@ -376,7 +265,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-int sta_info_insert(struct sta_info *sta)
+static int sta_info_finish_insert(struct sta_info *sta, bool async)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
@@ -384,6 +273,91 @@ int sta_info_insert(struct sta_info *sta)
unsigned long flags;
int err = 0;
+ WARN_ON(!mutex_is_locked(&local->sta_mtx));
+
+ /* notify driver */
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ err = drv_sta_add(local, sdata, &sta->sta);
+ if (err) {
+ if (!async)
+ return err;
+ printk(KERN_DEBUG "%s: failed to add IBSS STA %pM to driver (%d)"
+ " - keeping it anyway.\n",
+ sdata->name, sta->sta.addr, err);
+ } else {
+ sta->uploaded = true;
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (async)
+ printk(KERN_DEBUG "%s: Finished adding IBSS STA %pM\n",
+ wiphy_name(local->hw.wiphy), sta->sta.addr);
+#endif
+ }
+
+ sdata = sta->sdata;
+
+ if (!async) {
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+
+ /* make the station visible */
+ spin_lock_irqsave(&local->sta_lock, flags);
+ sta_info_hash_add(local, sta);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ }
+
+ list_add(&sta->list, &local->sta_list);
+
+ ieee80211_sta_debugfs_add(sta);
+ rate_control_add_sta_debugfs(sta);
+
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+
+
+ return 0;
+}
+
+static void sta_info_finish_pending(struct ieee80211_local *local)
+{
+ struct sta_info *sta;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ while (!list_empty(&local->sta_pending_list)) {
+ sta = list_first_entry(&local->sta_pending_list,
+ struct sta_info, list);
+ list_del(&sta->list);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+
+ sta_info_finish_insert(sta, true);
+
+ spin_lock_irqsave(&local->sta_lock, flags);
+ }
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+}
+
+static void sta_info_finish_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, sta_finish_work);
+
+ mutex_lock(&local->sta_mtx);
+ sta_info_finish_pending(local);
+ mutex_unlock(&local->sta_mtx);
+}
+
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU)
+{
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ unsigned long flags;
+ int err = 0;
+
/*
* Can't be a WARN_ON because it can be triggered through a race:
* something inserts a STA (on one CPU) without holding the RTNL
@@ -391,36 +365,87 @@ int sta_info_insert(struct sta_info *sta)
*/
if (unlikely(!ieee80211_sdata_running(sdata))) {
err = -ENETDOWN;
+ rcu_read_lock();
goto out_free;
}
if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
is_multicast_ether_addr(sta->sta.addr))) {
err = -EINVAL;
+ rcu_read_lock();
goto out_free;
}
+ /*
+ * In ad-hoc mode, we sometimes need to insert stations
+ * from tasklet context from the RX path. To avoid races,
+ * always do so in that case -- see the comment below.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ spin_lock_irqsave(&local->sta_lock, flags);
+ /* check if STA exists already */
+ if (sta_info_get_bss(sdata, sta->sta.addr)) {
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
+ err = -EEXIST;
+ goto out_free;
+ }
+
+ local->num_sta++;
+ local->sta_generation++;
+ smp_mb();
+ sta_info_hash_add(local, sta);
+
+ list_add_tail(&sta->list, &local->sta_pending_list);
+
+ rcu_read_lock();
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ printk(KERN_DEBUG "%s: Added IBSS STA %pM\n",
+ wiphy_name(local->hw.wiphy), sta->sta.addr);
+#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+
+ ieee80211_queue_work(&local->hw, &local->sta_finish_work);
+
+ return 0;
+ }
+
+ /*
+ * At first glance, this will look racy, because the code
+ * below this point, which inserts a station and may sleep,
+ * unlocks the sta_lock between checking existence in the
+ * hash table and inserting into it.
+ *
+ * However, it is not racy against itself because it keeps
+ * the mutex locked. It still seems to race against the
+ * above code that atomically inserts the station... That,
+ * however, is not true because the above code can only
+ * be invoked for IBSS interfaces, and the below code will
+ * not be -- and the two do not race against each other as
+ * the hash table also keys off the interface.
+ */
+
+ might_sleep();
+
+ mutex_lock(&local->sta_mtx);
+
spin_lock_irqsave(&local->sta_lock, flags);
/* check if STA exists already */
- if (sta_info_get(sdata, sta->sta.addr)) {
+ if (sta_info_get_bss(sdata, sta->sta.addr)) {
spin_unlock_irqrestore(&local->sta_lock, flags);
+ rcu_read_lock();
err = -EEXIST;
goto out_free;
}
- list_add(&sta->list, &local->sta_list);
- local->sta_generation++;
- local->num_sta++;
- sta_info_hash_add(local, sta);
- /* notify driver */
- if (local->ops->sta_notify) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
- drv_sta_notify(local, sdata, STA_NOTIFY_ADD, &sta->sta);
- sdata = sta->sdata;
+ err = sta_info_finish_insert(sta, false);
+ if (err) {
+ mutex_unlock(&local->sta_mtx);
+ rcu_read_lock();
+ goto out_free;
}
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -428,22 +453,9 @@ int sta_info_insert(struct sta_info *sta)
wiphy_name(local->hw.wiphy), sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
- sinfo.filled = 0;
- sinfo.generation = local->sta_generation;
- cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_ATOMIC);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- /*
- * Debugfs entry adding might sleep, so schedule process
- * context task for adding entry for STAs that do not yet
- * have one.
- * NOTE: due to auto-freeing semantics this may only be done
- * if the insertion is successful!
- */
- schedule_work(&local->sta_debugfs_add);
-#endif
+ /* move reference to rcu-protected */
+ rcu_read_lock();
+ mutex_unlock(&local->sta_mtx);
if (ieee80211_vif_is_mesh(&sdata->vif))
mesh_accept_plinks_update(sdata);
@@ -455,6 +467,15 @@ int sta_info_insert(struct sta_info *sta)
return err;
}
+int sta_info_insert(struct sta_info *sta)
+{
+ int err = sta_info_insert_rcu(sta);
+
+ rcu_read_unlock();
+
+ return err;
+}
+
static inline void __bss_tim_set(struct ieee80211_if_ap *bss, u16 aid)
{
/*
@@ -523,108 +544,6 @@ void sta_info_clear_tim_bit(struct sta_info *sta)
spin_unlock_irqrestore(&sta->local->sta_lock, flags);
}
-static void __sta_info_unlink(struct sta_info **sta)
-{
- struct ieee80211_local *local = (*sta)->local;
- struct ieee80211_sub_if_data *sdata = (*sta)->sdata;
- /*
- * pull caller's reference if we're already gone.
- */
- if (sta_info_hash_del(local, *sta)) {
- *sta = NULL;
- return;
- }
-
- if ((*sta)->key) {
- ieee80211_key_free((*sta)->key);
- WARN_ON((*sta)->key);
- }
-
- list_del(&(*sta)->list);
- (*sta)->dead = true;
-
- if (test_and_clear_sta_flags(*sta,
- WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
- BUG_ON(!sdata->bss);
-
- atomic_dec(&sdata->bss->num_sta_ps);
- __sta_info_clear_tim_bit(sdata->bss, *sta);
- }
-
- local->num_sta--;
- local->sta_generation++;
-
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- rcu_assign_pointer(sdata->u.vlan.sta, NULL);
-
- if (local->ops->sta_notify) {
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
-
- drv_sta_notify(local, sdata, STA_NOTIFY_REMOVE,
- &(*sta)->sta);
- sdata = (*sta)->sdata;
- }
-
- if (ieee80211_vif_is_mesh(&sdata->vif)) {
- mesh_accept_plinks_update(sdata);
-#ifdef CONFIG_MAC80211_MESH
- del_timer(&(*sta)->plink_timer);
-#endif
- }
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Removed STA %pM\n",
- wiphy_name(local->hw.wiphy), (*sta)->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
-
- /*
- * Finally, pull caller's reference if the STA is pinned by the
- * task that is adding the debugfs entries. In that case, we
- * leave the STA "to be freed".
- *
- * The rules are not trivial, but not too complex either:
- * (1) pin_status is only modified under the sta_lock
- * (2) STAs may only be pinned under the RTNL so that
- * sta_info_flush() is guaranteed to actually destroy
- * all STAs that are active for a given interface, this
- * is required for correctness because otherwise we
- * could notify a driver that an interface is going
- * away and only after that (!) notify it about a STA
- * on that interface going away.
- * (3) sta_info_debugfs_add_work() will set the status
- * to PINNED when it found an item that needs a new
- * debugfs directory created. In that case, that item
- * must not be freed although all *RCU* users are done
- * with it. Hence, we tell the caller of _unlink()
- * that the item is already gone (as can happen when
- * two tasks try to unlink/destroy at the same time)
- * (4) We set the pin_status to DESTROY here when we
- * find such an item.
- * (5) sta_info_debugfs_add_work() will reset the pin_status
- * from PINNED to NORMAL when it is done with the item,
- * but will check for DESTROY before resetting it in
- * which case it will free the item.
- */
- if ((*sta)->pin_status == STA_INFO_PIN_STAT_PINNED) {
- (*sta)->pin_status = STA_INFO_PIN_STAT_DESTROY;
- *sta = NULL;
- return;
- }
-}
-
-void sta_info_unlink(struct sta_info **sta)
-{
- struct ieee80211_local *local = (*sta)->local;
- unsigned long flags;
-
- spin_lock_irqsave(&local->sta_lock, flags);
- __sta_info_unlink(sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
-}
-
static int sta_info_buffer_expired(struct sta_info *sta,
struct sk_buff *skb)
{
@@ -681,109 +600,209 @@ static void sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
}
}
-
-static void sta_info_cleanup(unsigned long data)
+static int __must_check __sta_info_destroy(struct sta_info *sta)
{
- struct ieee80211_local *local = (struct ieee80211_local *) data;
- struct sta_info *sta;
+ struct ieee80211_local *local;
+ struct ieee80211_sub_if_data *sdata;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int ret, i;
- rcu_read_lock();
- list_for_each_entry_rcu(sta, &local->sta_list, list)
- sta_info_cleanup_expire_buffered(local, sta);
- rcu_read_unlock();
+ might_sleep();
- if (local->quiescing)
- return;
+ if (!sta)
+ return -ENOENT;
- local->sta_cleanup.expires =
- round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
- add_timer(&local->sta_cleanup);
-}
+ local = sta->local;
+ sdata = sta->sdata;
-#ifdef CONFIG_MAC80211_DEBUGFS
-/*
- * See comment in __sta_info_unlink,
- * caller must hold local->sta_lock.
- */
-static void __sta_info_pin(struct sta_info *sta)
-{
- WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_NORMAL);
- sta->pin_status = STA_INFO_PIN_STAT_PINNED;
+ spin_lock_irqsave(&local->sta_lock, flags);
+ ret = sta_info_hash_del(local, sta);
+ /* this might still be the pending list ... which is fine */
+ if (!ret)
+ list_del(&sta->list);
+ spin_unlock_irqrestore(&local->sta_lock, flags);
+ if (ret)
+ return ret;
+
+ if (sta->key) {
+ ieee80211_key_free(sta->key);
+ /*
+ * We have only unlinked the key, and actually destroying it
+ * may mean it is removed from hardware which requires that
+ * the key->sta pointer is still valid, so flush the key todo
+ * list here.
+ *
+ * ieee80211_key_todo() will synchronize_rcu() so after this
+ * nothing can reference this sta struct any more.
+ */
+ ieee80211_key_todo();
+
+ WARN_ON(sta->key);
+ }
+
+ sta->dead = true;
+
+ if (test_and_clear_sta_flags(sta,
+ WLAN_STA_PS_STA | WLAN_STA_PS_DRIVER)) {
+ BUG_ON(!sdata->bss);
+
+ atomic_dec(&sdata->bss->num_sta_ps);
+ __sta_info_clear_tim_bit(sdata->bss, sta);
+ }
+
+ local->num_sta--;
+ local->sta_generation++;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ rcu_assign_pointer(sdata->u.vlan.sta, NULL);
+
+ if (sta->uploaded) {
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data,
+ u.ap);
+ drv_sta_remove(local, sdata, &sta->sta);
+ sdata = sta->sdata;
+ }
+
+#ifdef CONFIG_MAC80211_MESH
+ if (ieee80211_vif_is_mesh(&sdata->vif)) {
+ mesh_accept_plinks_update(sdata);
+ del_timer(&sta->plink_timer);
+ }
+#endif
+
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ printk(KERN_DEBUG "%s: Removed STA %pM\n",
+ wiphy_name(local->hw.wiphy), sta->sta.addr);
+#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ cancel_work_sync(&sta->drv_unblock_wk);
+
+ rate_control_remove_sta_debugfs(sta);
+ ieee80211_sta_debugfs_remove(sta);
+
+#ifdef CONFIG_MAC80211_MESH
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
+ mesh_plink_deactivate(sta);
+ del_timer_sync(&sta->plink_timer);
+ }
+#endif
+
+ while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
+ local->total_ps_buffered--;
+ dev_kfree_skb_any(skb);
+ }
+
+ while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
+ dev_kfree_skb_any(skb);
+
+ for (i = 0; i < STA_TID_NUM; i++) {
+ struct tid_ampdu_rx *tid_rx;
+ struct tid_ampdu_tx *tid_tx;
+
+ spin_lock_bh(&sta->lock);
+ tid_rx = sta->ampdu_mlme.tid_rx[i];
+ /* Make sure timer won't free the tid_rx struct, see below */
+ if (tid_rx)
+ tid_rx->shutdown = true;
+
+ spin_unlock_bh(&sta->lock);
+
+ /*
+ * Outside spinlock - shutdown is true now so that the timer
+ * won't free tid_rx, we have to do that now. Can't let the
+ * timer do it because we have to sync the timer outside the
+ * lock that it takes itself.
+ */
+ if (tid_rx) {
+ del_timer_sync(&tid_rx->session_timer);
+ kfree(tid_rx);
+ }
+
+ /*
+ * No need to do such complications for TX agg sessions, the
+ * path leading to freeing the tid_tx struct goes via a call
+ * from the driver, and thus needs to look up the sta struct
+ * again, which cannot be found when we get here. Hence, we
+ * just need to delete the timer and free the aggregation
+ * info; we won't be telling the peer about it then but that
+ * doesn't matter if we're not talking to it again anyway.
+ */
+ tid_tx = sta->ampdu_mlme.tid_tx[i];
+ if (tid_tx) {
+ del_timer_sync(&tid_tx->addba_resp_timer);
+ /*
+ * STA removed while aggregation session being
+ * started? Bit odd, but purge frames anyway.
+ */
+ skb_queue_purge(&tid_tx->pending);
+ kfree(tid_tx);
+ }
+ }
+
+ __sta_info_free(local, sta);
+
+ return 0;
}
-/*
- * See comment in __sta_info_unlink, returns sta if it
- * needs to be destroyed.
- */
-static struct sta_info *__sta_info_unpin(struct sta_info *sta)
+int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
- struct sta_info *ret = NULL;
- unsigned long flags;
+ struct sta_info *sta;
+ int ret;
- spin_lock_irqsave(&sta->local->sta_lock, flags);
- WARN_ON(sta->pin_status != STA_INFO_PIN_STAT_DESTROY &&
- sta->pin_status != STA_INFO_PIN_STAT_PINNED);
- if (sta->pin_status == STA_INFO_PIN_STAT_DESTROY)
- ret = sta;
- sta->pin_status = STA_INFO_PIN_STAT_NORMAL;
- spin_unlock_irqrestore(&sta->local->sta_lock, flags);
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get(sdata, addr);
+ ret = __sta_info_destroy(sta);
+ mutex_unlock(&sdata->local->sta_mtx);
return ret;
}
-static void sta_info_debugfs_add_work(struct work_struct *work)
+int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, sta_debugfs_add);
- struct sta_info *sta, *tmp;
- unsigned long flags;
+ struct sta_info *sta;
+ int ret;
- /* We need to keep the RTNL across the whole pinned status. */
- rtnl_lock();
- while (1) {
- sta = NULL;
+ mutex_lock(&sdata->local->sta_mtx);
+ sta = sta_info_get_bss(sdata, addr);
+ ret = __sta_info_destroy(sta);
+ mutex_unlock(&sdata->local->sta_mtx);
- spin_lock_irqsave(&local->sta_lock, flags);
- list_for_each_entry(tmp, &local->sta_list, list) {
- /*
- * debugfs.add_has_run will be set by
- * ieee80211_sta_debugfs_add regardless
- * of what else it does.
- */
- if (!tmp->debugfs.add_has_run) {
- sta = tmp;
- __sta_info_pin(sta);
- break;
- }
- }
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ return ret;
+}
- if (!sta)
- break;
+static void sta_info_cleanup(unsigned long data)
+{
+ struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list, list)
+ sta_info_cleanup_expire_buffered(local, sta);
+ rcu_read_unlock();
- ieee80211_sta_debugfs_add(sta);
- rate_control_add_sta_debugfs(sta);
+ if (local->quiescing)
+ return;
- sta = __sta_info_unpin(sta);
- sta_info_destroy(sta);
- }
- rtnl_unlock();
+ local->sta_cleanup.expires =
+ round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
+ add_timer(&local->sta_cleanup);
}
-#endif
void sta_info_init(struct ieee80211_local *local)
{
spin_lock_init(&local->sta_lock);
+ mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
+ INIT_LIST_HEAD(&local->sta_pending_list);
+ INIT_WORK(&local->sta_finish_work, sta_info_finish_work);
setup_timer(&local->sta_cleanup, sta_info_cleanup,
(unsigned long)local);
local->sta_cleanup.expires =
round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- INIT_WORK(&local->sta_debugfs_add, sta_info_debugfs_add_work);
-#endif
}
int sta_info_start(struct ieee80211_local *local)
@@ -795,16 +814,6 @@ int sta_info_start(struct ieee80211_local *local)
void sta_info_stop(struct ieee80211_local *local)
{
del_timer(&local->sta_cleanup);
-#ifdef CONFIG_MAC80211_DEBUGFS
- /*
- * Make sure the debugfs adding work isn't pending after this
- * because we're about to be destroyed. It doesn't matter
- * whether it ran or not since we're going to flush all STAs
- * anyway.
- */
- cancel_work_sync(&local->sta_debugfs_add);
-#endif
-
sta_info_flush(local, NULL);
}
@@ -820,26 +829,19 @@ int sta_info_flush(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
struct sta_info *sta, *tmp;
- LIST_HEAD(tmp_list);
int ret = 0;
- unsigned long flags;
might_sleep();
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
+
+ sta_info_finish_pending(local);
+
list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
- if (!sdata || sdata == sta->sdata) {
- __sta_info_unlink(&sta);
- if (sta) {
- list_add_tail(&sta->list, &tmp_list);
- ret++;
- }
- }
+ if (!sdata || sdata == sta->sdata)
+ WARN_ON(__sta_info_destroy(sta));
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
- list_for_each_entry_safe(sta, tmp, &tmp_list, list)
- sta_info_destroy(sta);
+ mutex_unlock(&local->sta_mtx);
return ret;
}
@@ -849,24 +851,17 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct sta_info *sta, *tmp;
- LIST_HEAD(tmp_list);
- unsigned long flags;
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
list_for_each_entry_safe(sta, tmp, &local->sta_list, list)
if (time_after(jiffies, sta->last_rx + exp_time)) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
sdata->name, sta->sta.addr);
#endif
- __sta_info_unlink(&sta);
- if (sta)
- list_add(&sta->list, &tmp_list);
+ WARN_ON(__sta_info_destroy(sta));
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
-
- list_for_each_entry_safe(sta, tmp, &tmp_list, list)
- sta_info_destroy(sta);
+ mutex_unlock(&local->sta_mtx);
}
struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 6f79bba5706..822d8452293 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,6 +42,9 @@
* be in the queues
* @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
* station in power-save mode, reply when the driver unblocks.
+ * @WLAN_STA_DISASSOC: Disassociation in progress.
+ * This is used to reject TX BA session requests when disassociation
+ * is in progress.
*/
enum ieee80211_sta_info_flags {
WLAN_STA_AUTH = 1<<0,
@@ -57,6 +60,7 @@ enum ieee80211_sta_info_flags {
WLAN_STA_SUSPEND = 1<<11,
WLAN_STA_PS_DRIVER = 1<<12,
WLAN_STA_PSPOLL = 1<<13,
+ WLAN_STA_DISASSOC = 1<<14,
};
#define STA_TID_NUM 16
@@ -162,11 +166,6 @@ struct sta_ampdu_mlme {
};
-/* see __sta_info_unlink */
-#define STA_INFO_PIN_STAT_NORMAL 0
-#define STA_INFO_PIN_STAT_PINNED 1
-#define STA_INFO_PIN_STAT_DESTROY 2
-
/**
* struct sta_info - STA information
*
@@ -187,7 +186,6 @@ struct sta_ampdu_mlme {
* @flaglock: spinlock for flags accesses
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
- * @pin_status: used internally for pinning a STA struct into memory
* @flags: STA flags, see &enum ieee80211_sta_info_flags
* @ps_tx_buf: buffer of frames to transmit to this station
* when it leaves power saving state
@@ -226,6 +224,7 @@ struct sta_ampdu_mlme {
* @debugfs: debug filesystem info
* @sta: station information we share with the driver
* @dead: set to true when sta is unlinked
+ * @uploaded: set to true when sta is uploaded to the driver
*/
struct sta_info {
/* General information, mostly static */
@@ -245,11 +244,7 @@ struct sta_info {
bool dead;
- /*
- * for use by the internal lifetime management,
- * see __sta_info_unlink
- */
- u8 pin_status;
+ bool uploaded;
/*
* frequently updated, locked with own spinlock (flaglock),
@@ -449,18 +444,19 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
* Insert STA info into hash table/list, returns zero or a
 * -EEXIST if the same MAC address is already present.
*
- * Calling this without RCU protection makes the caller
- * relinquish its reference to @sta.
+ * Calling the non-rcu version makes the caller relinquish its
+ * reference to @sta; the _rcu version calls rcu_read_lock() and
+ * must be called without that lock already held.
*/
int sta_info_insert(struct sta_info *sta);
-/*
- * Unlink a STA info from the hash table/list.
- * This can NULL the STA pointer if somebody else
- * has already unlinked it.
- */
-void sta_info_unlink(struct sta_info **sta);
+int sta_info_insert_rcu(struct sta_info *sta) __acquires(RCU);
+int sta_info_insert_atomic(struct sta_info *sta);
+
+int sta_info_destroy_addr(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+int sta_info_destroy_addr_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
-void sta_info_destroy(struct sta_info *sta);
void sta_info_set_tim_bit(struct sta_info *sta);
void sta_info_clear_tim_bit(struct sta_info *sta);
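[Editorial sketch, not part of the patch: the calling convention for the new sta_info_insert_rcu(), mirroring its use in mesh_plink.c above; the variable names sdata and addr are borrowed from those callers. The function returns with rcu_read_lock() held even on failure, and on failure the station it was given has already been freed.]

	sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
	if (!sta)
		return;

	if (sta_info_insert_rcu(sta)) {
		/* insertion failed: sta is already freed, RCU lock is held */
		rcu_read_unlock();
		return;
	}
	/* ... use sta under RCU protection ... */
	rcu_read_unlock();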
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index e57ad6b1d7e..ded98730c11 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -188,6 +188,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_lock();
sband = local->hw.wiphy->bands[info->band];
+ fc = hdr->frame_control;
for_each_sta_info(local, hdr->addr1, sta, tmp) {
/* skip wrong virtual interface */
@@ -205,8 +206,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
}
- fc = hdr->frame_control;
-
if ((info->flags & IEEE80211_TX_STAT_AMPDU_NO_BACK) &&
(ieee80211_is_data_qos(fc))) {
u16 tid, ssn;
@@ -275,6 +274,20 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
local->dot11FailedCount++;
}
+ if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc) &&
+ (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
+ !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ local->ps_sdata && !(local->scanning)) {
+ if (info->flags & IEEE80211_TX_STAT_ACK) {
+ local->ps_sdata->u.mgd.flags |=
+ IEEE80211_STA_NULLFUNC_ACKED;
+ ieee80211_queue_work(&local->hw,
+ &local->dynamic_ps_enable_work);
+ } else
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(10));
+ }
+
/* this was a transmitted frame, but now we want to reuse it */
skb_orphan(skb);
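[Editorial sketch, not part of the patch: the driver-side behaviour the nullfunc handling above relies on. A driver advertising IEEE80211_HW_REPORTS_TX_ACK_STATUS reports per-frame ACK results when completing transmitted frames; frame_was_acked is a hypothetical driver-specific flag.]

	/* at hardware registration time */
	hw->flags |= IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* in the driver's TX completion path */
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	ieee80211_tx_info_clear_status(info);
	if (frame_was_acked)
		info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status(hw, skb);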
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 85e382aa894..cbe53ed4fb0 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -571,7 +571,7 @@ ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- if (tx->sta)
+ if (tx->sta && tx->sta->uploaded)
info->control.sta = &tx->sta->sta;
return TX_CONTINUE;
@@ -1010,7 +1010,8 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_supported_band *sband;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
+ int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
+ NULL);
sband = tx->local->hw.wiphy->bands[tx->channel->band];
@@ -1046,7 +1047,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
* because it will be recomputed and added
* on transmission
*/
- if (skb->len < (iterator.max_length + FCS_LEN))
+ if (skb->len < (iterator._max_length + FCS_LEN))
return false;
skb_trim(skb, skb->len - FCS_LEN);
@@ -1073,10 +1074,10 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
/*
* remove the radiotap header
- * iterator->max_length was sanity-checked against
+ * iterator->_max_length was sanity-checked against
* skb->len by iterator init
*/
- skb_pull(skb, iterator.max_length);
+ skb_pull(skb, iterator._max_length);
return true;
}
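[Editorial sketch, not part of the patch: parsing a radiotap header with the extended iterator API used above. The new fourth argument to ieee80211_radiotap_iterator_init() selects optional vendor namespaces; passing NULL, as __ieee80211_parse_tx_radiotap() does, parses only the standard fields. rthdr and skb borrow their names from that function.]

	struct ieee80211_radiotap_iterator iter;
	u8 rate = 0;
	int ret;

	ret = ieee80211_radiotap_iterator_init(&iter, rthdr, skb->len, NULL);
	while (!ret) {
		ret = ieee80211_radiotap_iterator_next(&iter);
		if (ret)
			break;
		switch (iter.this_arg_index) {
		case IEEE80211_RADIOTAP_RATE:
			/* rate in units of 500 kbps */
			rate = *iter.this_arg;
			break;
		default:
			break;
		}
	}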
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index ca170b417da..c453226f06b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1082,7 +1082,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
- unsigned long flags;
int res;
if (local->suspended)
@@ -1116,20 +1115,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
/* add STAs back */
- if (local->ops->sta_notify) {
- spin_lock_irqsave(&local->sta_lock, flags);
- list_for_each_entry(sta, &local->sta_list, list) {
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ if (sta->uploaded) {
sdata = sta->sdata;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, sdata, STA_NOTIFY_ADD,
- &sta->sta);
+ WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
- spin_unlock_irqrestore(&local->sta_lock, flags);
}
+ mutex_unlock(&local->sta_mtx);
/* Clear Suspend state so that ADDBA requests can be processed */
@@ -1180,6 +1178,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ rcu_read_lock();
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ ieee80211_sta_tear_down_BA_sessions(sta);
+ }
+ }
+ rcu_read_unlock();
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
if (ieee80211_sdata_running(sdata))
@@ -1219,10 +1225,10 @@ int ieee80211_reconfig(struct ieee80211_local *local)
add_timer(&local->sta_cleanup);
- spin_lock_irqsave(&local->sta_lock, flags);
+ mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list)
mesh_plink_restart(sta);
- spin_unlock_irqrestore(&local->sta_lock, flags);
+ mutex_unlock(&local->sta_mtx);
#else
WARN_ON(1);
#endif
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 71604c6613b..a249127020a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1267,28 +1267,13 @@ static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
{
- struct sock *s;
- struct hlist_node *node;
- int i = 1;
-
spin_lock_bh(&nr_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- sk_for_each(s, node, &nr_list) {
- if (i == *pos)
- return s;
- ++i;
- }
- return NULL;
+ return seq_hlist_start_head(&nr_list, *pos);
}
static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
-
- return (v == SEQ_START_TOKEN) ? sk_head(&nr_list)
- : sk_next((struct sock *)v);
+ return seq_hlist_next(v, &nr_list, pos);
}
static void nr_info_stop(struct seq_file *seq, void *v)
@@ -1298,7 +1283,7 @@ static void nr_info_stop(struct seq_file *seq, void *v)
static int nr_info_show(struct seq_file *seq, void *v)
{
- struct sock *s = v;
+ struct sock *s = sk_entry(v);
struct net_device *dev;
struct nr_sock *nr;
const char *devname;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e2e2d33cafd..5cc648012f5 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -863,33 +863,13 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
static void *nr_node_start(struct seq_file *seq, loff_t *pos)
{
- struct nr_node *nr_node;
- struct hlist_node *node;
- int i = 1;
-
spin_lock_bh(&nr_node_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- nr_node_for_each(nr_node, node, &nr_node_list) {
- if (i == *pos)
- return nr_node;
- ++i;
- }
-
- return NULL;
+ return seq_hlist_start_head(&nr_node_list, *pos);
}
static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct hlist_node *node;
- ++*pos;
-
- node = (v == SEQ_START_TOKEN)
- ? nr_node_list.first
- : ((struct nr_node *)v)->node_node.next;
-
- return hlist_entry(node, struct nr_node, node_node);
+ return seq_hlist_next(v, &nr_node_list, pos);
}
static void nr_node_stop(struct seq_file *seq, void *v)
@@ -906,7 +886,9 @@ static int nr_node_show(struct seq_file *seq, void *v)
seq_puts(seq,
"callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
else {
- struct nr_node *nr_node = v;
+ struct nr_node *nr_node = hlist_entry(v, struct nr_node,
+ node_node);
+
nr_node_lock(nr_node);
seq_printf(seq, "%-9s %-7s %d %d",
ax2asc(buf, &nr_node->callsign),
@@ -949,31 +931,13 @@ const struct file_operations nr_nodes_fops = {
static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
{
- struct nr_neigh *nr_neigh;
- struct hlist_node *node;
- int i = 1;
-
spin_lock_bh(&nr_neigh_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- nr_neigh_for_each(nr_neigh, node, &nr_neigh_list) {
- if (i == *pos)
- return nr_neigh;
- }
- return NULL;
+ return seq_hlist_start_head(&nr_neigh_list, *pos);
}
static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct hlist_node *node;
- ++*pos;
-
- node = (v == SEQ_START_TOKEN)
- ? nr_neigh_list.first
- : ((struct nr_neigh *)v)->neigh_node.next;
-
- return hlist_entry(node, struct nr_neigh, neigh_node);
+ return seq_hlist_next(v, &nr_neigh_list, pos);
}
static void nr_neigh_stop(struct seq_file *seq, void *v)
@@ -989,8 +953,9 @@ static int nr_neigh_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
else {
- struct nr_neigh *nr_neigh = v;
+ struct nr_neigh *nr_neigh;
+ nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
nr_neigh->number,
ax2asc(buf, &nr_neigh->callsign),
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6ecb426bc0c..10f7295bcef 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2510,33 +2510,19 @@ static struct notifier_block packet_netdev_notifier = {
};
#ifdef CONFIG_PROC_FS
-static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
-{
- struct sock *s;
- struct hlist_node *node;
-
- sk_for_each(s, node, &net->packet.sklist) {
- if (!off--)
- return s;
- }
- return NULL;
-}
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(seq_file_net(seq)->packet.sklist_lock)
{
struct net *net = seq_file_net(seq);
read_lock(&net->packet.sklist_lock);
- return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
+ return seq_hlist_start_head(&net->packet.sklist, *pos);
}
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
- ++*pos;
- return (v == SEQ_START_TOKEN)
- ? sk_head(&net->packet.sklist)
- : sk_next((struct sock *)v) ;
+ return seq_hlist_next(v, &net->packet.sklist, pos);
}
static void packet_seq_stop(struct seq_file *seq, void *v)
@@ -2551,7 +2537,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
else {
- struct sock *s = v;
+ struct sock *s = sk_entry(v);
const struct packet_sock *po = pkt_sk(s);
seq_printf(seq,
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8feb9e5d662..e90b9b6c16a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1404,29 +1404,13 @@ static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
__acquires(rose_list_lock)
{
- int i;
- struct sock *s;
- struct hlist_node *node;
-
spin_lock_bh(&rose_list_lock);
- if (*pos == 0)
- return SEQ_START_TOKEN;
-
- i = 1;
- sk_for_each(s, node, &rose_list) {
- if (i == *pos)
- return s;
- ++i;
- }
- return NULL;
+ return seq_hlist_start_head(&rose_list, *pos);
}
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
- ++*pos;
-
- return (v == SEQ_START_TOKEN) ? sk_head(&rose_list)
- : sk_next((struct sock *)v);
+ return seq_hlist_next(v, &rose_list, pos);
}
static void rose_info_stop(struct seq_file *seq, void *v)
@@ -1444,7 +1428,7 @@ static int rose_info_show(struct seq_file *seq, void *v)
"dest_addr dest_call src_addr src_call dev lci neigh st vs vr va t t1 t2 t3 hb idle Snd-Q Rcv-Q inode\n");
else {
- struct sock *s = v;
+ struct sock *s = sk_entry(v);
struct rose_sock *rose = rose_sk(s);
const char *devname, *callsign;
const struct net_device *dev = rose->device;
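[Editorial sketch, not part of the patch: the common seq_file pattern that the netrom, packet and rose conversions above all follow. seq_hlist_start_head() returns SEQ_START_TOKEN for the header row, seq_hlist_next() advances the hlist_node cursor, and sk_entry()/hlist_entry() recover the object in ->show(). foo_list and foo_list_lock are placeholders.]

	static void *foo_seq_start(struct seq_file *seq, loff_t *pos)
		__acquires(foo_list_lock)
	{
		spin_lock_bh(&foo_list_lock);
		return seq_hlist_start_head(&foo_list, *pos);
	}

	static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		return seq_hlist_next(v, &foo_list, pos);
	}

	static void foo_seq_stop(struct seq_file *seq, void *v)
		__releases(foo_list_lock)
	{
		spin_unlock_bh(&foo_list_lock);
	}

	static int foo_seq_show(struct seq_file *seq, void *v)
	{
		if (v == SEQ_START_TOKEN) {
			seq_puts(seq, "column headings\n");
			return 0;
		}
		seq_printf(seq, "sk %p\n", sk_entry(v));
		return 0;
	}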
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 49278f83036..9ea45383480 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -78,7 +78,7 @@ rpc_timeout_upcall_queue(struct work_struct *work)
}
/**
- * rpc_queue_upcall
+ * rpc_queue_upcall - queue an upcall message to userspace
* @inode: inode of upcall pipe on which to queue given message
* @msg: message to queue
*
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index f591871a7b4..1332c445d1c 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -2,6 +2,16 @@
* Radiotap parser
*
* Copyright 2007 Andy Green <andy@warmcat.com>
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ *
+ * See COPYING for more details.
*/
#include <net/cfg80211.h>
@@ -10,6 +20,35 @@
/* function prototypes and related defs are in include/net/cfg80211.h */
+static const struct radiotap_align_size rtap_namespace_sizes[] = {
+ [IEEE80211_RADIOTAP_TSFT] = { .align = 8, .size = 8, },
+ [IEEE80211_RADIOTAP_FLAGS] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_RATE] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_CHANNEL] = { .align = 2, .size = 4, },
+ [IEEE80211_RADIOTAP_FHSS] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DBM_ANTNOISE] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_LOCK_QUALITY] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_TX_ATTENUATION] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_DBM_TX_POWER] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_ANTENNA] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DB_ANTNOISE] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_RX_FLAGS] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_TX_FLAGS] = { .align = 2, .size = 2, },
+ [IEEE80211_RADIOTAP_RTS_RETRIES] = { .align = 1, .size = 1, },
+ [IEEE80211_RADIOTAP_DATA_RETRIES] = { .align = 1, .size = 1, },
+ /*
+ * add more here as they are defined in radiotap.h
+ */
+};
+
+static const struct ieee80211_radiotap_namespace radiotap_ns = {
+ .n_bits = sizeof(rtap_namespace_sizes) / sizeof(rtap_namespace_sizes[0]),
+ .align_size = rtap_namespace_sizes,
+};
+
/**
* ieee80211_radiotap_iterator_init - radiotap parser iterator initialization
* @iterator: radiotap_iterator to initialize
@@ -50,9 +89,9 @@
*/
int ieee80211_radiotap_iterator_init(
- struct ieee80211_radiotap_iterator *iterator,
- struct ieee80211_radiotap_header *radiotap_header,
- int max_length)
+ struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
{
/* Linux only supports version 0 radiotap format */
if (radiotap_header->it_version)
@@ -62,19 +101,24 @@ int ieee80211_radiotap_iterator_init(
if (max_length < get_unaligned_le16(&radiotap_header->it_len))
return -EINVAL;
- iterator->rtheader = radiotap_header;
- iterator->max_length = get_unaligned_le16(&radiotap_header->it_len);
- iterator->arg_index = 0;
- iterator->bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
- iterator->arg = (u8 *)radiotap_header + sizeof(*radiotap_header);
- iterator->this_arg = NULL;
+ iterator->_rtheader = radiotap_header;
+ iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
+ iterator->_arg_index = 0;
+ iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
+ iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
+ iterator->_reset_on_ext = 0;
+ iterator->_next_bitmap = &radiotap_header->it_present;
+ iterator->_next_bitmap++;
+ iterator->_vns = vns;
+ iterator->current_namespace = &radiotap_ns;
+ iterator->is_radiotap_ns = 1;
/* find payload start allowing for extended bitmap(s) */
- if (unlikely(iterator->bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT))) {
- while (get_unaligned_le32(iterator->arg) &
- (1 << IEEE80211_RADIOTAP_EXT)) {
- iterator->arg += sizeof(u32);
+ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+ while (get_unaligned_le32(iterator->_arg) &
+ (1 << IEEE80211_RADIOTAP_EXT)) {
+ iterator->_arg += sizeof(uint32_t);
/*
* check for insanity where the present bitmaps
@@ -82,12 +126,13 @@ int ieee80211_radiotap_iterator_init(
* stated radiotap header length
*/
- if (((ulong)iterator->arg -
- (ulong)iterator->rtheader) > iterator->max_length)
+ if ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader >
+ (unsigned long)iterator->_max_length)
return -EINVAL;
}
- iterator->arg += sizeof(u32);
+ iterator->_arg += sizeof(uint32_t);
/*
* no need to check again for blowing past stated radiotap
@@ -96,12 +141,36 @@ int ieee80211_radiotap_iterator_init(
*/
}
+ iterator->this_arg = iterator->_arg;
+
/* we are all initialized happily */
return 0;
}
EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
+static void find_ns(struct ieee80211_radiotap_iterator *iterator,
+ uint32_t oui, uint8_t subns)
+{
+ int i;
+
+ iterator->current_namespace = NULL;
+
+ if (!iterator->_vns)
+ return;
+
+ for (i = 0; i < iterator->_vns->n_ns; i++) {
+ if (iterator->_vns->ns[i].oui != oui)
+ continue;
+ if (iterator->_vns->ns[i].subns != subns)
+ continue;
+
+ iterator->current_namespace = &iterator->_vns->ns[i];
+ break;
+ }
+}
+
+
/**
* ieee80211_radiotap_iterator_next - return next radiotap parser iterator arg
@@ -127,99 +196,80 @@ EXPORT_SYMBOL(ieee80211_radiotap_iterator_init);
*/
int ieee80211_radiotap_iterator_next(
- struct ieee80211_radiotap_iterator *iterator)
+ struct ieee80211_radiotap_iterator *iterator)
{
-
- /*
- * small length lookup table for all radiotap types we heard of
- * starting from b0 in the bitmap, so we can walk the payload
- * area of the radiotap header
- *
- * There is a requirement to pad args, so that args
- * of a given length must begin at a boundary of that length
- * -- but note that compound args are allowed (eg, 2 x u16
- * for IEEE80211_RADIOTAP_CHANNEL) so total arg length is not
- * a reliable indicator of alignment requirement.
- *
- * upper nybble: content alignment for arg
- * lower nybble: content length for arg
- */
-
- static const u8 rt_sizes[] = {
- [IEEE80211_RADIOTAP_TSFT] = 0x88,
- [IEEE80211_RADIOTAP_FLAGS] = 0x11,
- [IEEE80211_RADIOTAP_RATE] = 0x11,
- [IEEE80211_RADIOTAP_CHANNEL] = 0x24,
- [IEEE80211_RADIOTAP_FHSS] = 0x22,
- [IEEE80211_RADIOTAP_DBM_ANTSIGNAL] = 0x11,
- [IEEE80211_RADIOTAP_DBM_ANTNOISE] = 0x11,
- [IEEE80211_RADIOTAP_LOCK_QUALITY] = 0x22,
- [IEEE80211_RADIOTAP_TX_ATTENUATION] = 0x22,
- [IEEE80211_RADIOTAP_DB_TX_ATTENUATION] = 0x22,
- [IEEE80211_RADIOTAP_DBM_TX_POWER] = 0x11,
- [IEEE80211_RADIOTAP_ANTENNA] = 0x11,
- [IEEE80211_RADIOTAP_DB_ANTSIGNAL] = 0x11,
- [IEEE80211_RADIOTAP_DB_ANTNOISE] = 0x11,
- [IEEE80211_RADIOTAP_RX_FLAGS] = 0x22,
- [IEEE80211_RADIOTAP_TX_FLAGS] = 0x22,
- [IEEE80211_RADIOTAP_RTS_RETRIES] = 0x11,
- [IEEE80211_RADIOTAP_DATA_RETRIES] = 0x11,
- /*
- * add more here as they are defined in
- * include/net/ieee80211_radiotap.h
- */
- };
-
- /*
- * for every radiotap entry we can at
- * least skip (by knowing the length)...
- */
-
- while (iterator->arg_index < sizeof(rt_sizes)) {
+ while (1) {
int hit = 0;
- int pad;
+ int pad, align, size, subns, vnslen;
+ uint32_t oui;
- if (!(iterator->bitmap_shifter & 1))
+ /* if no more EXT bits, that's it */
+ if ((iterator->_arg_index % 32) == IEEE80211_RADIOTAP_EXT &&
+ !(iterator->_bitmap_shifter & 1))
+ return -ENOENT;
+
+ if (!(iterator->_bitmap_shifter & 1))
goto next_entry; /* arg not present */
+ /* get alignment/size of data */
+ switch (iterator->_arg_index % 32) {
+ case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
+ case IEEE80211_RADIOTAP_EXT:
+ align = 1;
+ size = 0;
+ break;
+ case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
+ align = 2;
+ size = 6;
+ break;
+ default:
+ if (!iterator->current_namespace ||
+ iterator->_arg_index >= iterator->current_namespace->n_bits) {
+ if (iterator->current_namespace == &radiotap_ns)
+ return -ENOENT;
+ align = 0;
+ } else {
+ align = iterator->current_namespace->align_size[iterator->_arg_index].align;
+ size = iterator->current_namespace->align_size[iterator->_arg_index].size;
+ }
+ if (!align) {
+ /* skip all subsequent data */
+ iterator->_arg = iterator->_next_ns_data;
+ /* give up on this namespace */
+ iterator->current_namespace = NULL;
+ goto next_entry;
+ }
+ break;
+ }
+
/*
* arg is present, account for alignment padding
- * 8-bit args can be at any alignment
- * 16-bit args must start on 16-bit boundary
- * 32-bit args must start on 32-bit boundary
- * 64-bit args must start on 64-bit boundary
*
- * note that total arg size can differ from alignment of
- * elements inside arg, so we use upper nybble of length
- * table to base alignment on
- *
- * also note: these alignments are ** relative to the
- * start of the radiotap header **. There is no guarantee
+ * Note that these alignments are relative to the start
+ * of the radiotap header. There is no guarantee
* that the radiotap header itself is aligned on any
* kind of boundary.
*
- * the above is why get_unaligned() is used to dereference
- * multibyte elements from the radiotap area
+ * The above is why get_unaligned() is used to dereference
+ * multibyte elements from the radiotap area.
*/
- pad = (((ulong)iterator->arg) -
- ((ulong)iterator->rtheader)) &
- ((rt_sizes[iterator->arg_index] >> 4) - 1);
+ pad = ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader) & (align - 1);
if (pad)
- iterator->arg +=
- (rt_sizes[iterator->arg_index] >> 4) - pad;
+ iterator->_arg += align - pad;
/*
* this is what we will return to user, but we need to
* move on first so next call has something fresh to test
*/
- iterator->this_arg_index = iterator->arg_index;
- iterator->this_arg = iterator->arg;
- hit = 1;
+ iterator->this_arg_index = iterator->_arg_index;
+ iterator->this_arg = iterator->_arg;
+ iterator->this_arg_size = size;
/* internally move on the size of this arg */
- iterator->arg += rt_sizes[iterator->arg_index] & 0x0f;
+ iterator->_arg += size;
/*
* check for insanity where we are given a bitmap that
@@ -228,32 +278,73 @@ int ieee80211_radiotap_iterator_next(
* max_length on the last arg, never exceeding it.
*/
- if (((ulong)iterator->arg - (ulong)iterator->rtheader) >
- iterator->max_length)
+ if ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader >
+ (unsigned long)iterator->_max_length)
return -EINVAL;
- next_entry:
- iterator->arg_index++;
- if (unlikely((iterator->arg_index & 31) == 0)) {
- /* completed current u32 bitmap */
- if (iterator->bitmap_shifter & 1) {
- /* b31 was set, there is more */
- /* move to next u32 bitmap */
- iterator->bitmap_shifter =
- get_unaligned_le32(iterator->next_bitmap);
- iterator->next_bitmap++;
- } else
- /* no more bitmaps: end */
- iterator->arg_index = sizeof(rt_sizes);
- } else /* just try the next bit */
- iterator->bitmap_shifter >>= 1;
+ /* these special ones are valid in each bitmap word */
+ switch (iterator->_arg_index % 32) {
+ case IEEE80211_RADIOTAP_VENDOR_NAMESPACE:
+ iterator->_bitmap_shifter >>= 1;
+ iterator->_arg_index++;
+
+ iterator->_reset_on_ext = 1;
+
+ vnslen = get_unaligned_le16(iterator->this_arg + 4);
+ iterator->_next_ns_data = iterator->_arg + vnslen;
+ oui = (*iterator->this_arg << 16) |
+ (*(iterator->this_arg + 1) << 8) |
+ *(iterator->this_arg + 2);
+ subns = *(iterator->this_arg + 3);
+
+ find_ns(iterator, oui, subns);
+
+ iterator->is_radiotap_ns = 0;
+ /* allow parsers to show this information */
+ iterator->this_arg_index =
+ IEEE80211_RADIOTAP_VENDOR_NAMESPACE;
+ iterator->this_arg_size += vnslen;
+ if ((unsigned long)iterator->this_arg +
+ iterator->this_arg_size -
+ (unsigned long)iterator->_rtheader >
+ (unsigned long)(unsigned long)iterator->_max_length)
+ return -EINVAL;
+ hit = 1;
+ break;
+ case IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE:
+ iterator->_bitmap_shifter >>= 1;
+ iterator->_arg_index++;
+
+ iterator->_reset_on_ext = 1;
+ iterator->current_namespace = &radiotap_ns;
+ iterator->is_radiotap_ns = 1;
+ break;
+ case IEEE80211_RADIOTAP_EXT:
+ /*
+ * bit 31 was set, there is more
+ * -- move to next u32 bitmap
+ */
+ iterator->_bitmap_shifter =
+ get_unaligned_le32(iterator->_next_bitmap);
+ iterator->_next_bitmap++;
+ if (iterator->_reset_on_ext)
+ iterator->_arg_index = 0;
+ else
+ iterator->_arg_index++;
+ iterator->_reset_on_ext = 0;
+ break;
+ default:
+ /* we've got a hit! */
+ hit = 1;
+ next_entry:
+ iterator->_bitmap_shifter >>= 1;
+ iterator->_arg_index++;
+ }
/* if we found a valid arg earlier, return it now */
if (hit)
return 0;
}
-
- /* we don't know how to handle any more args, we're done */
- return -ENOENT;
}
EXPORT_SYMBOL(ieee80211_radiotap_iterator_next);
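
For reference, a caller of the reworked radiotap iterator now looks roughly like the sketch below: the bookkeeping fields gained a leading underscore and are private, the public outputs are this_arg / this_arg_index / this_arg_size plus the namespace flags, and passing a NULL vendor-namespace table simply causes unknown vendor fields to be skipped. parse_radiotap() is an illustrative function, not part of the patch.

	#include <linux/kernel.h>
	#include <linux/skbuff.h>
	#include <net/cfg80211.h>
	#include <net/ieee80211_radiotap.h>

	static int parse_radiotap(struct sk_buff *skb)
	{
		struct ieee80211_radiotap_iterator it;
		int ret;

		ret = ieee80211_radiotap_iterator_init(&it,
				(struct ieee80211_radiotap_header *)skb->data,
				skb->len, NULL /* no vendor namespaces */);
		if (ret)
			return ret;	/* malformed radiotap header */

		while ((ret = ieee80211_radiotap_iterator_next(&it)) == 0) {
			if (!it.is_radiotap_ns)
				continue;	/* field from a vendor namespace */

			switch (it.this_arg_index) {
			case IEEE80211_RADIOTAP_RATE:
				/* one byte, units of 500 kb/s */
				pr_debug("rate: %u\n", *it.this_arg);
				break;
			case IEEE80211_RADIOTAP_FLAGS:
				pr_debug("flags: 0x%x\n", *it.this_arg);
				break;
			default:
				/* alignment/size table lets the iterator skip it */
				break;
			}
		}

		/* -ENOENT just means the walk ran out of fields */
		return ret == -ENOENT ? 0 : ret;
	}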
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index e3219e4cd04..9796f3ed1ed 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -55,6 +55,7 @@
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/compat.h>
+#include <linux/ctype.h>
#include <net/x25.h>
#include <net/compat.h>
@@ -512,15 +513,20 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
struct x25_sock *x25;
- int rc = -ESOCKTNOSUPPORT;
+ int rc = -EAFNOSUPPORT;
if (!net_eq(net, &init_net))
- return -EAFNOSUPPORT;
+ goto out;
- if (sock->type != SOCK_SEQPACKET || protocol)
+ rc = -ESOCKTNOSUPPORT;
+ if (sock->type != SOCK_SEQPACKET)
goto out;
- rc = -ENOMEM;
+ rc = -EINVAL;
+ if (protocol)
+ goto out;
+
+ rc = -ENOBUFS;
if ((sk = x25_alloc_socket(net)) == NULL)
goto out;
@@ -643,7 +649,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
- int rc = 0;
+ int len, i, rc = 0;
lock_kernel();
if (!sock_flag(sk, SOCK_ZAPPED) ||
@@ -653,6 +659,14 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
+ len = strlen(addr->sx25_addr.x25_addr);
+ for (i = 0; i < len; i++) {
+ if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
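
The new loop in x25_bind() simply rejects any bound X.121 address containing a non-digit character. Expressed as a stand-alone helper (illustrative only, not part of the patch):

	#include <linux/ctype.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* an X.121 address may only contain decimal digits */
	static bool x121_addr_valid(const char *addr)
	{
		size_t i, len = strlen(addr);

		for (i = 0; i < len; i++)
			if (!isdigit(addr[i]))
				return false;

		return true;
	}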
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index 0a04e62e0e1..7ff37379232 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -25,49 +25,17 @@
#include <net/x25.h>
#ifdef CONFIG_PROC_FS
-static __inline__ struct x25_route *x25_get_route_idx(loff_t pos)
-{
- struct list_head *route_entry;
- struct x25_route *rt = NULL;
-
- list_for_each(route_entry, &x25_route_list) {
- rt = list_entry(route_entry, struct x25_route, node);
- if (!pos--)
- goto found;
- }
- rt = NULL;
-found:
- return rt;
-}
static void *x25_seq_route_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_route_list_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&x25_route_list_lock);
- return l ? x25_get_route_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&x25_route_list, *pos);
}
static void *x25_seq_route_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct x25_route *rt;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- rt = NULL;
- if (!list_empty(&x25_route_list))
- rt = list_entry(x25_route_list.next,
- struct x25_route, node);
- goto out;
- }
- rt = v;
- if (rt->node.next != &x25_route_list)
- rt = list_entry(rt->node.next, struct x25_route, node);
- else
- rt = NULL;
-out:
- return rt;
+ return seq_list_next(v, &x25_route_list, pos);
}
static void x25_seq_route_stop(struct seq_file *seq, void *v)
@@ -78,9 +46,9 @@ static void x25_seq_route_stop(struct seq_file *seq, void *v)
static int x25_seq_route_show(struct seq_file *seq, void *v)
{
- struct x25_route *rt;
+ struct x25_route *rt = list_entry(v, struct x25_route, node);
- if (v == SEQ_START_TOKEN) {
+ if (v == &x25_route_list) {
seq_puts(seq, "Address Digits Device\n");
goto out;
}
@@ -93,40 +61,16 @@ out:
return 0;
}
-static __inline__ struct sock *x25_get_socket_idx(loff_t pos)
-{
- struct sock *s;
- struct hlist_node *node;
-
- sk_for_each(s, node, &x25_list)
- if (!pos--)
- goto found;
- s = NULL;
-found:
- return s;
-}
-
static void *x25_seq_socket_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_list_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&x25_list_lock);
- return l ? x25_get_socket_idx(--l) : SEQ_START_TOKEN;
+ return seq_hlist_start_head(&x25_list, *pos);
}
static void *x25_seq_socket_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct sock *s;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- s = sk_head(&x25_list);
- goto out;
- }
- s = sk_next(v);
-out:
- return s;
+ return seq_hlist_next(v, &x25_list, pos);
}
static void x25_seq_socket_stop(struct seq_file *seq, void *v)
@@ -148,7 +92,7 @@ static int x25_seq_socket_show(struct seq_file *seq, void *v)
goto out;
}
- s = v;
+ s = sk_entry(v);
x25 = x25_sk(s);
if (!x25->neighbour || (dev = x25->neighbour->dev) == NULL)
@@ -170,51 +114,16 @@ out:
return 0;
}
-static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
-{
- struct x25_forward *f;
- struct list_head *entry;
-
- list_for_each(entry, &x25_forward_list) {
- f = list_entry(entry, struct x25_forward, node);
- if (!pos--)
- goto found;
- }
-
- f = NULL;
-found:
- return f;
-}
-
static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
__acquires(x25_forward_list_lock)
{
- loff_t l = *pos;
-
read_lock_bh(&x25_forward_list_lock);
- return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN;
+ return seq_list_start_head(&x25_forward_list, *pos);
}
static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct x25_forward *f;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- f = NULL;
- if (!list_empty(&x25_forward_list))
- f = list_entry(x25_forward_list.next,
- struct x25_forward, node);
- goto out;
- }
- f = v;
- if (f->node.next != &x25_forward_list)
- f = list_entry(f->node.next, struct x25_forward, node);
- else
- f = NULL;
-out:
- return f;
-
+ return seq_list_next(v, &x25_forward_list, pos);
}
static void x25_seq_forward_stop(struct seq_file *seq, void *v)
@@ -225,9 +134,9 @@ static void x25_seq_forward_stop(struct seq_file *seq, void *v)
static int x25_seq_forward_show(struct seq_file *seq, void *v)
{
- struct x25_forward *f;
+ struct x25_forward *f = list_entry(v, struct x25_forward, node);
- if (v == SEQ_START_TOKEN) {
+ if (v == &x25_forward_list) {
seq_printf(seq, "lci dev1 dev2\n");
goto out;
}
@@ -236,7 +145,6 @@ static int x25_seq_forward_show(struct seq_file *seq, void *v)
seq_printf(seq, "%d %-10s %-10s\n",
f->lci, f->dev1->name, f->dev2->name);
-
out:
return 0;
}
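
Note the subtle difference from the hlist-based conversions: for plain list_heads, seq_list_start_head() returns the list head itself as the header token, so ->show() compares v against &x25_route_list (or &x25_forward_list) rather than SEQ_START_TOKEN. A trimmed sketch of that pattern, with placeholder names (struct my_entry, my_list):

	#include <linux/list.h>
	#include <linux/seq_file.h>

	struct my_entry {
		struct list_head node;
		int val;
		const char *name;
	};

	static LIST_HEAD(my_list);

	static int my_seq_show(struct seq_file *seq, void *v)
	{
		/* harmless even for the header token: only dereferenced below */
		struct my_entry *e = list_entry(v, struct my_entry, node);

		if (v == &my_list) {	/* header token from seq_list_start_head() */
			seq_puts(seq, "val name\n");
			return 0;
		}

		seq_printf(seq, "%d %s\n", e->val, e->name);
		return 0;
	}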
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 0ecb16a9a88..eb870fcc29c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -771,7 +771,8 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
- int dir, err = 0;
+ int dir, err = 0, cnt = 0;
+ struct xfrm_policy *dp;
write_lock_bh(&xfrm_policy_lock);
@@ -789,8 +790,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
&net->xfrm.policy_inexact[dir], bydst) {
if (pol->type != type)
continue;
- __xfrm_policy_unlink(pol, dir);
+ dp = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
+ if (dp)
+ cnt++;
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
audit_info->sessionid,
@@ -809,8 +812,10 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
bydst) {
if (pol->type != type)
continue;
- __xfrm_policy_unlink(pol, dir);
+ dp = __xfrm_policy_unlink(pol, dir);
write_unlock_bh(&xfrm_policy_lock);
+ if (dp)
+ cnt++;
xfrm_audit_policy_delete(pol, 1,
audit_info->loginuid,
@@ -824,6 +829,8 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
}
}
+ if (!cnt)
+ err = -ESRCH;
atomic_inc(&flow_cache_genid);
out:
write_unlock_bh(&xfrm_policy_lock);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b36cc344474..f50ee9badf4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -603,13 +603,14 @@ xfrm_state_flush_secctx_check(struct net *net, u8 proto, struct xfrm_audit *audi
int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info)
{
- int i, err = 0;
+ int i, err = 0, cnt = 0;
spin_lock_bh(&xfrm_state_lock);
err = xfrm_state_flush_secctx_check(net, proto, audit_info);
if (err)
goto out;
+ err = -ESRCH;
for (i = 0; i <= net->xfrm.state_hmask; i++) {
struct hlist_node *entry;
struct xfrm_state *x;
@@ -626,13 +627,16 @@ restart:
audit_info->sessionid,
audit_info->secid);
xfrm_state_put(x);
+ if (!err)
+ cnt++;
spin_lock_bh(&xfrm_state_lock);
goto restart;
}
}
}
- err = 0;
+ if (cnt)
+ err = 0;
out:
spin_unlock_bh(&xfrm_state_lock);
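
Both flush paths now follow the same convention: count what was actually unlinked and report -ESRCH when the tables were already empty, while the xfrm_user.c hunks below turn that -ESRCH into a silent success for the netlink handlers. A stripped-down sketch of the convention, with placeholder names (flush_table, my_table), not kernel code:

	#include <linux/errno.h>
	#include <linux/list.h>

	struct entry {
		struct list_head node;
	};

	static LIST_HEAD(my_table);

	static int flush_table(void)
	{
		struct entry *e, *tmp;
		int cnt = 0;

		list_for_each_entry_safe(e, tmp, &my_table, node) {
			list_del(&e->node);	/* freeing/auditing omitted in this sketch */
			cnt++;
		}

		/* empty table: callers may treat this as "nothing to do" */
		return cnt ? 0 : -ESRCH;
	}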
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5a71297600..b0fb7d3bc15 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1525,7 +1525,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
audit_info.secid = NETLINK_CB(skb).sid;
err = xfrm_state_flush(net, p->proto, &audit_info);
if (err)
- return err;
+ return 0;
c.data.proto = p->proto;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
@@ -1677,7 +1677,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
audit_info.secid = NETLINK_CB(skb).sid;
err = xfrm_policy_flush(net, type, &audit_info);
if (err)
- return err;
+ return 0;
c.data.type = type;
c.event = nlh->nlmsg_type;
c.seq = nlh->nlmsg_seq;
@@ -2054,6 +2054,10 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
#undef XMSGSIZE
static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
+ [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
+ [XFRMA_LASTUSED] = { .type = NLA_U64},
+ [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
[XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
[XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
[XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },