Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/3com/3c501.c896
-rw-r--r--drivers/net/ethernet/3com/3c501.h91
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c515.c7
-rw-r--r--drivers/net/ethernet/3com/3c59x.c1
-rw-r--r--drivers/net/ethernet/3com/Kconfig20
-rw-r--r--drivers/net/ethernet/3com/Makefile1
-rw-r--r--drivers/net/ethernet/8390/3c503.c777
-rw-r--r--drivers/net/ethernet/8390/3c503.h91
-rw-r--r--drivers/net/ethernet/8390/Kconfig119
-rw-r--r--drivers/net/ethernet/8390/Makefile10
-rw-r--r--drivers/net/ethernet/8390/ac3200.c431
-rw-r--r--drivers/net/ethernet/8390/ax88796.c8
-rw-r--r--drivers/net/ethernet/8390/e2100.c489
-rw-r--r--drivers/net/ethernet/8390/es3210.c445
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c505
-rw-r--r--drivers/net/ethernet/8390/hp.c438
-rw-r--r--drivers/net/ethernet/8390/lne390.c433
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c1
-rw-r--r--drivers/net/ethernet/8390/ne3210.c346
-rw-r--r--drivers/net/ethernet/8390/smc-ultra32.c463
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c13
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c13
-rw-r--r--drivers/net/ethernet/amd/Kconfig15
-rw-r--r--drivers/net/ethernet/amd/Makefile1
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c12
-rw-r--r--drivers/net/ethernet/amd/depca.c1910
-rw-r--r--drivers/net/ethernet/amd/depca.h183
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c50
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c3
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c7
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig18
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c9
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c17
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c1461
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h453
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/Makefile3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h174
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c993
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h123
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h3274
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c456
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h32
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1665
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h58
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c203
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c3198
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h809
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c134
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c1651
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h360
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c10
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h1
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c1099
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h65
-rw-r--r--drivers/net/ethernet/cadence/macb.c2
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c42
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c92
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c6
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c13
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c7
-rw-r--r--drivers/net/ethernet/dec/Kconfig16
-rw-r--r--drivers/net/ethernet/dec/Makefile1
-rw-r--r--drivers/net/ethernet/dec/ewrk3.c1961
-rw-r--r--drivers/net/ethernet/dec/ewrk3.h322
-rw-r--r--drivers/net/ethernet/dlink/Kconfig32
-rw-r--r--drivers/net/ethernet/dlink/Makefile2
-rw-r--r--drivers/net/ethernet/dlink/de600.c529
-rw-r--r--drivers/net/ethernet/dlink/de600.h168
-rw-r--r--drivers/net/ethernet/dlink/de620.c987
-rw-r--r--drivers/net/ethernet/dlink/de620.h117
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/dlink/sundance.c1
-rw-r--r--drivers/net/ethernet/dnet.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c46
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c164
-rw-r--r--drivers/net/ethernet/ethoc.c63
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig9
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec.c356
-rw-r--r--drivers/net/ethernet/freescale/fec.h23
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c62
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c252
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h210
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c35
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/fujitsu/Kconfig25
-rw-r--r--drivers/net/ethernet/fujitsu/Makefile2
-rw-r--r--drivers/net/ethernet/fujitsu/at1700.c791
-rw-r--r--drivers/net/ethernet/fujitsu/eth16i.c1483
-rw-r--r--drivers/net/ethernet/i825xx/3c505.c1671
-rw-r--r--drivers/net/ethernet/i825xx/3c505.h292
-rw-r--r--drivers/net/ethernet/i825xx/3c507.c938
-rw-r--r--drivers/net/ethernet/i825xx/82596.c94
-rw-r--r--drivers/net/ethernet/i825xx/Kconfig92
-rw-r--r--drivers/net/ethernet/i825xx/Makefile8
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c1822
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.c1661
-rw-r--r--drivers/net/ethernet/i825xx/eexpress.h179
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c1337
-rw-r--r--drivers/net/ethernet/i825xx/ni52.c1346
-rw-r--r--drivers/net/ethernet/i825xx/ni52.h310
-rw-r--r--drivers/net/ethernet/i825xx/znet.c928
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c12
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c19
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c7
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/Kconfig16
-rw-r--r--drivers/net/ethernet/intel/e100.c3
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h65
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c140
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c558
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c339
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c29
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c117
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.h95
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c57
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h58
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile4
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h186
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h280
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c252
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h367
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c415
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h268
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c164
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.h74
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c15
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c699
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.h47
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c354
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.h242
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c277
-rw-r--r--drivers/net/ethernet/intel/e1000e/regs.h252
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c630
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h19
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h15
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h22
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h18
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h69
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c15
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c242
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c865
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c87
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h82
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c65
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c320
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c410
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c56
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c219
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c203
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c56
-rw-r--r--drivers/net/ethernet/korina.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c10
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c11
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c869
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c193
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c44
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c1
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.c1075
-rw-r--r--drivers/net/ethernet/natsemi/ibmlana.h278
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c3
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c6
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c1
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c4
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c4
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c9
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c14
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c7
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c7
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c18
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c28
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h544
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c3011
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h438
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2054
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c225
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c727
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c550
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h108
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c245
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h194
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c97
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c816
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1215
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c650
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c271
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c24
-rw-r--r--drivers/net/ethernet/racal/Kconfig33
-rw-r--r--drivers/net/ethernet/racal/Makefile5
-rw-r--r--drivers/net/ethernet/racal/ni5010.c771
-rw-r--r--drivers/net/ethernet/racal/ni5010.h144
-rw-r--r--drivers/net/ethernet/rdc/r6040.c14
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/8139too.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c14
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c12
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/seeq/Kconfig11
-rw-r--r--drivers/net/ethernet/seeq/Makefile1
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.c749
-rw-r--r--drivers/net/ethernet/seeq/seeq8005.h156
-rw-r--r--drivers/net/ethernet/sfc/ptp.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/silan/sc92031.c12
-rw-r--r--drivers/net/ethernet/sis/sis900.c22
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c7
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c1
-rw-r--r--drivers/net/ethernet/sun/niu.c48
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunqe.c7
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c6
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/cpmac.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c530
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c107
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h24
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c74
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h12
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c4
-rw-r--r--drivers/net/ethernet/tile/tilepro.c1
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c12
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c13
-rw-r--r--drivers/net/ethernet/via/via-rhine.c48
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c1
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c1
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c37
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c5
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c3
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
339 files changed, 31990 insertions, 37305 deletions
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
deleted file mode 100644
index 2038eaabaea..00000000000
--- a/drivers/net/ethernet/3com/3c501.c
+++ /dev/null
@@ -1,896 +0,0 @@
-/* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */
-/*
- Written 1992,1993,1994 Donald Becker
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- This is a device driver for the 3Com Etherlink 3c501.
- Do not purchase this card, even as a joke. Its performance is horrible,
- and it breaks in many ways.
-
- The original author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Fixed (again!) the missing interrupt locking on TX/RX shifting.
- Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Removed calls to init_etherdev since they are no longer needed, and
- cleaned up modularization just a bit. The driver still allows only
- the default address for cards when loaded as a module, but that's
- really less braindead than anyone using a 3c501 board. :)
- 19950208 (invid@msen.com)
-
- Added traps for interrupts hitting the window as we clear and TX load
- the board. Now getting 150K/second FTP with a 3c501 card. Still playing
- with a TX-TX optimisation to see if we can touch 180-200K/second, which seems
- to be the theoretical maximum.
- 19950402 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Cleaned up for 2.3.x because we broke SMP now.
- 20000208 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Check up pass for 2.5. Nothing significant changed
- 20021009 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
- Fixed zero fill corner case
- 20030104 Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-
- For the avoidance of doubt the "preferred form" of this code is one which
- is in an open non patent encumbered format. Where cryptographic key signing
- forms part of the process of creating an executable the information
- including keys needed to generate an equivalently functional executable
- are deemed to be part of the source code.
-
-*/
-
-
-/**
- * DOC: 3c501 Card Notes
- *
- * Some notes on this thing if you have to hack it. [Alan]
- *
- * Some documentation is available from 3Com. Due to the board's age,
- * standard responses when you ask for this will range from 'be serious'
- * to 'give it to a museum'. The documentation is incomplete and mostly
- * of historical interest anyway.
- *
- * The basic system is a single buffer which can be used to receive or
- * transmit a packet. A third command mode exists when you are setting
- * things up.
- *
- * If it's transmitting it's not receiving and vice versa. In fact the
- * time to get the board back into useful state after an operation is
- * quite large.
- *
- * The driver works by keeping the board in receive mode waiting for a
- * packet to arrive. When one arrives it is copied out of the buffer
- * and delivered to the kernel. The card is reloaded and off we go.
- *
- * When transmitting lp->txing is set and the card is reset (from
- * receive mode) [possibly losing a packet just received] to command
- * mode. A packet is loaded and transmit mode triggered. The interrupt
- * handler runs different code for transmit interrupts and can handle
- * returning to receive mode or retransmissions (yes you have to help
- * out with those too).
- *
- * DOC: Problems
- *
- * There are a wide variety of undocumented error returns from the card
- * and you basically have to kick the board and pray if they turn up. Most
- * only occur under extreme load or if you do something the board doesn't
- * like (eg touching a register at the wrong time).
- *
- * The driver is less efficient than it could be. It switches through
- * receive mode even if more transmits are queued. If this worries you buy
- * a real Ethernet card.
- *
- * The combination of slow receive restart and no real multicast
- * filter makes the board unusable with a kernel compiled for IP
- * multicasting in a real multicast environment. That's down to the board,
- * but even with no multicast programs running a multicast IP kernel is
- * in group 224.0.0.1 and you will therefore be listening to all multicasts.
- * One nv conference running over that Ethernet and you can give up.
- *
- */
-
-#define DRV_NAME "3c501"
-#define DRV_VERSION "2002/10/09"
-
-
-static const char version[] =
- DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n";
-
-/*
- * Braindamage remaining:
- * The 3c501 board.
- */
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/fcntl.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-
-#include "3c501.h"
-
-/*
- * The boilerplate probe code.
- */
-
-static int io = 0x280;
-static int irq = 5;
-static int mem_start;
-
-/**
- * el1_probe - probe for a 3c501
- * @dev: The device structure passed in to probe.
- *
- * This can be called from two places. The network layer will probe using
- * a device structure passed in with the probe information completed. For a
- * modular driver we use #init_module to fill in our own structure and probe
- * for it.
- *
- * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to
- * probe and failing to find anything.
- */
-
-struct net_device * __init el1_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static const unsigned ports[] = { 0x280, 0x300, 0};
- const unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- mem_start = dev->mem_start & 7;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = el1_probe1(dev, io);
- } else if (io != 0) {
- err = -ENXIO; /* Don't probe at all. */
- } else {
- for (port = ports; *port && el1_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, EL1_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops el_netdev_ops = {
- .ndo_open = el_open,
- .ndo_stop = el1_close,
- .ndo_start_xmit = el_start_xmit,
- .ndo_tx_timeout = el_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/**
- * el1_probe1:
- * @dev: The device structure to use
- * @ioaddr: An I/O address to probe at.
- *
- * The actual probe. This is iterated over by #el1_probe in order to
- * check all the applicable device locations.
- *
- * Returns 0 for a success, in which case the device is activated,
- * EAGAIN if the IRQ is in use by another driver, and ENODEV if the
- * board cannot be found.
- */
-
-static int __init el1_probe1(struct net_device *dev, int ioaddr)
-{
- struct net_local *lp;
- const char *mname; /* Vendor name */
- unsigned char station_addr[6];
- int autoirq = 0;
- int i;
-
- /*
- * Reserve I/O resource for exclusive use by this driver
- */
-
- if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
- return -ENODEV;
-
- /*
- * Read the station address PROM data from the special port.
- */
-
- for (i = 0; i < 6; i++) {
- outw(i, ioaddr + EL1_DATAPTR);
- station_addr[i] = inb(ioaddr + EL1_SAPROM);
- }
- /*
- * Check the first three octets of the S.A. for 3Com's prefix, or
- * for the Sager NP943 prefix.
- */
-
- if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
- station_addr[2] == 0x8c)
- mname = "3c501";
- else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
- station_addr[2] == 0xC8)
- mname = "NP943";
- else {
- release_region(ioaddr, EL1_IO_EXTENT);
- return -ENODEV;
- }
-
- /*
- * We auto-IRQ by shutting off the interrupt line and letting it
- * float high.
- */
-
- dev->irq = irq;
-
- if (dev->irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- inb(RX_STATUS); /* Clear pending interrupts. */
- inb(TX_STATUS);
- outb(AX_LOOP + 1, AX_CMD);
-
- outb(0x00, AX_CMD);
-
- mdelay(20);
- autoirq = probe_irq_off(irq_mask);
-
- if (autoirq == 0) {
- pr_warning("%s probe at %#x failed to detect IRQ line.\n",
- mname, ioaddr);
- release_region(ioaddr, EL1_IO_EXTENT);
- return -EAGAIN;
- }
- }
-
- outb(AX_RESET+AX_LOOP, AX_CMD); /* Loopback mode. */
- dev->base_addr = ioaddr;
- memcpy(dev->dev_addr, station_addr, ETH_ALEN);
-
- if (mem_start & 0xf)
- el_debug = mem_start & 0x7;
- if (autoirq)
- dev->irq = autoirq;
-
- pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
- dev->name, mname, dev->base_addr,
- autoirq ? "auto":"assigned ", dev->irq);
-
-#ifdef CONFIG_IP_MULTICAST
- pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
-#endif
-
- if (el_debug)
- pr_debug("%s", version);
-
- lp = netdev_priv(dev);
- memset(lp, 0, sizeof(struct net_local));
- spin_lock_init(&lp->lock);
-
- /*
- * The EL1-specific entries in the device structure.
- */
-
- dev->netdev_ops = &el_netdev_ops;
- dev->watchdog_timeo = HZ;
- dev->ethtool_ops = &netdev_ethtool_ops;
- return 0;
-}
-
-/**
- * el1_open:
- * @dev: device that is being opened
- *
- * When an ifconfig is issued which changes the device flags to include
- * IFF_UP this function is called. It is only called when the change
- * occurs, not when the interface remains up. #el1_close will be called
- * when it goes down.
- *
- * Returns 0 for a successful open, or -EAGAIN if someone has run off
- * with our interrupt line.
- */
-
-static int el_open(struct net_device *dev)
-{
- int retval;
- int ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-
- if (el_debug > 2)
- pr_debug("%s: Doing el_open()...\n", dev->name);
-
- retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
- if (retval)
- return retval;
-
- spin_lock_irqsave(&lp->lock, flags);
- el_reset(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- lp->txing = 0; /* Board in RX mode */
- outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
- netif_start_queue(dev);
- return 0;
-}
-
-/**
- * el_timeout:
- * @dev: The 3c501 card that has timed out
- *
- * Attempt to restart the board. This is basically a mixture of extreme
- * violence and prayer
- *
- */
-
-static void el_timeout(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if (el_debug)
- pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
- dev->name, inb(TX_STATUS),
- inb(AX_STATUS), inb(RX_STATUS));
- dev->stats.tx_errors++;
- outb(TX_NORM, TX_CMD);
- outb(RX_NORM, RX_CMD);
- outb(AX_OFF, AX_CMD); /* Just trigger a false interrupt. */
- outb(AX_RX, AX_CMD); /* Aux control, irq and receive enabled */
- lp->txing = 0; /* Ripped back in to RX */
- netif_wake_queue(dev);
-}
-
-
-/**
- * el_start_xmit:
- * @skb: The packet that is queued to be sent
- * @dev: The 3c501 card we want to throw it down
- *
- * Attempt to send a packet to a 3c501 card. There are some interesting
- * catches here because the 3c501 is an extremely old and therefore
- * stupid piece of technology.
- *
- * If we are handling an interrupt on the other CPU we cannot load a packet
- * as we may still be attempting to retrieve the last RX packet buffer.
- *
- * When a transmit times out we dump the card into control mode and just
- * start again. It happens enough that it isn't worth logging.
- *
- * We avoid holding the spin locks when doing the packet load to the board.
- * The device is very slow, and its DMA mode is even slower. If we held the
- * lock while loading 1500 bytes onto the controller we would drop a lot of
- * serial port characters. This requires we do extra locking, but we have
- * no real choice.
- */
-
-static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- /*
- * Avoid incoming interrupts between us flipping txing and flipping
- * mode as the driver assumes txing is a faithful indicator of card
- * state
- */
-
- spin_lock_irqsave(&lp->lock, flags);
-
- /*
- * Avoid timer-based retransmission conflicts.
- */
-
- netif_stop_queue(dev);
-
- do {
- int len = skb->len;
- int pad = 0;
- int gp_start;
- unsigned char *buf = skb->data;
-
- if (len < ETH_ZLEN)
- pad = ETH_ZLEN - len;
-
- gp_start = 0x800 - (len + pad);
-
- lp->tx_pkt_start = gp_start;
- lp->collisions = 0;
-
- dev->stats.tx_bytes += skb->len;
-
- /*
- * Command mode with status cleared should [in theory]
- * mean no more interrupts can be pending on the card.
- */
-
- outb_p(AX_SYS, AX_CMD);
- inb_p(RX_STATUS);
- inb_p(TX_STATUS);
-
- lp->loading = 1;
- lp->txing = 1;
-
- /*
- * Turn interrupts back on while we spend a pleasant
- * afternoon loading bytes into the board
- */
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Set rx packet area to 0. */
- outw(0x00, RX_BUF_CLR);
- /* aim - packet will be loaded into buffer start */
- outw(gp_start, GP_LOW);
- /* load buffer (usual thing each byte increments the pointer) */
- outsb(DATAPORT, buf, len);
- if (pad) {
- while (pad--) /* Zero fill buffer tail */
- outb(0, DATAPORT);
- }
- /* the board reuses the same register */
- outw(gp_start, GP_LOW);
-
- if (lp->loading != 2) {
- /* fire ... Trigger xmit. */
- outb(AX_XMIT, AX_CMD);
- lp->loading = 0;
- if (el_debug > 2)
- pr_debug(" queued xmit.\n");
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- /* A receive upset our load, despite our best efforts */
- if (el_debug > 2)
- pr_debug("%s: burped during tx load.\n", dev->name);
- spin_lock_irqsave(&lp->lock, flags);
- } while (1);
-}
-
-/**
- * el_interrupt:
- * @irq: Interrupt number
- * @dev_id: The 3c501 that burped
- *
- * Handle the ether interface interrupts. The 3c501 needs a lot more
- * hand holding than most cards. In particular we get a transmit interrupt
- * with a collision error because the board firmware isn't capable of rewinding
- * its own transmit buffer pointers. It can however count to 16 for us.
- *
- * On the receive side the card is also very dumb. It has no buffering to
- * speak of. We simply pull the packet out of its PIO buffer (which is slow)
- * and queue it for the kernel. Then we reset the card for the next packet.
- *
- * We sometimes get surprise interrupts late both because the SMP IRQ delivery
- * is message passing and because the card sometimes seems to deliver late. I
- * think if it is part way through a receive and the mode is changed it carries
- * on receiving and sends us an interrupt. We have to band aid all these cases
- * to get a sensible 150kBytes/second performance. Even then you want a small
- * TCP window.
- */
-
-static irqreturn_t el_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr;
- int axsr; /* Aux. status reg. */
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- /*
- * What happened ?
- */
-
- axsr = inb(AX_STATUS);
-
- /*
- * Log it
- */
-
- if (el_debug > 3)
- pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);
-
- if (lp->loading == 1 && !lp->txing)
- pr_warning("%s: Inconsistent state loading while not in tx\n",
- dev->name);
-
- if (lp->txing) {
- /*
- * Board in transmit mode. May be loading. If we are
- * loading we shouldn't have got this.
- */
- int txsr = inb(TX_STATUS);
-
- if (lp->loading == 1) {
- if (el_debug > 2)
- pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
- dev->name, txsr, inw(GP_LOW), inw(RX_LOW));
-
- /* Force a reload */
- lp->loading = 2;
- spin_unlock(&lp->lock);
- goto out;
- }
- if (el_debug > 6)
- pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
- txsr, inw(GP_LOW), inw(RX_LOW));
-
- if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
- /*
- * FIXME: is there a logic to whether to keep
- * on trying or reset immediately ?
- */
- if (el_debug > 1)
- pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
- dev->name, txsr, axsr,
- inw(ioaddr + EL1_DATAPTR),
- inw(ioaddr + EL1_RXPTR));
- lp->txing = 0;
- netif_wake_queue(dev);
- } else if (txsr & TX_16COLLISIONS) {
- /*
- * Timed out
- */
- if (el_debug)
- pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
- outb(AX_SYS, AX_CMD);
- lp->txing = 0;
- dev->stats.tx_aborted_errors++;
- netif_wake_queue(dev);
- } else if (txsr & TX_COLLISION) {
- /*
- * Retrigger xmit.
- */
-
- if (el_debug > 6)
- pr_debug("%s: retransmitting after a collision.\n", dev->name);
- /*
- * Poor little chip can't reset its own start
- * pointer
- */
-
- outb(AX_SYS, AX_CMD);
- outw(lp->tx_pkt_start, GP_LOW);
- outb(AX_XMIT, AX_CMD);
- dev->stats.collisions++;
- spin_unlock(&lp->lock);
- goto out;
- } else {
- /*
- * It worked.. we will now fall through and receive
- */
- dev->stats.tx_packets++;
- if (el_debug > 6)
- pr_debug("%s: Tx succeeded %s\n", dev->name,
- (txsr & TX_RDY) ? "." : "but tx is busy!");
- /*
- * This is safe as the interrupt is atomic WRT itself.
- */
- lp->txing = 0;
- /* In case more to transmit */
- netif_wake_queue(dev);
- }
- } else {
- /*
- * In receive mode.
- */
-
- int rxsr = inb(RX_STATUS);
- if (el_debug > 5)
- pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
- dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
- /*
- * Just reading rx_status fixes most errors.
- */
- if (rxsr & RX_MISSED)
- dev->stats.rx_missed_errors++;
- else if (rxsr & RX_RUNT) {
- /* Handled to avoid board lock-up. */
- dev->stats.rx_length_errors++;
- if (el_debug > 5)
- pr_debug("%s: runt.\n", dev->name);
- } else if (rxsr & RX_GOOD) {
- /*
- * Receive worked.
- */
- el_receive(dev);
- } else {
- /*
- * Nothing? Something is broken!
- */
- if (el_debug > 2)
- pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
- dev->name, rxsr);
- el_reset(dev);
- }
- }
-
- /*
- * Move into receive mode
- */
-
- outb(AX_RX, AX_CMD);
- outw(0x00, RX_BUF_CLR);
- inb(RX_STATUS); /* Be certain that interrupts are cleared. */
- inb(TX_STATUS);
- spin_unlock(&lp->lock);
-out:
- return IRQ_HANDLED;
-}
-
-
-/**
- * el_receive:
- * @dev: Device to pull the packets from
- *
- * We have a good packet. Well, not really "good", just mostly not broken.
- * We must check everything to see if it is good. In particular we occasionally
- * get wild packet sizes from the card. If the packet seems sane we PIO it
- * off the card and queue it for the protocol layers.
- */
-
-static void el_receive(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int pkt_len;
- struct sk_buff *skb;
-
- pkt_len = inw(RX_LOW);
-
- if (el_debug > 4)
- pr_debug(" el_receive %d.\n", pkt_len);
-
- if (pkt_len < 60 || pkt_len > 1536) {
- if (el_debug)
- pr_debug("%s: bogus packet, length=%d\n",
- dev->name, pkt_len);
- dev->stats.rx_over_errors++;
- return;
- }
-
- /*
- * Command mode so we can empty the buffer
- */
-
- outb(AX_SYS, AX_CMD);
- skb = netdev_alloc_skb(dev, pkt_len + 2);
-
- /*
- * Start of frame
- */
-
- outw(0x00, GP_LOW);
- if (skb == NULL) {
- pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- } else {
- skb_reserve(skb, 2); /* Force 16 byte alignment */
- /*
- * The read increments through the bytes. The interrupt
- * handler will fix the pointer when it returns to
- * receive mode.
- */
- insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-}
-
-/**
- * el_reset: Reset a 3c501 card
- * @dev: The 3c501 card about to get zapped
- *
- * Even resetting a 3c501 isn't simple. When you activate reset it loses all
- * its configuration. You must hold the lock when doing this. The function
- * cannot take the lock itself as it is callable from the irq handler.
- */
-
-static void el_reset(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if (el_debug > 2)
- pr_info("3c501 reset...\n");
- outb(AX_RESET, AX_CMD); /* Reset the chip */
- /* Aux control, irq and loopback enabled */
- outb(AX_LOOP, AX_CMD);
- {
- int i;
- for (i = 0; i < 6; i++) /* Set the station address. */
- outb(dev->dev_addr[i], ioaddr + i);
- }
-
- outw(0, RX_BUF_CLR); /* Set rx packet area to 0. */
- outb(TX_NORM, TX_CMD); /* tx irq on done, collision */
- outb(RX_NORM, RX_CMD); /* Set Rx commands. */
- inb(RX_STATUS); /* Clear status. */
- inb(TX_STATUS);
- lp->txing = 0;
-}
-
-/**
- * el1_close:
- * @dev: 3c501 card to shut down
- *
- * Close a 3c501 card. The IFF_UP flag has been cleared by the user via
- * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued,
- * and then disable the interrupts. Finally we reset the chip. The effects
- * of the rest will be cleaned up by #el1_open. Always returns 0 indicating
- * a success.
- */
-
-static int el1_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (el_debug > 2)
- pr_info("%s: Shutting down Ethernet card at %#x.\n",
- dev->name, ioaddr);
-
- netif_stop_queue(dev);
-
- /*
- * Free and disable the IRQ.
- */
-
- free_irq(dev->irq, dev);
- outb(AX_RESET, AX_CMD); /* Reset the chip */
-
- return 0;
-}
-
-/**
- * set_multicast_list:
- * @dev: The device to adjust
- *
- * Set or clear the multicast filter for this adaptor to use the best-effort
- * filtering supported. The 3c501 supports only three modes of filtering.
- * It always receives broadcasts and packets for itself. You can choose to
- * optionally receive all packets, or all multicast packets on top of this.
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (dev->flags & IFF_PROMISC) {
- outb(RX_PROM, RX_CMD);
- inb(RX_STATUS);
- } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
- /* Multicast or all multicast is the same */
- outb(RX_MULT, RX_CMD);
- inb(RX_STATUS); /* Clear status. */
- } else {
- outb(RX_NORM, RX_CMD);
- inb(RX_STATUS);
- }
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-
-static struct net_device *dev_3c501;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "EtherLink I/O base address");
-MODULE_PARM_DESC(irq, "EtherLink IRQ number");
-
-/**
- * init_module:
- *
- * When the driver is loaded as a module this function is called. We fake up
- * a device structure with the base I/O and interrupt set as if it were being
- * called from Space.c. This minimises the extra code that would otherwise
- * be required.
- *
- * Returns 0 for success or -EIO if a card is not found. Returning an error
- * here also causes the module to be unloaded
- */
-
-int __init init_module(void)
-{
- dev_3c501 = el1_probe(-1);
- if (IS_ERR(dev_3c501))
- return PTR_ERR(dev_3c501);
- return 0;
-}
-
-/**
- * cleanup_module:
- *
- * The module is being unloaded. We unhook our network device from the system
- * and then free up the resources we took when the card was found.
- */
-
-void __exit cleanup_module(void)
-{
- struct net_device *dev = dev_3c501;
- unregister_netdev(dev);
- release_region(dev->base_addr, EL1_IO_EXTENT);
- free_netdev(dev);
-}
-
-#endif /* MODULE */
-
-MODULE_AUTHOR("Donald Becker, Alan Cox");
-MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
-MODULE_LICENSE("GPL");
-
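The most instructive part of the deleted driver is the el_start_xmit() locking dance: because PIO-loading a 1500-byte frame through DATAPORT is very slow, the spinlock is dropped for the copy and lp->loading is checked afterwards to detect a receive interrupt that raced the load. A condensed sketch of that retry loop, using the names from the deleted source above (it omits the zero padding and buffer start-pointer bookkeeping and is not meant to compile on its own):

	spin_lock_irqsave(&lp->lock, flags);
	netif_stop_queue(dev);
	do {
		lp->loading = 1;				/* mark "buffer load in progress" */
		lp->txing = 1;
		spin_unlock_irqrestore(&lp->lock, flags);	/* let interrupts run during the slow copy */
		outsb(DATAPORT, skb->data, skb->len);		/* byte-at-a-time PIO load of the frame */
		if (lp->loading != 2) {				/* no interrupt disturbed the load */
			outb(AX_XMIT, AX_CMD);			/* fire the transmit */
			lp->loading = 0;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		spin_lock_irqsave(&lp->lock, flags);		/* a receive "burped" the load: retry */
	} while (1);

The interrupt handler sets lp->loading to 2 when it fires mid-load, which is what forces the reload branch at the bottom of the loop.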
diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h
deleted file mode 100644
index 183fd55f03c..00000000000
--- a/drivers/net/ethernet/3com/3c501.h
+++ /dev/null
@@ -1,91 +0,0 @@
-
-/*
- * Index to functions.
- */
-
-static int el1_probe1(struct net_device *dev, int ioaddr);
-static int el_open(struct net_device *dev);
-static void el_timeout(struct net_device *dev);
-static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t el_interrupt(int irq, void *dev_id);
-static void el_receive(struct net_device *dev);
-static void el_reset(struct net_device *dev);
-static int el1_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-#define EL1_IO_EXTENT 16
-
-#ifndef EL_DEBUG
-#define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */
-#endif /* Anything above 5 is wordy death! */
-#define debug el_debug
-static int el_debug = EL_DEBUG;
-
-/*
- * Board-specific info in netdev_priv(dev).
- */
-
-struct net_local
-{
- int tx_pkt_start; /* The length of the current Tx packet. */
- int collisions; /* Tx collisions this packet */
- int loading; /* Spot buffer load collisions */
- int txing; /* True if card is in TX mode */
- spinlock_t lock; /* Serializing lock */
-};
-
-
-#define RX_STATUS (ioaddr + 0x06)
-#define RX_CMD RX_STATUS
-#define TX_STATUS (ioaddr + 0x07)
-#define TX_CMD TX_STATUS
-#define GP_LOW (ioaddr + 0x08)
-#define GP_HIGH (ioaddr + 0x09)
-#define RX_BUF_CLR (ioaddr + 0x0A)
-#define RX_LOW (ioaddr + 0x0A)
-#define RX_HIGH (ioaddr + 0x0B)
-#define SAPROM (ioaddr + 0x0C)
-#define AX_STATUS (ioaddr + 0x0E)
-#define AX_CMD AX_STATUS
-#define DATAPORT (ioaddr + 0x0F)
-#define TX_RDY 0x08 /* In TX_STATUS */
-
-#define EL1_DATAPTR 0x08
-#define EL1_RXPTR 0x0A
-#define EL1_SAPROM 0x0C
-#define EL1_DATAPORT 0x0f
-
-/*
- * Writes to the ax command register.
- */
-
-#define AX_OFF 0x00 /* Irq off, buffer access on */
-#define AX_SYS 0x40 /* Load the buffer */
-#define AX_XMIT 0x44 /* Transmit a packet */
-#define AX_RX 0x48 /* Receive a packet */
-#define AX_LOOP 0x0C /* Loopback mode */
-#define AX_RESET 0x80
-
-/*
- * Normal receive mode written to RX_STATUS. We must intr on short packets
- * to avoid bogus rx lockups.
- */
-
-#define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */
-#define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */
-#define RX_MULT 0xE8 /* Accept multicast packets. */
-#define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */
-
-/*
- * TX_STATUS register.
- */
-
-#define TX_COLLISION 0x02
-#define TX_16COLLISIONS 0x04
-#define TX_READY 0x08
-
-#define RX_RUNT 0x08
-#define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */
-#define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */
-
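Every register macro in the deleted header expands relative to a local variable named ioaddr (for example RX_STATUS is ioaddr + 0x06 and TX_STATUS is ioaddr + 0x07), which is why the functions in 3c501.c start by loading int ioaddr = dev->base_addr before touching the hardware. A tiny hypothetical helper, only to show the intended usage pattern (it does not exist in the driver):

	static void el_clear_pending_irqs(struct net_device *dev)
	{
		int ioaddr = dev->base_addr;	/* the macros below expand to ioaddr + offset */

		inb(RX_STATUS);			/* reads ioaddr + 0x06, clearing the pending RX interrupt */
		inb(TX_STATUS);			/* reads ioaddr + 0x07, clearing the pending TX interrupt */
	}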
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 633c709b9d9..f36ff99fd39 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1161,8 +1161,8 @@ el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 59e1e001bc3..94c656f5a05 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1542,9 +1542,10 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
+ dev->base_addr);
}
static u32 netdev_get_msglevel(struct net_device *dev)
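The two get_drvinfo hunks above (3c509.c and 3c515.c) apply the same hardening pattern: the driver, version and bus_info members of struct ethtool_drvinfo are fixed-size character arrays, so the unbounded strcpy()/sprintf() calls are swapped for length-bounded ones. Roughly (an illustrative fragment, not taken from the patch):

	char buf[32];					/* ethtool_drvinfo fields are fixed-size arrays like this */

	strcpy(buf, src);				/* overruns buf if src is 32 bytes or longer */
	strlcpy(buf, src, sizeof(buf));			/* truncates instead and always NUL-terminates */
	snprintf(buf, sizeof(buf), "ISA 0x%lx", io);	/* the same guarantee for formatted output */

Passing sizeof(info->driver) and friends, as the patch does, keeps the bound tied to the structure definition rather than to a hand-maintained constant.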
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ed0feb3cc6f..1928e200158 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1293,7 +1293,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index eb56174469a..1c71c763f68 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_3COM
bool "3Com devices"
default y
- depends on ISA || EISA || MCA || PCI || PCMCIA
+ depends on ISA || EISA || PCI || PCMCIA
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,23 +18,9 @@ config NET_VENDOR_3COM
if NET_VENDOR_3COM
-config EL1
- tristate "3c501 \"EtherLink\" support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Also, consider buying a
- new card, since the 3c501 is slow, broken, and obsolete: you will
- have problems. Some people suggest to ping ("man ping") a nearby
- machine every minute ("man cron") when using this card.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c501.
-
config EL3
- tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support"
- depends on (ISA || EISA || MCA)
+ tristate "3c509/3c579 \"EtherLink III\" support"
+ depends on (ISA || EISA)
---help---
If you have a network (Ethernet) card belonging to the 3Com
EtherLinkIII series, say Y and read the Ethernet-HOWTO, available
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile
index 1e5382a30ea..74046afab99 100644
--- a/drivers/net/ethernet/3com/Makefile
+++ b/drivers/net/ethernet/3com/Makefile
@@ -2,7 +2,6 @@
# Makefile for the 3Com Ethernet device drivers
#
-obj-$(CONFIG_EL1) += 3c501.o
obj-$(CONFIG_EL3) += 3c509.o
obj-$(CONFIG_3C515) += 3c515.o
obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
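Removing a driver pairs the Kconfig deletion with a Makefile deletion: once the config EL1 entry is gone, CONFIG_EL1 can never be set to y or m, so the object rule that referenced it goes too. For reference, the kbuild idiom involved, shown with the entries that remain plus explanatory comments that are not in the tree:

	obj-$(CONFIG_EL3)	+= 3c509.o	# "y" lands in obj-y (built in), "m" in obj-m (module)
	obj-$(CONFIG_3C515)	+= 3c515.o	# an unset symbol expands to plain "obj-", which kbuild ignores

A dangling obj-$(CONFIG_EL1) line would therefore be harmless but dead, and is removed along with the Kconfig entry.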
diff --git a/drivers/net/ethernet/8390/3c503.c b/drivers/net/ethernet/8390/3c503.c
deleted file mode 100644
index 49d76bd0dc8..00000000000
--- a/drivers/net/ethernet/8390/3c503.c
+++ /dev/null
@@ -1,777 +0,0 @@
-/* 3c503.c: A shared-memory NS8390 ethernet driver for linux. */
-/*
- Written 1992-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- This driver should work with the 3c503 and 3c503/16. It should be used
- in shared memory mode for best performance, although it may also work
- in programmed-I/O mode.
-
- Sources:
- EtherLink II Technical Reference Manual,
- EtherLink II/16 Technical Reference Manual Supplement,
- 3Com Corporation, 5400 Bayfront Plaza, Santa Clara CA 95052-8145
-
- The Crynwr 3c503 packet driver.
-
- Changelog:
-
- Paul Gortmaker : add support for the 2nd 8kB of RAM on 16 bit cards.
- Paul Gortmaker : multiple card support for module users.
- rjohnson@analogic.com : Fix up PIO interface for efficient operation.
- Jeff Garzik : ethtool support
-
-*/
-
-#define DRV_NAME "3c503"
-#define DRV_VERSION "1.10a"
-#define DRV_RELDATE "11/17/2001"
-
-
-static const char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ethtool.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/byteorder.h>
-
-#include "8390.h"
-#include "3c503.h"
-#define WRD_COUNT 4
-
-static int el2_pio_probe(struct net_device *dev);
-static int el2_probe1(struct net_device *dev, int ioaddr);
-
-/* A zero-terminated list of I/O addresses to be probed in PIO mode. */
-static unsigned int netcard_portlist[] __initdata =
- { 0x300,0x310,0x330,0x350,0x250,0x280,0x2a0,0x2e0,0};
-
-#define EL2_IO_EXTENT 16
-
-static int el2_open(struct net_device *dev);
-static int el2_close(struct net_device *dev);
-static void el2_reset_8390(struct net_device *dev);
-static void el2_init_card(struct net_device *dev);
-static void el2_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void el2_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset);
-static void el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static const struct ethtool_ops netdev_ethtool_ops;
-
-
-/* This routine probes for a memory-mapped 3c503 board by looking for
- the "location register" at the end of the jumpered boot PROM space.
- This works even if a PROM isn't there.
-
- If the ethercard isn't found there, there is an optional probe for an
- ethercard jumpered to programmed-I/O mode.
- */
-static int __init do_el2_probe(struct net_device *dev)
-{
- int *addr, addrs[] = { 0xddffe, 0xd9ffe, 0xcdffe, 0xc9ffe, 0};
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return el2_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (addr = addrs; *addr; addr++) {
- void __iomem *p = ioremap(*addr, 1);
- unsigned base_bits;
- int i;
-
- if (!p)
- continue;
- base_bits = readb(p);
- iounmap(p);
- i = ffs(base_bits) - 1;
- if (i == -1 || base_bits != (1 << i))
- continue;
- if (el2_probe1(dev, netcard_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-#if ! defined(no_probe_nonshared_memory)
- return el2_pio_probe(dev);
-#else
- return -ENODEV;
-#endif
-}
-
-/* Try all of the locations that aren't obviously empty. This touches
- a lot of locations, and is much riskier than the code above. */
-static int __init
-el2_pio_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return el2_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; netcard_portlist[i]; i++) {
- if (el2_probe1(dev, netcard_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init el2_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_el2_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops el2_netdev_ops = {
- .ndo_open = el2_open,
- .ndo_stop = el2_close,
-
- .ndo_start_xmit = eip_start_xmit,
- .ndo_tx_timeout = eip_tx_timeout,
- .ndo_get_stats = eip_get_stats,
- .ndo_set_rx_mode = eip_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = eip_poll,
-#endif
-};
-
-/* Probe for the Etherlink II card at I/O port base IOADDR,
- returning non-zero on success. If found, set the station
- address and memory parameters in DEVICE. */
-static int __init
-el2_probe1(struct net_device *dev, int ioaddr)
-{
- int i, iobase_reg, membase_reg, saved_406, wordlength, retval;
- static unsigned version_printed;
- unsigned long vendor_id;
-
- if (!request_region(ioaddr, EL2_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (!request_region(ioaddr + 0x400, 8, DRV_NAME)) {
- retval = -EBUSY;
- goto out;
- }
-
- /* Reset and/or avoid any lurking NE2000 */
- if (inb(ioaddr + 0x408) == 0xff) {
- mdelay(1);
- retval = -ENODEV;
- goto out1;
- }
-
- /* We verify that it's a 3C503 board by checking the first three octets
- of its ethernet address. */
- iobase_reg = inb(ioaddr+0x403);
- membase_reg = inb(ioaddr+0x404);
- /* ASIC location registers should be 0 or have only a single bit set. */
- if ((iobase_reg & (iobase_reg - 1)) ||
- (membase_reg & (membase_reg - 1))) {
- retval = -ENODEV;
- goto out1;
- }
- saved_406 = inb_p(ioaddr + 0x406);
- outb_p(ECNTRL_RESET|ECNTRL_THIN, ioaddr + 0x406); /* Reset it... */
- outb_p(ECNTRL_THIN, ioaddr + 0x406);
- /* Map the station addr PROM into the lower I/O ports. We now check
- for both the old and new 3Com prefix */
- outb(ECNTRL_SAPROM|ECNTRL_THIN, ioaddr + 0x406);
- vendor_id = inb(ioaddr)*0x10000 + inb(ioaddr + 1)*0x100 + inb(ioaddr + 2);
- if ((vendor_id != OLD_3COM_ID) && (vendor_id != NEW_3COM_ID)) {
- /* Restore the register we frobbed. */
- outb(saved_406, ioaddr + 0x406);
- retval = -ENODEV;
- goto out1;
- }
-
- if (ei_debug && version_printed++ == 0)
- pr_debug("%s", version);
-
- dev->base_addr = ioaddr;
-
- pr_info("%s: 3c503 at i/o base %#3x, node ", dev->name, ioaddr);
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
- pr_cont("%pM", dev->dev_addr);
-
- /* Map the 8390 back into the window. */
- outb(ECNTRL_THIN, ioaddr + 0x406);
-
- /* Check for EL2/16 as described in tech. man. */
- outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
- outb_p(0, ioaddr + EN0_DCFG);
- outb_p(E8390_PAGE2, ioaddr + E8390_CMD);
- wordlength = inb_p(ioaddr + EN0_DCFG) & ENDCFG_WTS;
- outb_p(E8390_PAGE0, ioaddr + E8390_CMD);
-
- /* Probe for, turn on and clear the board's shared memory. */
- if (ei_debug > 2)
- pr_cont(" memory jumpers %2.2x ", membase_reg);
- outb(EGACFR_NORM, ioaddr + 0x405); /* Enable RAM */
-
- /* This should be probed for (or set via an ioctl()) at run-time.
- Right now we use a sleazy hack to pass in the interface number
- at boot-time via the low bits of the mem_end field. That value is
- unused, and the low bits would be discarded even if it was used. */
-#if defined(EI8390_THICK) || defined(EL2_AUI)
- ei_status.interface_num = 1;
-#else
- ei_status.interface_num = dev->mem_end & 0xf;
-#endif
- pr_cont(", using %sternal xcvr.\n", ei_status.interface_num == 0 ? "in" : "ex");
-
- if ((membase_reg & 0xf0) == 0) {
- dev->mem_start = 0;
- ei_status.name = "3c503-PIO";
- ei_status.mem = NULL;
- } else {
- dev->mem_start = ((membase_reg & 0xc0) ? 0xD8000 : 0xC8000) +
- ((membase_reg & 0xA0) ? 0x4000 : 0);
-#define EL2_MEMSIZE (EL2_MB1_STOP_PG - EL2_MB1_START_PG)*256
- ei_status.mem = ioremap(dev->mem_start, EL2_MEMSIZE);
-
-#ifdef EL2MEMTEST
- /* This has never found an error, but someone might care.
- Note that it only tests the 2nd 8kB on 16kB 3c503/16
- cards between card addr. 0x2000 and 0x3fff. */
- { /* Check the card's memory. */
- void __iomem *mem_base = ei_status.mem;
- unsigned int test_val = 0xbbadf00d;
- writel(0xba5eba5e, mem_base);
- for (i = sizeof(test_val); i < EL2_MEMSIZE; i+=sizeof(test_val)) {
- writel(test_val, mem_base + i);
- if (readl(mem_base) != 0xba5eba5e ||
- readl(mem_base + i) != test_val) {
- pr_warning("3c503: memory failure or memory address conflict.\n");
- dev->mem_start = 0;
- ei_status.name = "3c503-PIO";
- iounmap(mem_base);
- ei_status.mem = NULL;
- break;
- }
- test_val += 0x55555555;
- writel(0, mem_base + i);
- }
- }
-#endif /* EL2MEMTEST */
-
- if (dev->mem_start)
- dev->mem_end = dev->mem_start + EL2_MEMSIZE;
-
- if (wordlength) { /* No Tx pages to skip over to get to Rx */
- ei_status.priv = 0;
- ei_status.name = "3c503/16";
- } else {
- ei_status.priv = TX_PAGES * 256;
- ei_status.name = "3c503";
- }
- }
-
- /*
- Divide up the memory on the card. This is the same regardless of
- whether shared-mem or PIO is used. For 16 bit cards (16kB RAM),
- we use the entire 8k of bank1 for an Rx ring. We only use 3k
- of the bank0 for 2 full size Tx packet slots. For 8 bit cards,
- (8kB RAM) we use 3kB of bank1 for two Tx slots, and the remaining
- 5kB for an Rx ring. */
-
- if (wordlength) {
- ei_status.tx_start_page = EL2_MB0_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG;
- } else {
- ei_status.tx_start_page = EL2_MB1_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- }
-
- /* Finish setting the board's parameters. */
- ei_status.stop_page = EL2_MB1_STOP_PG;
- ei_status.word16 = wordlength;
- ei_status.reset_8390 = el2_reset_8390;
- ei_status.get_8390_hdr = el2_get_8390_hdr;
- ei_status.block_input = el2_block_input;
- ei_status.block_output = el2_block_output;
-
- if (dev->irq == 2)
- dev->irq = 9;
- else if (dev->irq > 5 && dev->irq != 9) {
- pr_warning("3c503: configured interrupt %d invalid, will use autoIRQ.\n",
- dev->irq);
- dev->irq = 0;
- }
-
- ei_status.saved_irq = dev->irq;
-
- dev->netdev_ops = &el2_netdev_ops;
- dev->ethtool_ops = &netdev_ethtool_ops;
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
-
- if (dev->mem_start)
- pr_info("%s: %s - %dkB RAM, 8kB shared mem window at %#6lx-%#6lx.\n",
- dev->name, ei_status.name, (wordlength+1)<<3,
- dev->mem_start, dev->mem_end-1);
-
- else
- {
- ei_status.tx_start_page = EL2_MB1_START_PG;
- ei_status.rx_start_page = EL2_MB1_START_PG + TX_PAGES;
- pr_info("%s: %s, %dkB RAM, using programmed I/O (REJUMPER for SHARED MEMORY).\n",
- dev->name, ei_status.name, (wordlength+1)<<3);
- }
- release_region(ioaddr + 0x400, 8);
- return 0;
-out1:
- release_region(ioaddr + 0x400, 8);
-out:
- release_region(ioaddr, EL2_IO_EXTENT);
- return retval;
-}
-
-static irqreturn_t el2_probe_interrupt(int irq, void *seen)
-{
- *(bool *)seen = true;
- return IRQ_HANDLED;
-}
-
-static int
-el2_open(struct net_device *dev)
-{
- int retval;
-
- if (dev->irq < 2) {
- static const int irqlist[] = {5, 9, 3, 4, 0};
- const int *irqp = irqlist;
-
- outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
- do {
- bool seen;
-
- retval = request_irq(*irqp, el2_probe_interrupt, 0,
- dev->name, &seen);
- if (retval == -EBUSY)
- continue;
- if (retval < 0)
- goto err_disable;
-
- /* Twinkle the interrupt, and check if it's seen. */
- seen = false;
- smp_wmb();
- outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
- outb_p(0x00, E33G_IDCFR);
- msleep(1);
- free_irq(*irqp, &seen);
- if (!seen)
- continue;
-
- retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
- dev->name, dev);
- if (retval == -EBUSY)
- continue;
- if (retval < 0)
- goto err_disable;
- break;
- } while (*++irqp);
-
- if (*irqp == 0) {
- err_disable:
- outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
- return -EAGAIN;
- }
- } else {
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
- return retval;
- }
- }
-
- el2_init_card(dev);
- eip_open(dev);
- return 0;
-}
-
-static int
-el2_close(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- dev->irq = ei_status.saved_irq;
- outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
-
- eip_close(dev);
- return 0;
-}
-
-/* This is called whenever we have an unrecoverable failure:
- transmit timeout
- Bad ring buffer packet header
- */
-static void
-el2_reset_8390(struct net_device *dev)
-{
- if (ei_debug > 1) {
- pr_debug("%s: Resetting the 3c503 board...", dev->name);
- pr_cont(" %#lx=%#02x %#lx=%#02x %#lx=%#02x...", E33G_IDCFR, inb(E33G_IDCFR),
- E33G_CNTRL, inb(E33G_CNTRL), E33G_GACFR, inb(E33G_GACFR));
- }
- outb_p(ECNTRL_RESET|ECNTRL_THIN, E33G_CNTRL);
- ei_status.txing = 0;
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
- el2_init_card(dev);
- if (ei_debug > 1)
- pr_cont("done\n");
-}
-
-/* Initialize the 3c503 GA registers after a reset. */
-static void
-el2_init_card(struct net_device *dev)
-{
- /* Unmap the station PROM and select the DIX or BNC connector. */
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-
- /* Set ASIC copy of rx's first and last+1 buffer pages */
- /* These must be the same as in the 8390. */
- outb(ei_status.rx_start_page, E33G_STARTPG);
- outb(ei_status.stop_page, E33G_STOPPG);
-
- /* Point the vector pointer registers somewhere "harmless". */
- outb(0xff, E33G_VP2); /* Point at the ROM restart location 0xffff0 */
- outb(0xff, E33G_VP1);
- outb(0x00, E33G_VP0);
- /* Turn off all interrupts until we're opened. */
- outb_p(0x00, dev->base_addr + EN0_IMR);
- /* Enable IRQs iff started. */
- outb(EGACFR_NORM, E33G_GACFR);
-
- /* Set the interrupt line. */
- outb_p((0x04 << (dev->irq == 9 ? 2 : dev->irq)), E33G_IDCFR);
- outb_p((WRD_COUNT << 1), E33G_DRQCNT); /* Set burst size to 8 */
- outb_p(0x20, E33G_DMAAH); /* Put a valid addr in the GA DMA */
- outb_p(0x00, E33G_DMAAL);
- return; /* We always succeed */
-}
-
-/*
- * Either use the shared memory (if enabled on the board) or put the packet
- * out through the ASIC FIFO.
- */
-static void
-el2_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- unsigned short int *wrd;
- int boguscount; /* timeout counter */
- unsigned short word; /* temporary for better machine code */
- void __iomem *base = ei_status.mem;
-
- if (ei_status.word16) /* Tx packets go into bank 0 on EL2/16 card */
- outb(EGACFR_RSEL|EGACFR_TCM, E33G_GACFR);
- else
- outb(EGACFR_NORM, E33G_GACFR);
-
- if (base) { /* Shared memory transfer */
- memcpy_toio(base + ((start_page - ei_status.tx_start_page) << 8),
- buf, count);
- outb(EGACFR_NORM, E33G_GACFR); /* Back to bank1 in case on bank0 */
- return;
- }
-
-/*
- * No shared memory, put the packet out the other way.
- * Set up then start the internal memory transfer to Tx Start Page
- */
-
- word = (unsigned short)start_page;
- outb(word&0xFF, E33G_DMAAH);
- outb(word>>8, E33G_DMAAL);
-
- outb_p((ei_status.interface_num ? ECNTRL_AUI : ECNTRL_THIN ) | ECNTRL_OUTPUT
- | ECNTRL_START, E33G_CNTRL);
-
-/*
- * Here I am going to write data to the FIFO as quickly as possible.
- * Note that E33G_FIFOH is defined incorrectly. It is really
- * E33G_FIFOL, the lowest port address for both the byte and
- * word write. Variable 'count' is NOT checked. Caller must supply a
- * valid count. Note that I may write a harmless extra byte to the
- * 8390 if the byte-count was not even.
- */
- wrd = (unsigned short int *) buf;
- count = (count + 1) >> 1;
- for(;;)
- {
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_block_output.\n", dev->name);
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- if(count > WRD_COUNT)
- {
- outsw(E33G_FIFOH, wrd, WRD_COUNT);
- wrd += WRD_COUNT;
- count -= WRD_COUNT;
- }
- else
- {
- outsw(E33G_FIFOH, wrd, count);
- break;
- }
- }
- blocked:;
- outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
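/*
 * Illustrative sketch, not from the original driver: the polled-FIFO copy
 * used by el2_block_output() above and el2_block_input() below, factored
 * into one hypothetical helper (el2_fifo_write).  It assumes the E33G_
 * register macros and WRD_COUNT from 3c503.h plus the kernel inb()/outsw()
 * accessors; 'words' counts 16-bit words, and on timeout it simply returns
 * -1, leaving the 8390 reset and transceiver restore to the caller, as the
 * original loops do inline.
 */
static int el2_fifo_write(struct net_device *dev,
			  const unsigned short *wrd, int words)
{
	while (words > 0) {
		int burst = words > WRD_COUNT ? WRD_COUNT : words;
		int boguscount = 0x1000;	/* same crude timeout as above */

		/* Wait until the ASIC FIFO can accept more data. */
		while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
			if (!boguscount--)
				return -1;	/* FIFO blocked */

		outsw(E33G_FIFOH, wrd, burst);	/* push up to WRD_COUNT words */
		wrd += burst;
		words -= burst;
	}
	return 0;
}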
-
-/* Read the 4 byte, page aligned 8390 specific header. */
-static void
-el2_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int boguscount;
- void __iomem *base = ei_status.mem;
- unsigned short word;
-
- if (base) { /* Use the shared memory. */
- void __iomem *hdr_start = base + ((ring_page - EL2_MB1_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = le16_to_cpu(hdr->count);
- return;
- }
-
-/*
- * No shared memory, use programmed I/O.
- */
-
- word = (unsigned short)ring_page;
- outb(word&0xFF, E33G_DMAAH);
- outb(word>>8, E33G_DMAAL);
-
- outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
- | ECNTRL_START, E33G_CNTRL);
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_get_8390_hdr.\n", dev->name);
- memset(hdr, 0x00, sizeof(struct e8390_pkt_hdr));
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- insw(E33G_FIFOH, hdr, (sizeof(struct e8390_pkt_hdr))>> 1);
- blocked:;
- outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-
-static void
-el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int boguscount = 0;
- void __iomem *base = ei_status.mem;
- unsigned short int *buf;
- unsigned short word;
-
- /* Maybe enable shared memory just to be safe... nahh. */
- if (base) { /* Use the shared memory. */
- ring_offset -= (EL2_MB1_START_PG<<8);
- if (ring_offset + count > EL2_MEMSIZE) {
- /* We must wrap the input move. */
- int semi_count = EL2_MEMSIZE - ring_offset;
- memcpy_fromio(skb->data, base + ring_offset, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count, base + ei_status.priv, count);
- } else {
- memcpy_fromio(skb->data, base + ring_offset, count);
- }
- return;
- }
-
-/*
- * No shared memory, use programmed I/O.
- */
- word = (unsigned short) ring_offset;
- outb(word>>8, E33G_DMAAH);
- outb(word&0xFF, E33G_DMAAL);
-
- outb_p((ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI) | ECNTRL_INPUT
- | ECNTRL_START, E33G_CNTRL);
-
-/*
- * Here I also try to get data as fast as possible. I am betting that I
- * can read one extra byte without clobbering anything in the kernel because
- * this would only occur on an odd byte-count and allocation of skb->data
- * is word-aligned. Variable 'count' is NOT checked. Caller must check
- * for a valid count.
- * [This is currently quite safe.... but if one day the 3c503 explodes
- * you know where to come looking ;)]
- */
-
- buf = (unsigned short int *) skb->data;
- count = (count + 1) >> 1;
- for(;;)
- {
- boguscount = 0x1000;
- while ((inb(E33G_STATUS) & ESTAT_DPRDY) == 0)
- {
- if(!boguscount--)
- {
- pr_notice("%s: FIFO blocked in el2_block_input.\n", dev->name);
- el2_reset_8390(dev);
- goto blocked;
- }
- }
- if(count > WRD_COUNT)
- {
- insw(E33G_FIFOH, buf, WRD_COUNT);
- buf += WRD_COUNT;
- count -= WRD_COUNT;
- }
- else
- {
- insw(E33G_FIFOH, buf, count);
- break;
- }
- }
- blocked:;
- outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
-};
-
-#ifdef MODULE
-#define MAX_EL2_CARDS 4 /* Max number of EL2 cards per module */
-
-static struct net_device *dev_el2[MAX_EL2_CARDS];
-static int io[MAX_EL2_CARDS];
-static int irq[MAX_EL2_CARDS];
-static int xcvr[MAX_EL2_CARDS]; /* choose int. or ext. xcvr */
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
-MODULE_DESCRIPTION("3Com ISA EtherLink II, II/16 (3c503, 3c503/16) driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- pr_notice("3c503.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
- if (do_el2_probe(dev) == 0) {
- dev_el2[found++] = dev;
- continue;
- }
- free_netdev(dev);
- pr_warning("3c503.c: No 3c503 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: el2_close() handles free_irq */
- release_region(dev->base_addr, EL2_IO_EXTENT);
- if (ei_status.mem)
- iounmap(ei_status.mem);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_EL2_CARDS; this_dev++) {
- struct net_device *dev = dev_el2[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
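The interrupt-select value written to E33G_IDCFR in el2_open() and el2_init_card() above is a one-hot bit chosen by the IRQ number, with IRQ 9 standing in for IRQ 2. A standalone sketch of that computation, illustrative only and not part of the driver (el2_idcfr_mask is a hypothetical name):

#include <stdio.h>

/* Mirror of the "0x04 << (irq == 9 ? 2 : irq)" expression used in the driver. */
static unsigned char el2_idcfr_mask(int irq)
{
	return 0x04 << (irq == 9 ? 2 : irq);	/* IRQ 9 is the AT-style alias of IRQ 2 */
}

int main(void)
{
	static const int irqs[] = { 2, 3, 4, 5, 9 };	/* IRQs the driver accepts (2 remaps to 9) */
	size_t i;

	for (i = 0; i < sizeof(irqs) / sizeof(irqs[0]); i++)
		printf("IRQ %d -> IDCFR 0x%02x\n", irqs[i], el2_idcfr_mask(irqs[i]));
	return 0;
}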
diff --git a/drivers/net/ethernet/8390/3c503.h b/drivers/net/ethernet/8390/3c503.h
deleted file mode 100644
index e2367b82a2e..00000000000
--- a/drivers/net/ethernet/8390/3c503.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* Definitions for the 3Com 3c503 Etherlink 2. */
-/* This file is distributed under the GPL.
- Many of these names and comments are directly from the Crynwr packet
- drivers, which are released under the GPL. */
-
-#define EL2H (dev->base_addr + 0x400)
-#define EL2L (dev->base_addr)
-
-/* Vendor unique hardware addr. prefix. 3Com has 2 because they ran
- out of available addresses on the first one... */
-
-#define OLD_3COM_ID 0x02608c
-#define NEW_3COM_ID 0x0020af
-
-/* Shared memory management parameters. NB: The 8 bit cards have only
- one bank (MB1) which serves both Tx and Rx packet space. The 16bit
- cards have 2 banks, MB0 for Tx packets, and MB1 for Rx packets.
- You choose which bank appears in the sh. mem window with EGACFR_MBSn */
-
-#define EL2_MB0_START_PG (0x00) /* EL2/16 Tx packets go in bank 0 */
-#define EL2_MB1_START_PG (0x20) /* First page of bank 1 */
-#define EL2_MB1_STOP_PG (0x40) /* Last page +1 of bank 1 */
-
-/* 3Com 3c503 ASIC registers */
-#define E33G_STARTPG (EL2H+0) /* Start page, matching EN0_STARTPG */
-#define E33G_STOPPG (EL2H+1) /* Stop page, must match EN0_STOPPG */
-#define E33G_DRQCNT (EL2H+2) /* DMA burst count */
-#define E33G_IOBASE (EL2H+3) /* Read of I/O base jumpers. */
- /* (non-useful, but it also appears at the end of EPROM space) */
-#define E33G_ROMBASE (EL2H+4) /* Read of memory base jumpers. */
-#define E33G_GACFR (EL2H+5) /* Config/setup bits for the ASIC GA */
-#define E33G_CNTRL (EL2H+6) /* Board's main control register */
-#define E33G_STATUS (EL2H+7) /* Status on completions. */
-#define E33G_IDCFR (EL2H+8) /* Interrupt/DMA config register */
- /* (Which IRQ to assert, DMA chan to use) */
-#define E33G_DMAAH (EL2H+9) /* High byte of DMA address reg */
-#define E33G_DMAAL (EL2H+10) /* Low byte of DMA address reg */
-/* "Vector pointer" - if this address matches a read, the EPROM (rather than
- shared RAM) is mapped into memory space. */
-#define E33G_VP2 (EL2H+11)
-#define E33G_VP1 (EL2H+12)
-#define E33G_VP0 (EL2H+13)
-#define E33G_FIFOH (EL2H+14) /* FIFO for programmed I/O moves */
-#define E33G_FIFOL (EL2H+15) /* ... low byte of above. */
-
-/* Bits in E33G_CNTRL register: */
-
-#define ECNTRL_RESET (0x01) /* Software reset of the ASIC and 8390 */
-#define ECNTRL_THIN (0x02) /* Onboard xcvr enable, AUI disable */
-#define ECNTRL_AUI (0x00) /* Onboard xcvr disable, AUI enable */
-#define ECNTRL_SAPROM (0x04) /* Map the station address prom */
-#define ECNTRL_DBLBFR (0x20) /* FIFO configuration bit */
-#define ECNTRL_OUTPUT (0x40) /* PC-to-3C503 direction if 1 */
-#define ECNTRL_INPUT (0x00) /* 3C503-to-PC direction if 0 */
-#define ECNTRL_START (0x80) /* Start the DMA logic */
-
-/* Bits in E33G_STATUS register: */
-
-#define ESTAT_DPRDY (0x80) /* Data port (of FIFO) ready */
-#define ESTAT_UFLW (0x40) /* Tried to read FIFO when it was empty */
-#define ESTAT_OFLW (0x20) /* Tried to write FIFO when it was full */
-#define ESTAT_DTC (0x10) /* Terminal Count from PC bus DMA logic */
-#define ESTAT_DIP (0x08) /* DMA In Progress */
-
-/* Bits in E33G_GACFR register: */
-
-#define EGACFR_NIM (0x80) /* NIC interrupt mask */
-#define EGACFR_TCM (0x40) /* DMA term. count interrupt mask */
-#define EGACFR_RSEL (0x08) /* Map a bank of card mem into system mem */
-#define EGACFR_MBS2 (0x04) /* Memory bank select, bit 2. */
-#define EGACFR_MBS1 (0x02) /* Memory bank select, bit 1. */
-#define EGACFR_MBS0 (0x01) /* Memory bank select, bit 0. */
-
-#define EGACFR_NORM (0x49) /* TCM | RSEL | MBS0 */
-#define EGACFR_IRQOFF (0xc9) /* TCM | RSEL | MBS0 | NIM */
-
-/*
- MBS2 MBS1 MBS0 Sh. mem windows card mem at:
- ---- ---- ---- -----------------------------
- 0 0 0 0x0000 -- bank 0
- 0 0 1 0x2000 -- bank 1 (only choice for 8bit card)
- 0 1 0 0x4000 -- bank 2, not used
- 0 1 1 0x6000 -- bank 3, not used
-
-There was going to be a 32k card that used bank 2 and 3, but it
-never got produced.
-
-*/
-
-
-/* End of 3C503 parameter definitions */
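Where the "magic" 0x49/0xc9 GACFR values come from, and how the MBS bits in the table above select a bank: a standalone sketch, illustrative only, re-using the bit values defined in this header (EGACFR_MBS_BANK is my own convenience macro combining MBS2..MBS0):

#include <stdio.h>

#define EGACFR_NIM		0x80		/* NIC interrupt mask */
#define EGACFR_TCM		0x40		/* DMA terminal-count interrupt mask */
#define EGACFR_RSEL		0x08		/* map a bank of card memory into system memory */
#define EGACFR_MBS_BANK(n)	((n) & 7)	/* MBS2..MBS0 pick the 8kB bank */

int main(void)
{
	/* Bank 1 is the only choice for 8-bit cards; 16-bit cards also use bank 0 for Tx. */
	unsigned norm   = EGACFR_TCM | EGACFR_RSEL | EGACFR_MBS_BANK(1);
	unsigned irqoff = norm | EGACFR_NIM;

	printf("EGACFR_NORM   = 0x%02x\n", norm);	/* 0x49 */
	printf("EGACFR_IRQOFF = 0x%02x\n", irqoff);	/* 0xc9 */
	return 0;
}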
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e1219e037c0..1b78ca7a978 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -6,8 +6,8 @@ config NET_VENDOR_8390
bool "National Semi-conductor 8390 devices"
default y
depends on NET_VENDOR_NATSEMI && (AMIGA_PCMCIA || PCI || SUPERH || \
- ISA || MCA || EISA || MAC || M32R || MACH_TX49XX || \
- MCA_LEGACY || H8300 || ARM || MIPS || ZORRO || PCMCIA || \
+ ISA || MAC || M32R || MACH_TX49XX || \
+ H8300 || ARM || MIPS || ZORRO || PCMCIA || \
EXPERIMENTAL)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
@@ -21,30 +21,6 @@ config NET_VENDOR_8390
if NET_VENDOR_8390
-config EL2
- tristate "3c503 \"EtherLink II\" support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c503.
-
-config AC3200
- tristate "Ansel Communications EISA 3200 support (EXPERIMENTAL)"
- depends on PCI && (ISA || EISA) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ac3200.
-
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
depends on PCMCIA
@@ -74,54 +50,6 @@ config AX88796_93CX6
---help---
Select this if your platform comes with an external 93CX6 eeprom.
-config E2100
- tristate "Cabletron E21xx support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called e2100.
-
-config ES3210
- tristate "Racal-Interlan EISA ES3210 support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called es3210.
-
-config HPLAN_PLUS
- tristate "HP PCLAN+ (27247B and 27252A) support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp-plus.
-
-config HPLAN
- tristate "HP PCLAN (27245 and other 27xxx series) support"
- depends on ISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called hp.
-
config HYDRA
tristate "Hydra support"
depends on ZORRO
@@ -140,18 +68,6 @@ config ARM_ETHERH
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-config LNE390
- tristate "Mylex EISA LNE390A/B support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called lne390.
-
config MAC8390
bool "Macintosh NS 8390 based ethernet cards"
depends on MAC
@@ -187,11 +103,7 @@ config NE2000
without a specific driver are compatible with NE2000.
If you have a PCI NE2000 card however, say N here and Y to "PCI
- NE2000 and clone support" under "EISA, VLB, PCI and on board
- controllers" below. If you have a NE2000 card and are running on
- an MCA system (a bus system used on some IBM PS/2 computers and
- laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
- below.
+ NE2000 and clone support" below.
To compile this driver as a module, choose M here. The module
will be called ne.
@@ -226,19 +138,6 @@ config APNE
To compile this driver as a module, choose M here: the module
will be called apne.
-config NE3210
- tristate "Novell/Eagle/Microdyne NE3210 EISA support (EXPERIMENTAL)"
- depends on PCI && EISA && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this driver
- will NOT WORK for NE3200 cards as they are completely different.
-
- To compile this driver as a module, choose M here. The module
- will be called ne3210.
-
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
depends on PCMCIA
@@ -288,18 +187,6 @@ config ULTRA
To compile this driver as a module, choose M here. The module
will be called smc-ultra.
-config ULTRA32
- tristate "SMC Ultra32 EISA support"
- depends on EISA
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called smc-ultra32.
-
config WD80x3
tristate "WD80*3 support"
depends on ISA
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index f43038babf8..588954a79b2 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -3,27 +3,17 @@
#
obj-$(CONFIG_MAC8390) += mac8390.o
-obj-$(CONFIG_AC3200) += ac3200.o 8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_ARM_ETHERH) += etherh.o
obj-$(CONFIG_AX88796) += ax88796.o
-obj-$(CONFIG_E2100) += e2100.o 8390.o
-obj-$(CONFIG_EL2) += 3c503.o 8390p.o
-obj-$(CONFIG_ES3210) += es3210.o 8390.o
-obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
-obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
-obj-$(CONFIG_LNE390) += lne390.o 8390.o
obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
-obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
-obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
diff --git a/drivers/net/ethernet/8390/ac3200.c b/drivers/net/ethernet/8390/ac3200.c
deleted file mode 100644
index ccf07942ff6..00000000000
--- a/drivers/net/ethernet/8390/ac3200.c
+++ /dev/null
@@ -1,431 +0,0 @@
-/* ac3200.c: A driver for the Ansel Communications EISA ethernet adaptor. */
-/*
- Written 1993, 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. This software may only be used and distributed
- according to the terms of the GNU General Public License as modified by SRC,
- incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is driver for the Ansel Communications Model 3200 EISA Ethernet LAN
- Adapter. The programming information is from the users manual, as related
- by glee@ardnassak.math.clemson.edu.
-
- Changelog:
-
- Paul Gortmaker 05/98 : add support for shared mem above 1MB.
-
- */
-
-static const char version[] =
- "ac3200.c:v1.01 7/1/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ac3200"
-
-/* Offsets from the base address. */
-#define AC_NIC_BASE 0x00
-#define AC_SA_PROM 0x16 /* The station address PROM. */
-#define AC_ADDR0 0x00 /* Prefix station address values. */
-#define AC_ADDR1 0x40
-#define AC_ADDR2 0x90
-#define AC_ID_PORT 0xC80
-#define AC_EISA_ID 0x0110d305
-#define AC_RESET_PORT 0xC84
-#define AC_RESET 0x00
-#define AC_ENABLE 0x01
-#define AC_CONFIG 0xC90 /* The configuration port. */
-
-#define AC_IO_EXTENT 0x20
- /* Actually accessed is:
- * AC_NIC_BASE (0-15)
- * AC_SA_PROM (0-5)
- * AC_ID_PORT (0-3)
- * AC_RESET_PORT
- * AC_CONFIG
- */
-
-/* Decoding of the configuration register. */
-static unsigned char config2irqmap[8] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static int addrmap[8] =
-{0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0 };
-static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2"};
-
-#define config2irq(configval) config2irqmap[((configval) >> 3) & 7]
-#define config2mem(configval) addrmap[(configval) & 7]
-#define config2name(configval) port_name[((configval) >> 6) & 3]
-
-/* First and last 8390 pages. */
-#define AC_START_PG 0x00 /* First page of 8390 TX buffer */
-#define AC_STOP_PG 0x80 /* Last page +1 of the 8390 RX ring */
-
-static int ac_probe1(int ioaddr, struct net_device *dev);
-
-static int ac_open(struct net_device *dev);
-static void ac_reset_8390(struct net_device *dev);
-static void ac_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ac_block_output(struct net_device *dev, const int count,
- const unsigned char *buf, const int start_page);
-static void ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-
-static int ac_close_card(struct net_device *dev);
-
-
-/* Probe for the AC3200.
-
- The AC3200 can be identified by either the EISA configuration registers,
- or the unique value in the station address PROM.
- */
-
-static int __init do_ac3200_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
-
- if (ioaddr > 0x1ff) /* Check a single specified location. */
- return ac_probe1(ioaddr, dev);
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if ( ! EISA_bus)
- return -ENXIO;
-
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (ac_probe1(ioaddr, dev) == 0)
- return 0;
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init ac3200_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_ac3200_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ac_netdev_ops = {
- .ndo_open = ac_open,
- .ndo_stop = ac_close_card,
-
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init ac_probe1(int ioaddr, struct net_device *dev)
-{
- int i, retval;
-
- if (!request_region(ioaddr, AC_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (inb_p(ioaddr + AC_ID_PORT) == 0xff) {
- retval = -ENODEV;
- goto out;
- }
-
- if (inl(ioaddr + AC_ID_PORT) != AC_EISA_ID) {
- retval = -ENODEV;
- goto out;
- }
-
-#ifndef final_version
- printk(KERN_DEBUG "AC3200 ethercard configuration register is %#02x,"
- " EISA ID %02x %02x %02x %02x.\n", inb(ioaddr + AC_CONFIG),
- inb(ioaddr + AC_ID_PORT + 0), inb(ioaddr + AC_ID_PORT + 1),
- inb(ioaddr + AC_ID_PORT + 2), inb(ioaddr + AC_ID_PORT + 3));
-#endif
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + AC_SA_PROM + i);
-
- printk(KERN_DEBUG "AC3200 in EISA slot %d, node %pM",
- ioaddr/0x1000, dev->dev_addr);
-#if 0
- /* Check the vendor ID/prefix. Redundant after checking the EISA ID */
- if (inb(ioaddr + AC_SA_PROM + 0) != AC_ADDR0
- || inb(ioaddr + AC_SA_PROM + 1) != AC_ADDR1
- || inb(ioaddr + AC_SA_PROM + 2) != AC_ADDR2 ) {
- printk(", not found (invalid prefix).\n");
- retval = -ENODEV;
- goto out;
- }
-#endif
-
- /* Assign and allocate the interrupt now. */
- if (dev->irq == 0) {
- dev->irq = config2irq(inb(ioaddr + AC_CONFIG));
- printk(", using");
- } else {
- dev->irq = irq_canonicalize(dev->irq);
- printk(", assigning");
- }
-
- retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
- goto out;
- }
-
- printk(" IRQ %d, %s port\n", dev->irq, port_name[dev->if_port]);
-
- dev->base_addr = ioaddr;
-
-#ifdef notyet
- if (dev->mem_start) { /* Override the value from the board. */
- for (i = 0; i < 7; i++)
- if (addrmap[i] == dev->mem_start)
- break;
- if (i >= 7)
- i = 0;
- outb((inb(ioaddr + AC_CONFIG) & ~7) | i, ioaddr + AC_CONFIG);
- }
-#endif
-
- dev->if_port = inb(ioaddr + AC_CONFIG) >> 6;
- dev->mem_start = config2mem(inb(ioaddr + AC_CONFIG));
-
- printk("%s: AC3200 at %#3x with %dkB memory at physical address %#lx.\n",
- dev->name, ioaddr, AC_STOP_PG/4, dev->mem_start);
-
- /*
- * BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- * the card mem within the region covered by `normal' RAM !!!
- *
- * ioremap() will fail in that case.
- */
- ei_status.mem = ioremap(dev->mem_start, AC_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "ac3200.c: Unable to remap card memory above 1MB !!\n");
- printk(KERN_ERR "ac3200.c: Try using EISA SCU to set memory below 1MB.\n");
- printk(KERN_ERR "ac3200.c: Driver NOT installed.\n");
- retval = -EINVAL;
- goto out1;
- }
- printk("ac3200.c: remapped %dkB card memory to virtual address %p\n",
- AC_STOP_PG/4, ei_status.mem);
-
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (AC_STOP_PG - AC_START_PG)*256;
-
- ei_status.name = "AC3200";
- ei_status.tx_start_page = AC_START_PG;
- ei_status.rx_start_page = AC_START_PG + TX_PAGES;
- ei_status.stop_page = AC_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &ac_reset_8390;
- ei_status.block_input = &ac_block_input;
- ei_status.block_output = &ac_block_output;
- ei_status.get_8390_hdr = &ac_get_8390_hdr;
-
- dev->netdev_ops = &ac_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out2;
- return 0;
-out2:
- if (ei_status.reg0)
- iounmap(ei_status.mem);
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, AC_IO_EXTENT);
- return retval;
-}
-
-static int ac_open(struct net_device *dev)
-{
-#ifdef notyet
- /* Someday we may enable the IRQ and shared memory here. */
- int ioaddr = dev->base_addr;
-#endif
-
- ei_open(dev);
- return 0;
-}
-
-static void ac_reset_8390(struct net_device *dev)
-{
- ushort ioaddr = dev->base_addr;
-
- outb(AC_RESET, ioaddr + AC_RESET_PORT);
- if (ei_debug > 1) printk("resetting AC3200, t=%ld...", jiffies);
-
- ei_status.txing = 0;
- outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void
-ac_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - AC_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- complication is when the ring buffer wraps. */
-
-static void ac_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *start = ei_status.mem + ring_offset - AC_START_PG*256;
-
- if (ring_offset + count > AC_STOP_PG*256) {
- /* We must wrap the input move. */
- int semi_count = AC_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + TX_PAGES*256, count);
- } else {
- memcpy_fromio(skb->data, start, count);
- }
-}
-
-static void ac_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - AC_START_PG)<<8);
-
- memcpy_toio(shmem, buf, count);
-}
-
-static int ac_close_card(struct net_device *dev)
-{
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
-#ifdef notyet
- /* We should someday disable shared memory and interrupts. */
- outb(0x00, ioaddr + 6); /* Disable interrupts. */
- free_irq(dev->irq, dev);
-#endif
-
- ei_close(dev);
- return 0;
-}
-
-#ifdef MODULE
-#define MAX_AC32_CARDS 4 /* Max number of AC32 cards per module */
-static struct net_device *dev_ac32[MAX_AC32_CARDS];
-static int io[MAX_AC32_CARDS];
-static int irq[MAX_AC32_CARDS];
-static int mem[MAX_AC32_CARDS];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "Memory base address(es)");
-MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-static int __init ac3200_module_init(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev]; /* Currently ignored by driver */
- if (do_ac3200_probe(dev) == 0) {
- dev_ac32[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "ac3200.c: No ac3200 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* Someday free_irq may be in ac_close_card() */
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, AC_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-static void __exit ac3200_module_exit(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_AC32_CARDS; this_dev++) {
- struct net_device *dev = dev_ac32[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-module_init(ac3200_module_init);
-module_exit(ac3200_module_exit);
-#endif /* MODULE */
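The config2irq(), config2mem() and config2name() macros near the top of ac3200.c above unpack the single AC_CONFIG byte into an IRQ, a shared-memory base and a media port. A standalone sketch of that decoding, illustrative only; the sample register value 0x0a is made up:

#include <stdio.h>

static const unsigned char config2irqmap[8] = { 15, 12, 11, 10, 9, 7, 5, 3 };
static const int addrmap[8] = {
	0xFF0000, 0xFE0000, 0xFD0000, 0xFFF0000, 0xFFE0000, 0xFFC0000, 0xD0000, 0
};
static const char *port_name[4] = { "10baseT", "invalid", "AUI", "10base2" };

int main(void)
{
	unsigned char cfg = 0x0a;	/* hypothetical AC_CONFIG reading */

	printf("IRQ %d, shared mem %#x, %s port\n",
	       config2irqmap[(cfg >> 3) & 7],	/* bits 5:3 -> IRQ */
	       addrmap[cfg & 7],		/* bits 2:0 -> memory base */
	       port_name[(cfg >> 6) & 3]);	/* bits 7:6 -> media port */
	/* -> IRQ 12, shared mem 0xfd0000, 10baseT port */
	return 0;
}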
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 70dba5d01ad..cab306a9888 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -358,7 +358,7 @@ static int ax_mii_probe(struct net_device *dev)
return -ENODEV;
}
- ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change,
PHY_INTERFACE_MODE_MII);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -469,9 +469,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pdev->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/8390/e2100.c b/drivers/net/ethernet/8390/e2100.c
deleted file mode 100644
index ed55ce85ebb..00000000000
--- a/drivers/net/ethernet/8390/e2100.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
-/*
- Written 1993-1994 by Donald Becker.
-
- Copyright 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- This is a driver for the Cabletron E2100 series ethercards.
-
- The Author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- The E2100 series ethercard is a fairly generic shared memory 8390
- implementation. The only unusual aspect is the way the shared memory
- registers are set: first you do an inb() in what is normally the
- station address region, and the low three bits of the next outb() *address*
- are used as the write value for that register. Either someone wasn't
- too used to dem bit en bites, or they were trying to obfuscate the
- programming interface.
-
- There is an additional complication when setting the window on the packet
- buffer. You must first do a read into the packet buffer region with the
- low 8 address bits the address setting the page for the start of the packet
- buffer window, and then do the above operation. See mem_on() for details.
-
- One bug on the chip is that even a hard reset won't disable the memory
- window, usually resulting in a hung machine if mem_off() isn't called.
- If this happens, you must power down the machine for about 30 seconds.
-*/
-
-static const char version[] =
- "e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "e2100"
-
-static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
-
-/* Offsets from the base_addr.
- Read from the ASIC register, and the low three bits of the next outb()
- address is used to set the corresponding register. */
-#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
-#define E21_ASIC 0x10
-#define E21_MEM_ENABLE 0x10
-#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
-#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
-#define E21_MEM_BASE 0x11
-#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
-#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
-#define E21_MEDIA 0x14 /* (alias). */
-#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
-#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
-#define E21_SAPROM 0x10 /* Offset to station address data. */
-#define E21_IO_EXTENT 0x20
-
-static inline void mem_on(short port, volatile char __iomem *mem_base,
- unsigned char start_page )
-{
- /* This is a little weird: set the shared memory window by doing a
- read. The low address bits specify the starting page. */
- readb(mem_base+start_page);
- inb(port + E21_MEM_ENABLE);
- outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
-}
-
-static inline void mem_off(short port)
-{
- inb(port + E21_MEM_ENABLE);
- outb(0x00, port + E21_MEM_ENABLE);
-}
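/*
 * Illustrative sketch, not from the original driver: the ASIC write trick
 * described in the comment at the top of this file and used by mem_on(),
 * mem_off() and e21_open() -- an inb() selects the register, then the low
 * three bits of the following outb() *address* carry the value.  Assumes
 * the kernel inb()/outb() accessors and the E21_ offsets defined above;
 * e21_write_asic() is a hypothetical helper name.
 */
static inline void e21_write_asic(short port, int select_off, unsigned value)
{
	inb(port + select_off);			/* latch which ASIC register to write */
	outb(0, port + E21_ASIC + (value & 7));	/* the data rides on the address lines */
}
/* e.g. e21_open() sets the low IRQ bits with the equivalent of
 *	e21_write_asic(ioaddr, E21_IRQ_LOW, dev->irq & 7);
 */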
-
-/* In other drivers I put the TX pages first, but the E2100 window circuitry
- is designed to have a 4K Tx region last. The windowing circuitry wraps the
- window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
- appear contiguously in the window. */
-#define E21_RX_START_PG 0x00 /* First page of RX buffer */
-#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
-#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
-
-static int e21_probe1(struct net_device *dev, int ioaddr);
-
-static int e21_open(struct net_device *dev);
-static void e21_reset_8390(struct net_device *dev);
-static void e21_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void e21_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static int e21_open(struct net_device *dev);
-static int e21_close(struct net_device *dev);
-
-
-/* Probe for the E2100 series ethercards. These cards have an 8390 at the
- base address and the station address at both offset 0x10 and 0x18. I read
- the station address from offset 0x18 to avoid the dataport of NE2000
- ethercards, and look for Ctron's unique ID (first three octets of the
- station address).
- */
-
-static int __init do_e2100_probe(struct net_device *dev)
-{
- int *port;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return e21_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (port = e21_probe_list; *port; port++) {
- dev->irq = irq;
- if (e21_probe1(dev, *port) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init e2100_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_e2100_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops e21_netdev_ops = {
- .ndo_open = e21_open,
- .ndo_stop = e21_close,
-
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init e21_probe1(struct net_device *dev, int ioaddr)
-{
- int i, status, retval;
- unsigned char *station_addr = dev->dev_addr;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* First check the station address for the Ctron prefix. */
- if (inb(ioaddr + E21_SAPROM + 0) != 0x00 ||
- inb(ioaddr + E21_SAPROM + 1) != 0x00 ||
- inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Verify by making certain that there is an 8390 there. */
- outb(E8390_NODMA + E8390_STOP, ioaddr);
- udelay(1); /* we want to delay one I/O cycle - which is 2MHz */
- status = inb(ioaddr);
- if (status != 0x21 && status != 0x23) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Read the station address PROM. */
- for (i = 0; i < 6; i++)
- station_addr[i] = inb(ioaddr + E21_SAPROM + i);
-
- inb(ioaddr + E21_MEDIA); /* Point to media selection. */
- outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- for (i = 0; i < 6; i++)
- printk(" %02X", station_addr[i]);
-
- if (dev->irq < 2) {
- static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
- for (i = 0; i < ARRAY_SIZE(irqlist); i++)
- if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
- dev->irq = irqlist[i];
- break;
- }
- if (i >= ARRAY_SIZE(irqlist)) {
- printk(" unable to get IRQ %d.\n", dev->irq);
- retval = -EAGAIN;
- goto out;
- }
- } else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
- dev->irq = 9;
-
- /* The 8390 is at the base address. */
- dev->base_addr = ioaddr;
-
- ei_status.name = "E2100";
- ei_status.word16 = 1;
- ei_status.tx_start_page = E21_TX_START_PG;
- ei_status.rx_start_page = E21_RX_START_PG;
- ei_status.stop_page = E21_RX_STOP_PG;
- ei_status.saved_irq = dev->irq;
-
- /* Check the media port used. The port can be passed in on the
- low mem_end bits. */
- if (dev->mem_end & 15)
- dev->if_port = dev->mem_end & 7;
- else {
- dev->if_port = 0;
- inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
- for(i = 0; i < 6; i++)
- if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
- dev->if_port = 1;
- break;
- }
- }
-
- /* Never map in the E21 shared memory unless you are actively using it.
- Also, the shared memory has effectively only one setting -- spread all
- over the 128K region! */
- if (dev->mem_start == 0)
- dev->mem_start = 0xd0000;
-
- ei_status.mem = ioremap(dev->mem_start, 2*1024);
- if (!ei_status.mem) {
- printk("unable to remap memory\n");
- retval = -EAGAIN;
- goto out;
- }
-
-#ifdef notdef
- /* These values are unused. The E2100 has a 2K window into the packet
- buffer. The window can be set to start on any page boundary. */
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024;
-#endif
-
- printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
- dev->if_port ? "secondary" : "primary", dev->mem_start);
-
- ei_status.reset_8390 = &e21_reset_8390;
- ei_status.block_input = &e21_block_input;
- ei_status.block_output = &e21_block_output;
- ei_status.get_8390_hdr = &e21_get_8390_hdr;
-
- dev->netdev_ops = &e21_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out;
- return 0;
-out:
- release_region(ioaddr, E21_IO_EXTENT);
- return retval;
-}
-
-static int
-e21_open(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- int retval;
-
- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
- return retval;
-
- /* Set the interrupt line and memory base on the hardware. */
- inb(ioaddr + E21_IRQ_LOW);
- outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
- inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
- outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
- + (dev->if_port ? E21_ALT_IFPORT : 0));
- inb(ioaddr + E21_MEM_BASE);
- outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
-
- ei_open(dev);
- return 0;
-}
-
-static void
-e21_reset_8390(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- outb(0x01, ioaddr);
- if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
- ei_status.txing = 0;
-
- /* Set up the ASIC registers, just in case something changed them. */
-
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. We put the 2k window so the header page
- appears at the start of the shared memory. */
-
-static void
-e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-
- short ioaddr = dev->base_addr;
- char __iomem *shared_mem = ei_status.mem;
-
- mem_on(ioaddr, shared_mem, ring_page);
-
-#ifdef notdef
- /* Officially this is what we are doing, but the readl() is faster */
- memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
-#else
- ((unsigned int*)hdr)[0] = readl(shared_mem);
-#endif
-
- /* Turn off memory access: we would need to reprogram the window anyway. */
- mem_off(ioaddr);
-
-}
-
-/* Block input and output are easy on shared memory ethercards.
- The E21xx makes block_input() especially easy by wrapping the top
- ring buffer to the bottom automatically. */
-static void
-e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- short ioaddr = dev->base_addr;
- char __iomem *shared_mem = ei_status.mem;
-
- mem_on(ioaddr, shared_mem, (ring_offset>>8));
-
- memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count);
-
- mem_off(ioaddr);
-}
-
-static void
-e21_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
-{
- short ioaddr = dev->base_addr;
- volatile char __iomem *shared_mem = ei_status.mem;
-
- /* Set the shared memory window start by doing a read, with the low address
- bits specifying the starting page. */
- readb(shared_mem + start_page);
- mem_on(ioaddr, shared_mem, start_page);
-
- memcpy_toio(shared_mem, buf, count);
- mem_off(ioaddr);
-}
-
-static int
-e21_close(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
- free_irq(dev->irq, dev);
- dev->irq = ei_status.saved_irq;
-
- /* Shut off the interrupt line and secondary interface. */
- inb(ioaddr + E21_IRQ_LOW);
- outb(0, ioaddr + E21_ASIC);
- inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
- outb(0, ioaddr + E21_ASIC);
-
- ei_close(dev);
-
- /* Double-check that the memory has been turned off, because really
- really bad things happen if it isn't. */
- mem_off(ioaddr);
-
- return 0;
-}
-
-
-#ifdef MODULE
-#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
-static struct net_device *dev_e21[MAX_E21_CARDS];
-static int io[MAX_E21_CARDS];
-static int irq[MAX_E21_CARDS];
-static int mem[MAX_E21_CARDS];
-static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-module_param_array(xcvr, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, " memory base address(es)");
-MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
-MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
- if (do_e2100_probe(dev) == 0) {
- dev_e21[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: e21_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr, E21_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
- struct net_device *dev = dev_e21[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
deleted file mode 100644
index ba1b5c95531..00000000000
--- a/drivers/net/ethernet/8390/es3210.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- es3210.c
-
- Linux driver for Racal-Interlan ES3210 EISA Network Adapter
-
- Copyright (C) 1996, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) The existing myriad of Linux 8390 drivers written by Donald Becker.
-
- 2) Once again Russ Nelson's asm packet driver provided additional info.
-
- 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
- Too bad it doesn't work -- see below.
-
- The ES3210 is an EISA shared memory NS8390 implementation. Note
- that all memory copies to/from the board must be 32bit transfers.
- Which rules out using eth_io_copy_and_sum() in this driver.
-
- Apparently there are two slightly different revisions of the
- card, since there are two distinct EISA cfg files (!rii0101.cfg
- and !rii0102.cfg). One has media select in the cfg file and the
- other doesn't. Hopefully this will work with either.
-
- That is about all I can tell you about it, having never actually
- even seen one of these cards. :) Try http://www.interlan.com
- if you want more info.
-
- Thanks go to Mark Salazar for testing v0.02 of this driver.
-
- Bugs, to-fix, etc:
-
- 1) The EISA cfg ports that are *supposed* to have the IRQ and shared
- mem values just read 0xff all the time. Hrrmpf. Apparently the
- same happens with the packet driver as the code for reading
- these registers is disabled there. In the meantime, boot with:
- ether=<IRQ>,0,0x<shared_mem_addr>,eth0 to override the IRQ and
- shared memory detection. (The i/o port detection is okay.)
-
- 2) Module support currently untested. Probably works though.
-
-*/
-
-static const char version[] =
- "es3210.c: Driver revision v0.03, 14/09/96\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-static int es_probe1(struct net_device *dev, int ioaddr);
-
-static void es_reset_8390(struct net_device *dev);
-
-static void es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void es_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page);
-
-#define ES_START_PG 0x00 /* First page of TX buffer */
-#define ES_STOP_PG 0x40 /* Last page +1 of RX ring */
-
-#define ES_IO_EXTENT 0x37 /* The cfg file says 0xc90 -> 0xcc7 */
-#define ES_ID_PORT 0xc80 /* Same for all EISA cards */
-#define ES_SA_PROM 0xc90 /* Start of e'net addr. */
-#define ES_RESET_PORT 0xc84 /* From the packet driver source */
-#define ES_NIC_OFFSET 0xca0 /* Hello, the 8390 is *here* */
-
-#define ES_ADDR0 0x02 /* 3 byte vendor prefix */
-#define ES_ADDR1 0x07
-#define ES_ADDR2 0x01
-
-/*
- * Two card revisions. EISA IDs are always rev. minor, rev. major, and
- * then the three vendor letters stored in 5 bits each, with an "a" = 1.
- * For example: "rii" = 10010 01001 01001 = 0x4929, which is how the EISA
- * config utility determines automagically what config file(s) to use.
- */
-#define ES_EISA_ID1 0x01012949 /* !rii0101.cfg */
-#define ES_EISA_ID2 0x02012949 /* !rii0102.cfg */
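
The packing rule above can be sanity-checked with a small standalone user-space sketch (illustration only, not part of the driver): it pulls the two vendor-ID bytes out of the 32-bit value returned by inl() at the EISA ID port and unpacks the three 5-bit letters, so ES_EISA_ID1 decodes back to "rii".

	#include <stdio.h>

	/* Unpack the three EISA vendor letters ("a" = 1, 5 bits each). The
	 * vendor ID is big-endian in the first two ID-port bytes, which are
	 * the two low-order bytes of the little-endian inl() value. */
	static void eisa_vendor_letters(unsigned int id, char out[4])
	{
		unsigned int vendor = ((id & 0xff) << 8) | ((id >> 8) & 0xff);

		out[0] = 'a' - 1 + ((vendor >> 10) & 0x1f);
		out[1] = 'a' - 1 + ((vendor >> 5) & 0x1f);
		out[2] = 'a' - 1 + (vendor & 0x1f);
		out[3] = '\0';
	}

	int main(void)
	{
		char name[4];

		eisa_vendor_letters(0x01012949, name);	/* ES_EISA_ID1 */
		printf("%s\n", name);			/* prints "rii" */
		return 0;
	}
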
-
-#define ES_CFG1 0xcc0 /* IOPORT(1) --> IOPORT(6) in cfg file */
-#define ES_CFG2 0xcc1
-#define ES_CFG3 0xcc2
-#define ES_CFG4 0xcc3
-#define ES_CFG5 0xcc4
-#define ES_CFG6 0xc84 /* NB: 0xc84 is also "reset" port. */
-
-/*
- * You can OR any of the following bits together and assign it
- * to ES_DEBUG to get verbose driver info during operation.
- * Some of these don't do anything yet.
- */
-
-#define ES_D_PROBE 0x01
-#define ES_D_RX_PKT 0x02
-#define ES_D_TX_PKT 0x04
-#define ES_D_IRQ	0x08
-
-#define ES_DEBUG 0
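
As the comment says, these bits are meant to be OR'd together; for example, rebuilding with

	#define ES_DEBUG (ES_D_PROBE | ES_D_RX_PKT)

would request probe and receive-packet chatter, although only the ES_D_PROBE tests appear to be compiled into this version of the file.
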
-
-static unsigned char lo_irq_map[] __initdata = {3, 4, 5, 6, 7, 9, 10};
-static unsigned char hi_irq_map[] __initdata = {11, 12, 0, 14, 0, 0, 0, 15};
-
-/*
- * Probe for the card. The best way is to read the EISA ID if it
- * is known. Then we check the prefix of the station address
- * PROM for a match against the Racal-Interlan assigned value.
- */
-
-static int __init do_es_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
-
- if (ioaddr > 0x1ff) /* Check a single specified location. */
- return es_probe1(dev, ioaddr);
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if (!EISA_bus) {
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: Not EISA bus. Not probing high ports.\n");
-#endif
- return -ENXIO;
- }
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (es_probe1(dev, ioaddr) == 0)
- return 0;
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init es_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_es_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init es_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval;
- unsigned long eisa_id;
-
- if (!request_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT, "es3210"))
- return -ENODEV;
-
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + ES_ID_PORT));
- printk("es3210.c: config regs: %#x %#x %#x %#x %#x %#x\n",
- inb(ioaddr + ES_CFG1), inb(ioaddr + ES_CFG2), inb(ioaddr + ES_CFG3),
- inb(ioaddr + ES_CFG4), inb(ioaddr + ES_CFG5), inb(ioaddr + ES_CFG6));
-#endif
-
-/* Check the EISA ID of the card. */
- eisa_id = inl(ioaddr + ES_ID_PORT);
- if ((eisa_id != ES_EISA_ID1) && (eisa_id != ES_EISA_ID2)) {
- retval = -ENODEV;
- goto out;
- }
-
- for (i = 0; i < ETH_ALEN ; i++)
- dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
-
-/* Check the Racal vendor ID as well. */
- if (dev->dev_addr[0] != ES_ADDR0 ||
- dev->dev_addr[1] != ES_ADDR1 ||
- dev->dev_addr[2] != ES_ADDR2) {
-		printk("es3210.c: card not found %pM (invalid prefix).\n",
- dev->dev_addr);
- retval = -ENODEV;
- goto out;
- }
-
- printk("es3210.c: ES3210 rev. %ld at %#x, node %pM",
- eisa_id>>24, ioaddr, dev->dev_addr);
-
- /* Snarf the interrupt now. */
- if (dev->irq == 0) {
- unsigned char hi_irq = inb(ioaddr + ES_CFG2) & 0x07;
- unsigned char lo_irq = inb(ioaddr + ES_CFG1) & 0xfe;
-
- if (hi_irq != 0) {
- dev->irq = hi_irq_map[hi_irq - 1];
- } else {
- int i = 0;
- while (lo_irq > (1<<i)) i++;
- dev->irq = lo_irq_map[i];
- }
- printk(" using IRQ %d", dev->irq);
-#if ES_DEBUG & ES_D_PROBE
- printk("es3210.c: hi_irq %#x, lo_irq %#x, dev->irq = %d\n",
- hi_irq, lo_irq, dev->irq);
-#endif
- } else {
- if (dev->irq == 2)
- dev->irq = 9; /* Doh! */
- printk(" assigning IRQ %d", dev->irq);
- }
-
- if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- retval = -EAGAIN;
- goto out;
- }
-
- if (dev->mem_start == 0) {
- unsigned char mem_enabled = inb(ioaddr + ES_CFG2) & 0xc0;
- unsigned char mem_bits = inb(ioaddr + ES_CFG3) & 0x07;
-
- if (mem_enabled != 0x80) {
- printk(" shared mem disabled - giving up\n");
- retval = -ENXIO;
- goto out1;
- }
- dev->mem_start = 0xC0000 + mem_bits*0x4000;
- printk(" using ");
- } else {
- printk(" assigning ");
- }
-
- ei_status.mem = ioremap(dev->mem_start, (ES_STOP_PG - ES_START_PG)*256);
- if (!ei_status.mem) {
- printk("ioremap failed - giving up\n");
- retval = -ENXIO;
- goto out1;
- }
-
- dev->mem_end = dev->mem_start + (ES_STOP_PG - ES_START_PG)*256;
-
- printk("mem %#lx-%#lx\n", dev->mem_start, dev->mem_end-1);
-
-#if ES_DEBUG & ES_D_PROBE
- if (inb(ioaddr + ES_CFG5))
- printk("es3210: Warning - DMA channel enabled, but not used here.\n");
-#endif
- /* Note, point at the 8390, and not the card... */
- dev->base_addr = ioaddr + ES_NIC_OFFSET;
-
- ei_status.name = "ES3210";
- ei_status.tx_start_page = ES_START_PG;
- ei_status.rx_start_page = ES_START_PG + TX_PAGES;
- ei_status.stop_page = ES_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &es_reset_8390;
- ei_status.block_input = &es_block_input;
- ei_status.block_output = &es_block_output;
- ei_status.get_8390_hdr = &es_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
- NS8390_init(dev, 0);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr + ES_SA_PROM, ES_IO_EXTENT);
- return retval;
-}
-
-/*
- * Reset as per the packet driver method. Judging by the EISA cfg
- * file, this just toggles the "Board Enable" bits (bit 2 and 0).
- */
-
-static void es_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- unsigned long end;
-
- outb(0x04, ioaddr + ES_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the ES3210...", dev->name);
-
- end = jiffies + 2*HZ/100;
- while ((signed)(end - jiffies) > 0) continue;
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + ES_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: the following three functions implicitly assume that the
- * associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
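
A hypothetical helper (shown only to make the rounding rule explicit; it does not exist in the source) would look like this, and es_get_8390_hdr() and es_block_output() below open-code the same expression:

	/* Round a byte count up to a whole number of doublewords,
	 * e.g. 61 -> 64, while 64 stays 64. */
	static inline int es_round_to_dword(int count)
	{
		return (count + 3) & ~3;
	}
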
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-es_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - ES_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via es_get_8390_hdr() above.
- */
-
-static void es_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + ring_offset - ES_START_PG*256;
-
- if (ring_offset + count > ES_STOP_PG*256) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = ES_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void es_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - ES_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-#ifdef MODULE
-#define MAX_ES_CARDS 4 /* Max number of ES3210 cards per module */
-#define NAMELEN 8 /* # of chars for storing dev->name */
-static struct net_device *dev_es3210[MAX_ES_CARDS];
-static int io[MAX_ES_CARDS];
-static int irq[MAX_ES_CARDS];
-static int mem[MAX_ES_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- if (do_es_probe(dev) == 0) {
- dev_es3210[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "es3210.c: No es3210 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
-	/* The probe requested the region at ioaddr + ES_SA_PROM. */
-	release_region(dev->base_addr - ES_NIC_OFFSET + ES_SA_PROM, ES_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_ES_CARDS; this_dev++) {
- struct net_device *dev = dev_es3210[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
deleted file mode 100644
index 52f70f999c0..00000000000
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ /dev/null
@@ -1,505 +0,0 @@
-/* hp-plus.c: A HP PCLAN/plus ethernet driver for linux. */
-/*
- Written 1994 by Donald Becker.
-
- This driver is for the Hewlett Packard PC LAN (27***) plus ethercards.
- These cards are sold under several model numbers, usually 2724*.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- As is often the case, a great deal of credit is owed to Russ Nelson.
- The Crynwr packet driver was my primary source of HP-specific
- programming information.
-*/
-
-static const char version[] =
-"hp-plus.c:v1.10 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include <linux/module.h>
-
-#include <linux/string.h> /* Important -- this inlines word moves. */
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "hp-plus"
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int hpplus_portlist[] __initdata =
-{0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
-
-/*
- The HP EtherTwist chip implementation is a fairly routine DP8390
- implementation. It allows both shared memory and programmed-I/O buffer
- access, using a custom interface for both. The programmed-I/O mode is
- entirely implemented in the HP EtherTwist chip, bypassing the problem
- ridden built-in 8390 facilities used on NE2000 designs. The shared
- memory mode is likewise special, with an offset register used to make
- packets appear at the shared memory base. Both modes use a base and bounds
- page register to hide the Rx ring buffer wrap -- a packet that spans the
- end of physical buffer memory appears continuous to the driver. (c.f. the
- 3c503 and Cabletron E2100)
-
- A special note: the internal buffer of the board is only 8 bits wide.
- This lays several nasty traps for the unaware:
- - the 8390 must be programmed for byte-wide operations
- - all I/O and memory operations must work on whole words (the access
- latches are serially preloaded and have no byte-swapping ability).
-
- This board is laid out in I/O space much like the earlier HP boards:
- the first 16 locations are for the board registers, and the second 16 are
- for the 8390. The board is easy to identify, with both a dedicated 16 bit
- ID register and a constant 0x530* value in the upper bits of the paging
- register.
-*/
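
Concretely, the base and bounds pair mentioned above is programmed by hpp_probe1() and hpp_open() below with a single 16-bit write to offset 14: the low byte holds the first RX page (HP_START_PG + TX_PAGES/2) and the high byte the last RX page (HP_STOP_PG - 1). Assuming the usual TX_PAGES value of 12 from 8390.h (an assumption, the constant is not shown in this file), that works out to 0x06 and 0x7f.
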
-
-#define HP_ID 0x00 /* ID register, always 0x4850. */
-#define HP_PAGING 0x02 /* Registers visible @ 8-f, see PageName. */
-#define HPP_OPTION 0x04 /* Bitmapped options, see HP_Option. */
-#define HPP_OUT_ADDR 0x08 /* I/O output location in Perf_Page. */
-#define HPP_IN_ADDR 0x0A /* I/O input location in Perf_Page. */
-#define HP_DATAPORT 0x0c /* I/O data transfer in Perf_Page. */
-#define NIC_OFFSET 0x10 /* Offset to the 8390 registers. */
-#define HP_IO_EXTENT 32
-
-#define HP_START_PG 0x00 /* First page of TX buffer */
-#define HP_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-/* The register set selected in HP_PAGING. */
-enum PageName {
- Perf_Page = 0, /* Normal operation. */
- MAC_Page = 1, /* The ethernet address (+checksum). */
- HW_Page = 2, /* EEPROM-loaded hardware parameters. */
- LAN_Page = 4, /* Transceiver selection, testing, etc. */
- ID_Page = 6 };
-
-/* The bit definitions for the HPP_OPTION register. */
-enum HP_Option {
- NICReset = 1, ChipReset = 2, /* Active low, really UNreset. */
- EnableIRQ = 4, FakeIntr = 8, BootROMEnb = 0x10, IOEnb = 0x20,
- MemEnable = 0x40, ZeroWait = 0x80, MemDisable = 0x1000, };
-
-static int hpp_probe1(struct net_device *dev, int ioaddr);
-
-static void hpp_reset_8390(struct net_device *dev);
-static int hpp_open(struct net_device *dev);
-static int hpp_close(struct net_device *dev);
-static void hpp_mem_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void hpp_mem_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void hpp_io_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void hpp_io_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-static void hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-
-
-/* Probe a list of addresses for an HP LAN+ adaptor.
- This routine is almost boilerplate. */
-
-static int __init do_hpp_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return hpp_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; hpplus_portlist[i]; i++) {
- if (hpp_probe1(dev, hpplus_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init hp_plus_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_hpp_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops hpp_netdev_ops = {
- .ndo_open = hpp_open,
- .ndo_stop = hpp_close,
- .ndo_start_xmit = eip_start_xmit,
- .ndo_tx_timeout = eip_tx_timeout,
- .ndo_get_stats = eip_get_stats,
- .ndo_set_rx_mode = eip_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = eip_poll,
-#endif
-};
-
-
-/* Do the interesting part of the probe at a single address. */
-static int __init hpp_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval;
- unsigned char checksum = 0;
- const char name[] = "HP-PC-LAN+";
- int mem_start;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Check for the HP+ signature, 50 48 0x 53. */
- if (inw(ioaddr + HP_ID) != 0x4850 ||
- (inw(ioaddr + HP_PAGING) & 0xfff0) != 0x5300) {
- retval = -ENODEV;
- goto out;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s at %#3x, ", dev->name, name, ioaddr);
-
- /* Retrieve and checksum the station address. */
- outw(MAC_Page, ioaddr + HP_PAGING);
-
- for(i = 0; i < ETH_ALEN; i++) {
- unsigned char inval = inb(ioaddr + 8 + i);
- dev->dev_addr[i] = inval;
- checksum += inval;
- }
- checksum += inb(ioaddr + 14);
-
- printk("%pM", dev->dev_addr);
-
- if (checksum != 0xff) {
- printk(" bad checksum %2.2x.\n", checksum);
- retval = -ENODEV;
- goto out;
- } else {
- /* Point at the Software Configuration Flags. */
- outw(ID_Page, ioaddr + HP_PAGING);
- printk(" ID %4.4x", inw(ioaddr + 12));
- }
-
- /* Read the IRQ line. */
- outw(HW_Page, ioaddr + HP_PAGING);
- {
- int irq = inb(ioaddr + 13) & 0x0f;
- int option = inw(ioaddr + HPP_OPTION);
-
- dev->irq = irq;
- if (option & MemEnable) {
- mem_start = inw(ioaddr + 9) << 8;
- printk(", IRQ %d, memory address %#x.\n", irq, mem_start);
- } else {
- mem_start = 0;
- printk(", IRQ %d, programmed-I/O mode.\n", irq);
- }
- }
-
- /* Set the wrap registers for string I/O reads. */
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
-
- /* Set the base address to point to the NIC, not the "real" base! */
- dev->base_addr = ioaddr + NIC_OFFSET;
-
- dev->netdev_ops = &hpp_netdev_ops;
-
- ei_status.name = name;
- ei_status.word16 = 0; /* Agggghhhhh! Debug time: 2 days! */
- ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES/2;
- ei_status.stop_page = HP_STOP_PG;
-
- ei_status.reset_8390 = &hpp_reset_8390;
- ei_status.block_input = &hpp_io_block_input;
- ei_status.block_output = &hpp_io_block_output;
- ei_status.get_8390_hdr = &hpp_io_get_8390_hdr;
-
- /* Check if the memory_enable flag is set in the option register. */
- if (mem_start) {
- ei_status.block_input = &hpp_mem_block_input;
- ei_status.block_output = &hpp_mem_block_output;
- ei_status.get_8390_hdr = &hpp_mem_get_8390_hdr;
- dev->mem_start = mem_start;
- ei_status.mem = ioremap(mem_start,
- (HP_STOP_PG - HP_START_PG)*256);
- if (!ei_status.mem) {
- retval = -ENOMEM;
- goto out;
- }
- ei_status.rmem_start = dev->mem_start + TX_PAGES/2*256;
- dev->mem_end = ei_status.rmem_end
- = dev->mem_start + (HP_STOP_PG - HP_START_PG)*256;
- }
-
- outw(Perf_Page, ioaddr + HP_PAGING);
- NS8390p_init(dev, 0);
- /* Leave the 8390 and HP chip reset. */
- outw(inw(ioaddr + HPP_OPTION) & ~EnableIRQ, ioaddr + HPP_OPTION);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- iounmap(ei_status.mem);
-out:
- release_region(ioaddr, HP_IO_EXTENT);
- return retval;
-}
-
-static int
-hpp_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg;
- int retval;
-
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, dev->name, dev))) {
- return retval;
- }
-
- /* Reset the 8390 and HP chip. */
- option_reg = inw(ioaddr + HPP_OPTION);
- outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
- udelay(5);
- /* Unreset the board and enable interrupts. */
- outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
-
- /* Set the wrap registers for programmed-I/O operation. */
- outw(HW_Page, ioaddr + HP_PAGING);
- outw((HP_START_PG + TX_PAGES/2) | ((HP_STOP_PG - 1) << 8), ioaddr + 14);
-
- /* Select the operational page. */
- outw(Perf_Page, ioaddr + HP_PAGING);
-
- return eip_open(dev);
-}
-
-static int
-hpp_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- free_irq(dev->irq, dev);
- eip_close(dev);
- outw((option_reg & ~EnableIRQ) | MemDisable | NICReset | ChipReset,
- ioaddr + HPP_OPTION);
-
- return 0;
-}
-
-static void
-hpp_reset_8390(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
-
- outw(option_reg & ~(NICReset + ChipReset), ioaddr + HPP_OPTION);
- /* Pause a few cycles for the hardware reset to take place. */
- udelay(5);
- ei_status.txing = 0;
- outw(option_reg | (EnableIRQ + NICReset + ChipReset), ioaddr + HPP_OPTION);
-
- udelay(5);
-
-
- if ((inb_p(ioaddr+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
- printk("%s: hp_reset_8390() did not complete.\n", dev->name);
-
- if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
-}
-
-/* The programmed-I/O version of reading the 4 byte 8390 specific header.
- Note that transfer with the EtherTwist+ must be on word boundaries. */
-
-static void
-hpp_io_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
-
- outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
- insw(ioaddr + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. */
-
-static void
-hpp_io_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- char *buf = skb->data;
-
- outw(ring_offset, ioaddr + HPP_IN_ADDR);
- insw(ioaddr + HP_DATAPORT, buf, count>>1);
- if (count & 0x01)
- buf[count-1] = inw(ioaddr + HP_DATAPORT);
-}
-
-/* The corresponding shared memory versions of the above 2 functions. */
-
-static void
-hpp_mem_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw((ring_page<<8), ioaddr + HPP_IN_ADDR);
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
- memcpy_fromio(hdr, ei_status.mem, sizeof(struct e8390_pkt_hdr));
- outw(option_reg, ioaddr + HPP_OPTION);
- hdr->count = (le16_to_cpu(hdr->count) + 3) & ~3; /* Round up allocation. */
-}
-
-static void
-hpp_mem_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw(ring_offset, ioaddr + HPP_IN_ADDR);
-
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
-
- /* Caution: this relies on get_8390_hdr() rounding up count!
- Also note that we *can't* use eth_io_copy_and_sum() because
- it will not always copy "count" bytes (e.g. padded IP). */
-
- memcpy_fromio(skb->data, ei_status.mem, count);
- outw(option_reg, ioaddr + HPP_OPTION);
-}
-
-/* A special note: we *must* always transfer >=16 bit words.
- It's always safe to round up, so we do. */
-static void
-hpp_io_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
- outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
-}
-
-static void
-hpp_mem_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int ioaddr = dev->base_addr - NIC_OFFSET;
- int option_reg = inw(ioaddr + HPP_OPTION);
-
- outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
- outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
- memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
- outw(option_reg, ioaddr + HPP_OPTION);
-}
-
-
-#ifdef MODULE
-#define MAX_HPP_CARDS 4 /* Max number of HPP cards per module */
-static struct net_device *dev_hpp[MAX_HPP_CARDS];
-static int io[MAX_HPP_CARDS];
-static int irq[MAX_HPP_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O port address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s); ignored if properly detected");
-MODULE_DESCRIPTION("HP PC-LAN+ ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "hp-plus.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (do_hpp_probe(dev) == 0) {
- dev_hpp[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "hp-plus.c: No HP-Plus card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- /* NB: hpp_close() handles free_irq */
- iounmap(ei_status.mem);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_HPP_CARDS; this_dev++) {
- struct net_device *dev = dev_hpp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
deleted file mode 100644
index 37fa89aa457..00000000000
--- a/drivers/net/ethernet/8390/hp.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/* hp.c: A HP LAN ethernet driver for linux. */
-/*
- Written 1993-94 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is a driver for the HP PC-LAN adaptors.
-
- Sources:
- The Crynwr packet driver.
-*/
-
-static const char version[] =
- "hp.c:v1.10 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "hp"
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int hppclan_portlist[] __initdata =
-{ 0x300, 0x320, 0x340, 0x280, 0x2C0, 0x200, 0x240, 0};
-
-#define HP_IO_EXTENT 32
-
-#define HP_DATAPORT 0x0c /* "Remote DMA" data port. */
-#define HP_ID 0x07
-#define HP_CONFIGURE 0x08 /* Configuration register. */
-#define HP_RUN 0x01 /* 1 == Run, 0 == reset. */
-#define HP_IRQ 0x0E /* Mask for software-configured IRQ line. */
-#define HP_DATAON 0x10 /* Turn on dataport */
-#define NIC_OFFSET 0x10 /* Offset the 8390 registers. */
-
-#define HP_START_PG 0x00 /* First page of TX buffer */
-#define HP_8BSTOP_PG 0x80 /* Last page +1 of RX ring */
-#define HP_16BSTOP_PG 0xFF /* Same, for 16 bit cards. */
-
-static int hp_probe1(struct net_device *dev, int ioaddr);
-
-static void hp_reset_8390(struct net_device *dev);
-static void hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void hp_block_input(struct net_device *dev, int count,
- struct sk_buff *skb , int ring_offset);
-static void hp_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page);
-
-static void hp_init_card(struct net_device *dev);
-
-/* The map from IRQ number to HP_CONFIGURE register setting. */
-/* My default is IRQ5 0 1 2 3 4 5 6 7 8 9 10 11 */
-static char irqmap[16] __initdata= { 0, 0, 4, 6, 8,10, 0,14, 0, 4, 2,12,0,0,0,0};
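
Worked example: for the stated default of IRQ 5, irqmap[5] is 10 (0x0a); hp_init_card() below ORs that with HP_RUN, so 0x0b is what ends up in HP_CONFIGURE.
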
-
-
-/* Probe for an HP LAN adaptor.
- Also initialize the card and fill in STATION_ADDR with the station
- address. */
-
-static int __init do_hp_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return hp_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; hppclan_portlist[i]; i++) {
- if (hp_probe1(dev, hppclan_portlist[i]) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init hp_probe(int unit)
-{
- struct net_device *dev = alloc_eip_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_hp_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init hp_probe1(struct net_device *dev, int ioaddr)
-{
- int i, retval, board_id, wordmode;
- const char *name;
- static unsigned version_printed;
-
- if (!request_region(ioaddr, HP_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Check for the HP physical address, 08 00 09 xx xx xx. */
- /* This really isn't good enough: we may pick up HP LANCE boards
- also! Avoid the lance 0x5757 signature. */
- if (inb(ioaddr) != 0x08
- || inb(ioaddr+1) != 0x00
- || inb(ioaddr+2) != 0x09
- || inb(ioaddr+14) == 0x57) {
- retval = -ENODEV;
- goto out;
- }
-
- /* Set up the parameters based on the board ID.
- If you have additional mappings, please mail them to me -djb. */
- if ((board_id = inb(ioaddr + HP_ID)) & 0x80) {
- name = "HP27247";
- wordmode = 1;
- } else {
- name = "HP27250";
- wordmode = 0;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
-
- for(i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
-
- printk(" %pM", dev->dev_addr);
-
- /* Snarf the interrupt now. Someday this could be moved to open(). */
- if (dev->irq < 2) {
- static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
- static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
- const int *irqp = wordmode ? irq_16list : irq_8list;
- do {
- int irq = *irqp;
- if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
- unsigned long cookie = probe_irq_on();
- /* Twinkle the interrupt, and check if it's seen. */
- outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
- outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
- if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
- && request_irq (irq, eip_interrupt, 0, DRV_NAME, dev) == 0) {
- printk(" selecting IRQ %d.\n", irq);
- dev->irq = *irqp;
- break;
- }
- }
- } while (*++irqp);
- if (*irqp == 0) {
- printk(" no free IRQ lines.\n");
- retval = -EBUSY;
- goto out;
- }
- } else {
- if (dev->irq == 2)
- dev->irq = 9;
- if ((retval = request_irq(dev->irq, eip_interrupt, 0, DRV_NAME, dev))) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- goto out;
- }
- }
-
- /* Set the base address to point to the NIC, not the "real" base! */
- dev->base_addr = ioaddr + NIC_OFFSET;
- dev->netdev_ops = &eip_netdev_ops;
-
- ei_status.name = name;
- ei_status.word16 = wordmode;
- ei_status.tx_start_page = HP_START_PG;
- ei_status.rx_start_page = HP_START_PG + TX_PAGES;
- ei_status.stop_page = wordmode ? HP_16BSTOP_PG : HP_8BSTOP_PG;
-
- ei_status.reset_8390 = hp_reset_8390;
- ei_status.get_8390_hdr = hp_get_8390_hdr;
- ei_status.block_input = hp_block_input;
- ei_status.block_output = hp_block_output;
- hp_init_card(dev);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, HP_IO_EXTENT);
- return retval;
-}
-
-static void
-hp_reset_8390(struct net_device *dev)
-{
- int hp_base = dev->base_addr - NIC_OFFSET;
- int saved_config = inb_p(hp_base + HP_CONFIGURE);
-
- if (ei_debug > 1) printk("resetting the 8390 time=%ld...", jiffies);
- outb_p(0x00, hp_base + HP_CONFIGURE);
- ei_status.txing = 0;
- /* Pause just a few cycles for the hardware reset to take place. */
- udelay(5);
-
- outb_p(saved_config, hp_base + HP_CONFIGURE);
- udelay(5);
-
- if ((inb_p(hp_base+NIC_OFFSET+EN0_ISR) & ENISR_RESET) == 0)
- printk("%s: hp_reset_8390() did not complete.\n", dev->name);
-
- if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
-}
-
-static void
-hp_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
- outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
- outb_p(ring_page, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base);
-
- if (ei_status.word16)
- insw(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
- else
- insb(nic_base - NIC_OFFSET + HP_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-/* Block input and output, similar to the Crynwr packet driver. If you are
- porting to a new ethercard look at the packet driver source for hints.
- The HP LAN doesn't use shared memory -- we put the packet
- out through the "remote DMA" dataport. */
-
-static void
-hp_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
- int xfer_count = count;
- char *buf = skb->data;
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base);
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(ring_offset & 0xff, nic_base + EN0_RSARLO);
- outb_p(ring_offset >> 8, nic_base + EN0_RSARHI);
- outb_p(E8390_RREAD+E8390_START, nic_base);
- if (ei_status.word16) {
- insw(nic_base - NIC_OFFSET + HP_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base - NIC_OFFSET + HP_DATAPORT), xfer_count++;
- } else {
- insb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
- }
- /* This is for the ALPHA version only, remove for later releases. */
- if (ei_debug > 0) { /* DMA termination address check... */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- int addr = (high << 8) + low;
- /* Check only the lower 8 bits so we can ignore ring wrap. */
- if (((ring_offset + xfer_count) & 0xff) != (addr & 0xff))
- printk("%s: RX transfer address mismatch, %#4.4x vs. %#4.4x (actual).\n",
- dev->name, ring_offset + xfer_count, addr);
- }
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-static void
-hp_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- int nic_base = dev->base_addr;
- int saved_config = inb_p(nic_base - NIC_OFFSET + HP_CONFIGURE);
-
- outb_p(saved_config | HP_DATAON, nic_base - NIC_OFFSET + HP_CONFIGURE);
- /* Round the count up for word writes. Do we need to do this?
- What effect will an odd byte count have on the 8390?
- I should check someday. */
- if (ei_status.word16 && (count & 0x01))
- count++;
- /* We should already be in page 0, but to be safe... */
- outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base);
-
-#ifdef NE8390_RW_BUGFIX
- /* Handle the read-before-write bug the same way as the
- Crynwr packet driver -- the NatSemi method doesn't work. */
- outb_p(0x42, nic_base + EN0_RCNTLO);
- outb_p(0, nic_base + EN0_RCNTHI);
- outb_p(0xff, nic_base + EN0_RSARLO);
- outb_p(0x00, nic_base + EN0_RSARHI);
-#define NE_CMD 0x00
- outb_p(E8390_RREAD+E8390_START, nic_base + NE_CMD);
- /* Make certain that the dummy read has occurred. */
- inb_p(0x61);
- inb_p(0x61);
-#endif
-
- outb_p(count & 0xff, nic_base + EN0_RCNTLO);
- outb_p(count >> 8, nic_base + EN0_RCNTHI);
- outb_p(0x00, nic_base + EN0_RSARLO);
- outb_p(start_page, nic_base + EN0_RSARHI);
-
- outb_p(E8390_RWRITE+E8390_START, nic_base);
- if (ei_status.word16) {
- /* Use the 'rep' sequence for 16 bit boards. */
- outsw(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count>>1);
- } else {
- outsb(nic_base - NIC_OFFSET + HP_DATAPORT, buf, count);
- }
-
- /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here -- it's broken! */
-
- /* This is for the ALPHA version only, remove for later releases. */
- if (ei_debug > 0) { /* DMA termination address check... */
- int high = inb_p(nic_base + EN0_RSARHI);
- int low = inb_p(nic_base + EN0_RSARLO);
- int addr = (high << 8) + low;
- if ((start_page << 8) + count != addr)
- printk("%s: TX Transfer address mismatch, %#4.4x vs. %#4.4x.\n",
- dev->name, (start_page << 8) + count, addr);
- }
- outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
-}
-
-/* This function resets the ethercard if something screws up. */
-static void __init
-hp_init_card(struct net_device *dev)
-{
- int irq = dev->irq;
- NS8390p_init(dev, 0);
- outb_p(irqmap[irq&0x0f] | HP_RUN,
- dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
-}
-
-#ifdef MODULE
-#define MAX_HP_CARDS 4 /* Max number of HP cards per module */
-static struct net_device *dev_hp[MAX_HP_CARDS];
-static int io[MAX_HP_CARDS];
-static int irq[MAX_HP_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
-MODULE_DESCRIPTION("HP PC-LAN ISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that only a single autoprobe takes place per call.
-ISA device autoprobes on a running machine are not recommended. */
-int __init
-init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
- if (io[this_dev] == 0) {
- if (this_dev != 0) break; /* only autoprobe 1st one */
- printk(KERN_NOTICE "hp.c: Presently autoprobing (not recommended) for a single card.\n");
- }
- dev = alloc_eip_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (do_hp_probe(dev) == 0) {
- dev_hp[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "hp.c: No HP card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr - NIC_OFFSET, HP_IO_EXTENT);
-}
-
-void __exit
-cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_HP_CARDS; this_dev++) {
- struct net_device *dev = dev_hp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
deleted file mode 100644
index 479409bf2e3..00000000000
--- a/drivers/net/ethernet/8390/lne390.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- lne390.c
-
- Linux driver for Mylex LNE390 EISA Network Adapter
-
- Copyright (C) 1996-1998, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) Based upon framework of es3210 driver.
- 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
- 3) Russ Nelson's asm packet driver provided additional info.
- 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files.
-
- The LNE390 is an EISA shared memory NS8390 implementation. Note
- that all memory copies to/from the board must be 32bit transfers.
- There are two versions of the card: the lne390a and the lne390b.
- Going by the EISA cfg files, the "a" has jumpers to select between
- BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU.
- The shared memory address selection is also slightly different.
-	Note that shared memory addresses > 1MB are supported with this driver.
-
- You can try <http://www.mylex.com> if you want more info, as I've
- never even seen one of these cards. :)
-
- Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01
- - get rid of check_region
- - no need to check if dev == NULL in lne390_probe1
-*/
-
-static const char *version =
- "lne390.c: Driver revision v0.99.1, 01/09/2000\n";
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "lne390"
-
-static int lne390_probe1(struct net_device *dev, int ioaddr);
-
-static void lne390_reset_8390(struct net_device *dev);
-
-static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
-
-#define LNE390_START_PG 0x00 /* First page of TX buffer */
-#define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */
-#define LNE390_IO_EXTENT 0x20
-#define LNE390_SA_PROM 0x16 /* Start of e'net addr. */
-#define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */
-#define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
-
-#define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */
-#define LNE390_ADDR1 0x80
-#define LNE390_ADDR2 0xe5
-
-#define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */
-#define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */
-
-#define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
-#define LNE390_CFG2 0xc90
-
-/*
- * You can OR any of the following bits together and assign it
- * to LNE390_DEBUG to get verbose driver info during operation.
- * Currently only the probe one is implemented.
- */
-
-#define LNE390_D_PROBE 0x01
-#define LNE390_D_RX_PKT 0x02
-#define LNE390_D_TX_PKT 0x04
-#define LNE390_D_IRQ 0x08
-
-#define LNE390_DEBUG 0
-
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
-static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0};
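
These tables turn the 3-bit field read from LNE390_CFG2 into a 64 KB-aligned physical base: lne390_probe1() below multiplies the selected entry by 0x10000, so, for example, entry 0 selects 0xff0000 (above 1MB, as promised in the header comment) while entry 6 selects 0xd0000.
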
-
-/*
- * Probe for the card. The best way is to read the EISA ID if it
- * is known. Then we can check the prefix of the station address
- * PROM for a match against the value assigned to Mylex.
- */
-
-static int __init do_lne390_probe(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- int irq = dev->irq;
- int mem_start = dev->mem_start;
- int ret;
-
- if (ioaddr > 0x1ff) { /* Check a single specified location. */
- if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
- return -EBUSY;
- ret = lne390_probe1(dev, ioaddr);
- if (ret)
- release_region(ioaddr, LNE390_IO_EXTENT);
- return ret;
- }
- else if (ioaddr > 0) /* Don't probe at all. */
- return -ENXIO;
-
- if (!EISA_bus) {
-#if LNE390_DEBUG & LNE390_D_PROBE
- printk("lne390-debug: Not an EISA bus. Not probing high ports.\n");
-#endif
- return -ENXIO;
- }
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
- if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME))
- continue;
- if (lne390_probe1(dev, ioaddr) == 0)
- return 0;
- release_region(ioaddr, LNE390_IO_EXTENT);
- dev->irq = irq;
- dev->mem_start = mem_start;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init lne390_probe(int unit)
-{
- struct net_device *dev = alloc_ei_netdev();
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_lne390_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static int __init lne390_probe1(struct net_device *dev, int ioaddr)
-{
- int i, revision, ret;
- unsigned long eisa_id;
-
- if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV;
-
-#if LNE390_DEBUG & LNE390_D_PROBE
- printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT));
- printk("lne390-debug: config regs: %#x %#x\n",
- inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2));
-#endif
-
-
-/* Check the EISA ID of the card. */
- eisa_id = inl(ioaddr + LNE390_ID_PORT);
- if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) {
- return -ENODEV;
- }
-
-	revision = (eisa_id >> 24) & 0x01;	/* 0 = rev A, 1 = rev B */
-
-#if 0
-/* Check the Mylex vendor ID as well. Not really required. */
- if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0
- || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
- || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
- printk("lne390.c: card not found");
- for (i = 0; i < ETH_ALEN; i++)
- printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
- printk(" (invalid prefix).\n");
- return -ENODEV;
- }
-#endif
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
- printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
- 0xa+revision, ioaddr/0x1000, dev->dev_addr);
-
- printk("lne390.c: ");
-
- /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
- if (dev->irq == 0) {
- unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3;
- dev->irq = irq_map[irq_reg & 0x07];
- printk("using");
- } else {
- /* This is useless unless we reprogram the card here too */
- if (dev->irq == 2) dev->irq = 9; /* Doh! */
- printk("assigning");
- }
- printk(" IRQ %d,", dev->irq);
-
- if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- return ret;
- }
-
- if (dev->mem_start == 0) {
- unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07;
-
- if (revision) /* LNE390B */
- dev->mem_start = shmem_mapB[mem_reg] * 0x10000;
- else /* LNE390A */
- dev->mem_start = shmem_mapA[mem_reg] * 0x10000;
- printk(" using ");
- } else {
- /* Should check for value in shmem_map and reprogram the card to use it */
- dev->mem_start &= 0xfff0000;
- printk(" assigning ");
- }
-
- printk("%dkB memory at physical address %#lx\n",
- LNE390_STOP_PG/4, dev->mem_start);
-
- /*
- BEWARE!! Some dain-bramaged EISA SCUs will allow you to put
- the card mem within the region covered by `normal' RAM !!!
-
- ioremap() will fail in that case.
- */
- ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n");
- printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n");
- printk(KERN_ERR "lne390.c: Driver NOT installed.\n");
- ret = -EAGAIN;
- goto cleanup;
- }
- printk("lne390.c: remapped %dkB card memory to virtual address %p\n",
- LNE390_STOP_PG/4, ei_status.mem);
-
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256;
-
- /* The 8390 offset is zero for the LNE390 */
- dev->base_addr = ioaddr;
-
- ei_status.name = "LNE390";
- ei_status.tx_start_page = LNE390_START_PG;
- ei_status.rx_start_page = LNE390_START_PG + TX_PAGES;
- ei_status.stop_page = LNE390_STOP_PG;
- ei_status.word16 = 1;
-
- if (ei_debug > 0)
- printk(version);
-
- ei_status.reset_8390 = &lne390_reset_8390;
- ei_status.block_input = &lne390_block_input;
- ei_status.block_output = &lne390_block_output;
- ei_status.get_8390_hdr = &lne390_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
- NS8390_init(dev, 0);
-
- ret = register_netdev(dev);
- if (ret)
- goto unmap;
- return 0;
-unmap:
- if (ei_status.reg0)
- iounmap(ei_status.mem);
-cleanup:
- free_irq(dev->irq, dev);
- return ret;
-}
-
-/*
- * Reset as per the packet driver method. Judging by the EISA cfg
- * file, this just toggles the "Board Enable" bits (bit 2 and 0).
- */
-
-static void lne390_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
-
- outb(0x04, ioaddr + LNE390_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name);
-
- mdelay(2);
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + LNE390_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: the following three functions implicitly assume that the
- * associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via lne390_get_8390_hdr() above.
- */
-
-static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8);
-
- if (ring_offset + count > (LNE390_STOP_PG<<8)) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = (LNE390_STOP_PG<<8) - ring_offset;
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + (TX_PAGES<<8), count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void lne390_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-
-#ifdef MODULE
-#define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */
-static struct net_device *dev_lne[MAX_LNE_CARDS];
-static int io[MAX_LNE_CARDS];
-static int irq[MAX_LNE_CARDS];
-static int mem[MAX_LNE_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_PARM_DESC(mem, "memory base address(es)");
-MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
- if (io[this_dev] == 0 && this_dev != 0)
- break;
- dev = alloc_ei_netdev();
- if (!dev)
- break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- dev->mem_start = mem[this_dev];
- if (do_lne390_probe(dev) == 0) {
- dev_lne[found++] = dev;
- continue;
- }
- free_netdev(dev);
- printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, LNE390_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) {
- struct net_device *dev = dev_lne[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index c0c127913de..587a885de25 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -374,7 +374,6 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
NS8390_init(dev, 0);
memcpy(dev->dev_addr, SA_prom, dev->addr_len);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
deleted file mode 100644
index ebcdb52ec73..00000000000
--- a/drivers/net/ethernet/8390/ne3210.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- ne3210.c
-
- Linux driver for Novell NE3210 EISA Network Adapter
-
- Copyright (C) 1998, Paul Gortmaker.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- Information and Code Sources:
-
- 1) Based upon my other EISA 8390 drivers (lne390, es3210, smc-ultra32)
- 2) The existing myriad of other Linux 8390 drivers by Donald Becker.
- 3) Info for getting IRQ and sh-mem gleaned from the EISA cfg file
-
- The NE3210 is an EISA shared memory NS8390 implementation. Shared
-	memory addresses > 1MB should work with this driver.
-
- Note that the .cfg file (3/11/93, v1.0) has AUI and BNC switched
- around (or perhaps there are some defective/backwards cards ???)
-
- This driver WILL NOT WORK FOR THE NE3200 - it is completely different
- and does not use an 8390 at all.
-
- Updated to EISA probing API 5/2003 by Marc Zyngier.
-*/
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/mm.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "ne3210"
-
-static void ne3210_reset_8390(struct net_device *dev);
-
-static void ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page);
-static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset);
-static void ne3210_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page);
-
-#define NE3210_START_PG 0x00 /* First page of TX buffer */
-#define NE3210_STOP_PG 0x80 /* Last page +1 of RX ring */
-
-#define NE3210_IO_EXTENT 0x20
-#define NE3210_SA_PROM 0x16 /* Start of e'net addr. */
-#define NE3210_RESET_PORT 0xc84
-#define NE3210_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */
-
-#define NE3210_ADDR0 0x00 /* 3 byte vendor prefix */
-#define NE3210_ADDR1 0x00
-#define NE3210_ADDR2 0x1b
-
-#define NE3210_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */
-#define NE3210_CFG2 0xc90
-#define NE3210_CFG_EXTENT (NE3210_CFG2 - NE3210_CFG1 + 1)
-
-/*
- * You can OR any of the following bits together and assign it
- * to NE3210_DEBUG to get verbose driver info during operation.
- * Currently only the probe one is implemented.
- */
-
-#define NE3210_D_PROBE 0x01
-#define NE3210_D_RX_PKT 0x02
-#define NE3210_D_TX_PKT 0x04
-#define NE3210_D_IRQ 0x08
-
-#define NE3210_DEBUG 0x0
-
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
-static const char * const ifmap[] __initconst = {"UTP", "?", "BNC", "AUI"};
-static int ifmap_val[] __initdata = {
- IF_PORT_10BASET,
- IF_PORT_UNKNOWN,
- IF_PORT_10BASE2,
- IF_PORT_AUI,
-};
-
-static int __init ne3210_eisa_probe (struct device *device)
-{
- unsigned long ioaddr, phys_mem;
- int i, retval, port_index;
- struct eisa_device *edev = to_eisa_device (device);
- struct net_device *dev;
-
- /* Allocate dev->priv and fill in 8390 specific dev fields. */
- if (!(dev = alloc_ei_netdev ())) {
- printk ("ne3210.c: unable to allocate memory for dev!\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(dev, device);
- dev_set_drvdata(device, dev);
- ioaddr = edev->base_addr;
-
- if (!request_region(ioaddr, NE3210_IO_EXTENT, DRV_NAME)) {
- retval = -EBUSY;
- goto out;
- }
-
- if (!request_region(ioaddr + NE3210_CFG1,
- NE3210_CFG_EXTENT, DRV_NAME)) {
- retval = -EBUSY;
- goto out1;
- }
-
-#if NE3210_DEBUG & NE3210_D_PROBE
- printk("ne3210-debug: probe at %#x, ID %s\n", ioaddr, edev->id.sig);
- printk("ne3210-debug: config regs: %#x %#x\n",
- inb(ioaddr + NE3210_CFG1), inb(ioaddr + NE3210_CFG2));
-#endif
-
- port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
- printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
- edev->slot, ifmap[port_index], dev->dev_addr);
-
- /* Snarf the interrupt now. CFG file has them all listed as `edge' with share=NO */
- dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
- printk("ne3210.c: using IRQ %d, ", dev->irq);
-
- retval = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev);
- if (retval) {
- printk (" unable to get IRQ %d.\n", dev->irq);
- goto out2;
- }
-
- phys_mem = shmem_map[inb(ioaddr + NE3210_CFG2) & 0x07] * 0x1000;
-
- /*
- BEWARE!! Some brain-damaged EISA SCUs will allow you to put
- the card mem within the region covered by `normal' RAM !!!
- */
- if (phys_mem > 1024*1024) { /* phys addr > 1MB */
- if (phys_mem < virt_to_phys(high_memory)) {
- printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
- printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
- printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
- (u64)virt_to_phys(high_memory));
- printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
- retval = -EINVAL;
- goto out3;
- }
- }
-
- if (!request_mem_region (phys_mem, NE3210_STOP_PG*0x100, DRV_NAME)) {
- printk ("ne3210.c: Unable to request shared memory at physical address %#lx\n",
- phys_mem);
- goto out3;
- }
-
- printk("%dkB memory at physical address %#lx\n",
- NE3210_STOP_PG/4, phys_mem);
-
- ei_status.mem = ioremap(phys_mem, NE3210_STOP_PG*0x100);
- if (!ei_status.mem) {
- printk(KERN_ERR "ne3210.c: Unable to remap card memory !!\n");
- printk(KERN_ERR "ne3210.c: Driver NOT installed.\n");
- retval = -EAGAIN;
- goto out4;
- }
- printk("ne3210.c: remapped %dkB card memory to virtual address %p\n",
- NE3210_STOP_PG/4, ei_status.mem);
- dev->mem_start = (unsigned long)ei_status.mem;
- dev->mem_end = dev->mem_start + (NE3210_STOP_PG - NE3210_START_PG)*256;
-
- /* The 8390 offset is zero for the NE3210 */
- dev->base_addr = ioaddr;
-
- ei_status.name = "NE3210";
- ei_status.tx_start_page = NE3210_START_PG;
- ei_status.rx_start_page = NE3210_START_PG + TX_PAGES;
- ei_status.stop_page = NE3210_STOP_PG;
- ei_status.word16 = 1;
- ei_status.priv = phys_mem;
-
- if (ei_debug > 0)
- printk("ne3210 loaded.\n");
-
- ei_status.reset_8390 = &ne3210_reset_8390;
- ei_status.block_input = &ne3210_block_input;
- ei_status.block_output = &ne3210_block_output;
- ei_status.get_8390_hdr = &ne3210_get_8390_hdr;
-
- dev->netdev_ops = &ei_netdev_ops;
-
- dev->if_port = ifmap_val[port_index];
-
- if ((retval = register_netdev (dev)))
- goto out5;
-
- NS8390_init(dev, 0);
- return 0;
-
- out5:
- iounmap(ei_status.mem);
- out4:
- release_mem_region (phys_mem, NE3210_STOP_PG*0x100);
- out3:
- free_irq (dev->irq, dev);
- out2:
- release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
- out1:
- release_region (ioaddr, NE3210_IO_EXTENT);
- out:
- free_netdev (dev);
-
- return retval;
-}
-
-static int ne3210_eisa_remove(struct device *device)
-{
- struct net_device *dev = dev_get_drvdata(device);
- unsigned long ioaddr = to_eisa_device (device)->base_addr;
-
- unregister_netdev (dev);
- iounmap(ei_status.mem);
- release_mem_region (ei_status.priv, NE3210_STOP_PG*0x100);
- free_irq (dev->irq, dev);
- release_region (ioaddr + NE3210_CFG1, NE3210_CFG_EXTENT);
- release_region (ioaddr, NE3210_IO_EXTENT);
- free_netdev (dev);
-
- return 0;
-}
-
-/*
- * Reset by toggling the "Board Enable" bits (bit 2 and 0).
- */
-
-static void ne3210_reset_8390(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
-
- outb(0x04, ioaddr + NE3210_RESET_PORT);
- if (ei_debug > 1) printk("%s: resetting the NE3210...", dev->name);
-
- mdelay(2);
-
- ei_status.txing = 0;
- outb(0x01, ioaddr + NE3210_RESET_PORT);
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/*
- * Note: the following three functions rely on the implicit assumption
- * that the associated memcpy will only use "rep; movsl" as long as
- * we keep the counts as some multiple of doublewords. This is a
- * requirement of the hardware, and also prevents us from using
- * eth_io_copy_and_sum() since we can't guarantee it will limit
- * itself to doubleword access.
- */
-
-/*
- * Grab the 8390 specific header. Similar to the block_input routine, but
- * we don't need to be concerned with ring wrap as the header will be at
- * the start of a page, so we optimize accordingly. (A single doubleword.)
- */
-
-static void
-ne3210_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page - NE3210_START_PG)<<8);
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */
-}
-
-/*
- * Block input and output are easy on shared memory ethercards, the only
- * complication is when the ring buffer wraps. The count will already
- * be rounded up to a doubleword value via ne3210_get_8390_hdr() above.
- */
-
-static void ne3210_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *start = ei_status.mem + ring_offset - NE3210_START_PG*256;
-
- if (ring_offset + count > NE3210_STOP_PG*256) {
- /* Packet wraps over end of ring buffer. */
- int semi_count = NE3210_STOP_PG*256 - ring_offset;
- memcpy_fromio(skb->data, start, semi_count);
- count -= semi_count;
- memcpy_fromio(skb->data + semi_count,
- ei_status.mem + TX_PAGES*256, count);
- } else {
- /* Packet is in one chunk. */
- memcpy_fromio(skb->data, start, count);
- }
-}
-
-static void ne3210_block_output(struct net_device *dev, int count,
- const unsigned char *buf, int start_page)
-{
- void __iomem *shmem = ei_status.mem + ((start_page - NE3210_START_PG)<<8);
-
- count = (count + 3) & ~3; /* Round up to doubleword */
- memcpy_toio(shmem, buf, count);
-}
-
-static struct eisa_device_id ne3210_ids[] = {
- { "EGL0101" },
- { "NVL1801" },
- { "" },
-};
-MODULE_DEVICE_TABLE(eisa, ne3210_ids);
-
-static struct eisa_driver ne3210_eisa_driver = {
- .id_table = ne3210_ids,
- .driver = {
- .name = "ne3210",
- .probe = ne3210_eisa_probe,
- .remove = ne3210_eisa_remove,
- },
-};
-
-MODULE_DESCRIPTION("NE3210 EISA Ethernet driver");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(eisa, ne3210_ids);
-
-static int ne3210_init(void)
-{
- return eisa_driver_register (&ne3210_eisa_driver);
-}
-
-static void ne3210_cleanup(void)
-{
- eisa_driver_unregister (&ne3210_eisa_driver);
-}
-
-module_init (ne3210_init);
-module_exit (ne3210_cleanup);
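
Reviewer note: the removed ne3210_block_input() above, like the other shared-memory 8390 boards dropped in this series, copies a received frame out of card RAM in at most two pieces, wrapping from the end of the receive ring back to the first page after the transmit area. Below is a minimal standalone sketch of that logic, using plain memcpy() over an ordinary buffer instead of memcpy_fromio(); PAGE_SZ, TX_PAGES and the ring bounds are illustrative stand-ins, not the kernel definitions.

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ    256
    #define TX_PAGES   12                      /* assumed Tx area size, pages */
    #define RING_START (TX_PAGES * PAGE_SZ)    /* first byte of the Rx ring   */
    #define RING_STOP  (0x80 * PAGE_SZ)        /* one past the last ring byte */

    /* Copy `count` bytes starting at `ring_offset`, wrapping back to the
     * start of the Rx area when the packet runs past the end of the ring. */
    static void ring_copy(unsigned char *dst, const unsigned char *shmem,
                          int ring_offset, int count)
    {
        if (ring_offset + count > RING_STOP) {
            int semi_count = RING_STOP - ring_offset;  /* bytes before the wrap */

            memcpy(dst, shmem + ring_offset, semi_count);
            memcpy(dst + semi_count, shmem + RING_START, count - semi_count);
        } else {
            memcpy(dst, shmem + ring_offset, count);   /* contiguous packet */
        }
    }

    int main(void)
    {
        static unsigned char shmem[RING_STOP];         /* stand-in for card RAM */
        unsigned char pkt[600];

        ring_copy(pkt, shmem, RING_STOP - 100, 600);   /* wraps after 100 bytes */
        printf("copied %zu bytes\n", sizeof(pkt));
        return 0;
    }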
diff --git a/drivers/net/ethernet/8390/smc-ultra32.c b/drivers/net/ethernet/8390/smc-ultra32.c
deleted file mode 100644
index 923e42aedcf..00000000000
--- a/drivers/net/ethernet/8390/smc-ultra32.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/* smc-ultra32.c: An SMC Ultra32 EISA ethernet driver for linux.
-
-Sources:
-
- This driver is based on (cloned from) the ISA SMC Ultra driver
- written by Donald Becker. Modifications to support the EISA
- version of the card by Paul Gortmaker and Leonard N. Zubkoff.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
-Theory of Operation:
-
- The SMC Ultra32C card uses the SMC 83c790 chip which is also
- found on the ISA SMC Ultra cards. It has a shared memory mode of
- operation that makes it similar to the ISA version of the card.
- The main difference is that the EISA card has 32KB of RAM, but
- only an 8KB window into that memory. The EISA card also can be
- set for a bus-mastering mode of operation via the ECU, but that
- is not (and probably will never be) supported by this driver.
- The ECU should be run to enable shared memory and to disable the
- bus-mastering feature for use with linux.
-
- By programming the 8390 to use only 8KB RAM, the modifications
- to the ISA driver can be limited to the probe and initialization
- code. This allows easy integration of EISA support into the ISA
- driver. However, the driver development kit from SMC provided the
- register information for sliding the 8KB window, and hence the 8390
- is programmed to use the full 32KB RAM.
-
- Unfortunately this required code changes outside the probe/init
- routines, and thus we decided to separate the EISA driver from
- the ISA one. In this way, ISA users don't end up with a larger
- driver due to the EISA code, and EISA users don't end up with a
- larger driver due to the ISA EtherEZ PIO code. The driver is
- similar to the 3c503/16 driver, in that the window must be set
- back to the 1st 8KB of space for access to the two 8390 Tx slots.
-
- In testing, using only 8KB RAM (3 Tx / 5 Rx) didn't appear to
- be a limiting factor, since the EISA bus could get packets off
- the card fast enough, but having lots of RAM available as Rx
- space is extra insurance if interrupt latencies become excessive.
-
-*/
-
-static const char *version = "smc-ultra32.c: 06/97 v1.00\n";
-
-
-#include <linux/module.h>
-#include <linux/eisa.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-#include <asm/io.h>
-
-#include "8390.h"
-
-#define DRV_NAME "smc-ultra32"
-
-static int ultra32_probe1(struct net_device *dev, int ioaddr);
-static int ultra32_open(struct net_device *dev);
-static void ultra32_reset_8390(struct net_device *dev);
-static void ultra32_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page);
-static void ultra32_block_input(struct net_device *dev, int count,
- struct sk_buff *skb, int ring_offset);
-static void ultra32_block_output(struct net_device *dev, int count,
- const unsigned char *buf,
- const int start_page);
-static int ultra32_close(struct net_device *dev);
-
-#define ULTRA32_CMDREG 0 /* Offset to ASIC command register. */
-#define ULTRA32_RESET 0x80 /* Board reset, in ULTRA32_CMDREG. */
-#define ULTRA32_MEMENB 0x40 /* Enable the shared memory. */
-#define ULTRA32_NIC_OFFSET 16 /* NIC register offset from the base_addr. */
-#define ULTRA32_IO_EXTENT 32
-#define EN0_ERWCNT 0x08 /* Early receive warning count. */
-
-/*
- * Defines that apply only to the Ultra32 EISA card. Note that
- * "smc" = 10011 01101 00011 = 0x4da3, and hence !smc8010.cfg translates
- * into an EISA ID of 0x1080A34D
- */
-#define ULTRA32_BASE 0xca0
-#define ULTRA32_ID 0x1080a34d
-#define ULTRA32_IDPORT (-0x20) /* 0xc80 */
-/* Config regs 1->7 from the EISA !SMC8010.CFG file. */
-#define ULTRA32_CFG1 0x04 /* 0xca4 */
-#define ULTRA32_CFG2 0x05 /* 0xca5 */
-#define ULTRA32_CFG3 (-0x18) /* 0xc88 */
-#define ULTRA32_CFG4 (-0x17) /* 0xc89 */
-#define ULTRA32_CFG5 (-0x16) /* 0xc8a */
-#define ULTRA32_CFG6 (-0x15) /* 0xc8b */
-#define ULTRA32_CFG7 0x0d /* 0xcad */
-
-static void cleanup_card(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET;
- /* NB: ultra32_close_card() does free_irq */
- release_region(ioaddr, ULTRA32_IO_EXTENT);
- iounmap(ei_status.mem);
-}
-
-/* Probe for the Ultra32. This looks like an 8013 with the station
- address PROM at I/O ports <base>+8 to <base>+13, with a checksum
- following.
-*/
-
-struct net_device * __init ultra32_probe(int unit)
-{
- struct net_device *dev;
- int base;
- int irq;
- int err = -ENODEV;
-
- if (!EISA_bus)
- return ERR_PTR(-ENODEV);
-
- dev = alloc_ei_netdev();
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- irq = dev->irq;
-
- /* EISA spec allows for up to 16 slots, but 8 is typical. */
- for (base = 0x1000 + ULTRA32_BASE; base < 0x9000; base += 0x1000) {
- if (ultra32_probe1(dev, base) == 0)
- break;
- dev->irq = irq;
- }
- if (base >= 0x9000)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-
-static const struct net_device_ops ultra32_netdev_ops = {
- .ndo_open = ultra32_open,
- .ndo_stop = ultra32_close,
- .ndo_start_xmit = ei_start_xmit,
- .ndo_tx_timeout = ei_tx_timeout,
- .ndo_get_stats = ei_get_stats,
- .ndo_set_rx_mode = ei_set_multicast_list,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ei_poll,
-#endif
-};
-
-static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
-{
- int i, edge, media, retval;
- int checksum = 0;
- const char *model_name;
- static unsigned version_printed;
- /* Values from various config regs. */
- unsigned char idreg;
- unsigned char reg4;
- const char *ifmap[] = {"UTP No Link", "", "UTP/AUI", "UTP/BNC"};
-
- if (!request_region(ioaddr, ULTRA32_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- if (inb(ioaddr + ULTRA32_IDPORT) == 0xff ||
- inl(ioaddr + ULTRA32_IDPORT) != ULTRA32_ID) {
- retval = -ENODEV;
- goto out;
- }
-
- media = inb(ioaddr + ULTRA32_CFG7) & 0x03;
- edge = inb(ioaddr + ULTRA32_CFG5) & 0x08;
- printk("SMC Ultra32 in EISA Slot %d, Media: %s, %s IRQs.\n",
- ioaddr >> 12, ifmap[media],
- (edge ? "Edge Triggered" : "Level Sensitive"));
-
- idreg = inb(ioaddr + 7);
- reg4 = inb(ioaddr + 4) & 0x7f;
-
- /* Check the ID nibble. */
- if ((idreg & 0xf0) != 0x20) { /* SMC Ultra */
- retval = -ENODEV;
- goto out;
- }
-
- /* Select the station address register set. */
- outb(reg4, ioaddr + 4);
-
- for (i = 0; i < 8; i++)
- checksum += inb(ioaddr + 8 + i);
- if ((checksum & 0xff) != 0xff) {
- retval = -ENODEV;
- goto out;
- }
-
- if (ei_debug && version_printed++ == 0)
- printk(version);
-
- model_name = "SMC Ultra32";
-
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
-
- printk("%s: %s at 0x%X, %pM",
- dev->name, model_name, ioaddr, dev->dev_addr);
-
- /* Switch from the station address to the alternate register set and
- read the useful registers there. */
- outb(0x80 | reg4, ioaddr + 4);
-
- /* Enable FINE16 mode to avoid BIOS ROM width mismatches @ reboot. */
- outb(0x80 | inb(ioaddr + 0x0c), ioaddr + 0x0c);
-
- /* Reset RAM addr. */
- outb(0x00, ioaddr + 0x0b);
-
- /* Switch back to the station address register set so that the
- MS-DOS driver can find the card after a warm boot. */
- outb(reg4, ioaddr + 4);
-
- if ((inb(ioaddr + ULTRA32_CFG5) & 0x40) == 0) {
- printk("\nsmc-ultra32: Card RAM is disabled! "
- "Run EISA config utility.\n");
- retval = -ENODEV;
- goto out;
- }
- if ((inb(ioaddr + ULTRA32_CFG2) & 0x04) == 0)
- printk("\nsmc-ultra32: Ignoring Bus-Master enable bit. "
- "Run EISA config utility.\n");
-
- if (dev->irq < 2) {
- unsigned char irqmap[] = {0, 9, 3, 5, 7, 10, 11, 15};
- int irq = irqmap[inb(ioaddr + ULTRA32_CFG5) & 0x07];
- if (irq == 0) {
- printk(", failed to detect IRQ line.\n");
- retval = -EAGAIN;
- goto out;
- }
- dev->irq = irq;
- }
-
- /* The 8390 isn't at the base address, so fake the offset */
- dev->base_addr = ioaddr + ULTRA32_NIC_OFFSET;
-
- /* Save RAM address in the unused reg0 to avoid excess inb's. */
- ei_status.reg0 = inb(ioaddr + ULTRA32_CFG3) & 0xfc;
-
- dev->mem_start = 0xc0000 + ((ei_status.reg0 & 0x7c) << 11);
-
- ei_status.name = model_name;
- ei_status.word16 = 1;
- ei_status.tx_start_page = 0;
- ei_status.rx_start_page = TX_PAGES;
- /* All Ultra32 cards have 32KB memory with an 8KB window. */
- ei_status.stop_page = 128;
-
- ei_status.mem = ioremap(dev->mem_start, 0x2000);
- if (!ei_status.mem) {
- printk(", failed to ioremap.\n");
- retval = -ENOMEM;
- goto out;
- }
- dev->mem_end = dev->mem_start + 0x1fff;
-
- printk(", IRQ %d, 32KB memory, 8KB window at 0x%lx-0x%lx.\n",
- dev->irq, dev->mem_start, dev->mem_end);
- ei_status.block_input = &ultra32_block_input;
- ei_status.block_output = &ultra32_block_output;
- ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
- ei_status.reset_8390 = &ultra32_reset_8390;
-
- dev->netdev_ops = &ultra32_netdev_ops;
- NS8390_init(dev, 0);
-
- return 0;
-out:
- release_region(ioaddr, ULTRA32_IO_EXTENT);
- return retval;
-}
-
-static int ultra32_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
- int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : IRQF_SHARED;
- int retval;
-
- retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
- if (retval)
- return retval;
-
- outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
- outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
- outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
- outb(0x01, ioaddr + 6); /* Enable Interrupts. */
- /* Set the early receive warning level in window 0 high enough not
- to receive ERW interrupts. */
- outb_p(E8390_NODMA+E8390_PAGE0, dev->base_addr);
- outb(0xff, dev->base_addr + EN0_ERWCNT);
- ei_open(dev);
- return 0;
-}
-
-static int ultra32_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* CMDREG */
-
- netif_stop_queue(dev);
-
- if (ei_debug > 1)
- printk("%s: Shutting down ethercard.\n", dev->name);
-
- outb(0x00, ioaddr + ULTRA32_CFG6); /* Disable Interrupts. */
- outb(0x00, ioaddr + 6); /* Disable interrupts. */
- free_irq(dev->irq, dev);
-
- NS8390_init(dev, 0);
-
- return 0;
-}
-
-static void ultra32_reset_8390(struct net_device *dev)
-{
- int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC base addr */
-
- outb(ULTRA32_RESET, ioaddr);
- if (ei_debug > 1) printk("resetting Ultra32, t=%ld...", jiffies);
- ei_status.txing = 0;
-
- outb(ULTRA32_MEMENB, ioaddr); /* Enable Shared Memory. */
- outb(0x80, ioaddr + ULTRA32_CFG6); /* Enable Interrupts. */
- outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
- outb(0x01, ioaddr + 6); /* Enable Interrupts. */
- if (ei_debug > 1) printk("reset done\n");
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
- we don't need to be concerned with ring wrap as the header will be at
- the start of a page, so we optimize accordingly. */
-
-static void ultra32_get_8390_hdr(struct net_device *dev,
- struct e8390_pkt_hdr *hdr,
- int ring_page)
-{
- void __iomem *hdr_start = ei_status.mem + ((ring_page & 0x1f) << 8);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- /* Select correct 8KB Window. */
- outb(ei_status.reg0 | ((ring_page & 0x60) >> 5), RamReg);
-
-#ifdef __BIG_ENDIAN
- /* Officially this is what we are doing, but the readl() is faster */
- /* unfortunately it isn't endian aware of the struct */
- memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr));
- hdr->count = le16_to_cpu(hdr->count);
-#else
- ((unsigned int*)hdr)[0] = readl(hdr_start);
-#endif
-}
-
-/* Block input and output are easy on shared memory ethercards, the only
- complication is when the ring buffer wraps, or in this case, when a
- packet spans an 8KB boundary. Note that the current 8KB segment is
- already set by the get_8390_hdr routine. */
-
-static void ultra32_block_input(struct net_device *dev,
- int count,
- struct sk_buff *skb,
- int ring_offset)
-{
- void __iomem *xfer_start = ei_status.mem + (ring_offset & 0x1fff);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- if ((ring_offset & ~0x1fff) != ((ring_offset + count - 1) & ~0x1fff)) {
- int semi_count = 8192 - (ring_offset & 0x1FFF);
- memcpy_fromio(skb->data, xfer_start, semi_count);
- count -= semi_count;
- if (ring_offset < 96*256) {
- /* Select next 8KB Window. */
- ring_offset += semi_count;
- outb(ei_status.reg0 | ((ring_offset & 0x6000) >> 13), RamReg);
- memcpy_fromio(skb->data + semi_count, ei_status.mem, count);
- } else {
- /* Select first 8KB Window. */
- outb(ei_status.reg0, RamReg);
- memcpy_fromio(skb->data + semi_count, ei_status.mem + TX_PAGES * 256, count);
- }
- } else {
- memcpy_fromio(skb->data, xfer_start, count);
- }
-}
-
-static void ultra32_block_output(struct net_device *dev,
- int count,
- const unsigned char *buf,
- int start_page)
-{
- void __iomem *xfer_start = ei_status.mem + (start_page<<8);
- unsigned int RamReg = dev->base_addr - ULTRA32_NIC_OFFSET + ULTRA32_CFG3;
-
- /* Select first 8KB Window. */
- outb(ei_status.reg0, RamReg);
-
- memcpy_toio(xfer_start, buf, count);
-}
-
-#ifdef MODULE
-#define MAX_ULTRA32_CARDS 4 /* Max number of Ultra cards per module */
-static struct net_device *dev_ultra[MAX_ULTRA32_CARDS];
-
-MODULE_DESCRIPTION("SMC Ultra32 EISA ethernet driver");
-MODULE_LICENSE("GPL");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
- struct net_device *dev = ultra32_probe(-1);
- if (IS_ERR(dev))
- break;
- dev_ultra[found++] = dev;
- }
- if (found)
- return 0;
- printk(KERN_WARNING "smc-ultra32.c: No SMC Ultra32 found.\n");
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < MAX_ULTRA32_CARDS; this_dev++) {
- struct net_device *dev = dev_ultra[this_dev];
- if (dev) {
- unregister_netdev(dev);
- cleanup_card(dev);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
-
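
Reviewer note: the Ultra32 code just removed slides an 8KB host window over the card's 32KB of RAM, picking the window from bits 13-14 of the ring offset and the in-window offset from the low 13 bits (see ultra32_block_input() and ultra32_get_8390_hdr() above). A minimal standalone sketch of that arithmetic follows; a printf() stub stands in for the outb() to the card's RAM register, and nothing here is the kernel API.

    #include <stdio.h>

    static void select_window(unsigned char reg0, int window)
    {
        /* Driver equivalent: outb(reg0 | window, RamReg); */
        printf("select window %d (register value 0x%02x)\n", window, reg0 | window);
    }

    static void locate(unsigned char reg0, int ring_offset)
    {
        int window = (ring_offset & 0x6000) >> 13;  /* which 8KB segment */
        int offset = ring_offset & 0x1fff;          /* offset inside it  */

        select_window(reg0, window);
        printf("ring offset 0x%04x -> window %d, offset 0x%04x\n",
               ring_offset, window, offset);
    }

    int main(void)
    {
        locate(0x00, 0x2345);   /* lands 0x345 bytes into the second window */
        return 0;
    }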
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e4ff3894911..ed956e08d38 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -135,7 +135,6 @@ config ETHOC
source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
-source "drivers/net/ethernet/racal/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index d4473072654..8268d85f944 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -53,7 +53,6 @@ obj-$(CONFIG_ETHOC) += ethoc.o
obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/
obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/
obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
-obj-$(CONFIG_NET_VENDOR_RACAL) += racal/
obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c1fdb8be8be..a175d0be1ae 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -425,8 +425,8 @@ static int mii_probe(struct net_device *dev, int phy_mode)
return -EINVAL;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, phy_mode);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &bfin_mac_adjust_link, phy_mode);
if (IS_ERR(phydev)) {
netdev_err(dev, "could not attach PHY\n");
@@ -498,10 +498,10 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, dev_name(&dev->dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static void bfin_mac_ethtool_getwol(struct net_device *dev,
@@ -647,7 +647,6 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
setup_mac_addr(dev->dev_addr);
return 0;
}
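
Reviewer note: the get_drvinfo() hunks above (and the similar ones in the greth and au1000 drivers below) replace unbounded strcpy()/strncpy() calls with strlcpy() bounded by sizeof() of the destination field. The rough standalone sketch below shows that pattern; a local struct and a hand-rolled my_strlcpy() stand in for the kernel types so it builds on its own, and the 32-byte fields are placeholders, not the real ethtool_drvinfo layout.

    #include <stdio.h>
    #include <string.h>

    struct drvinfo { char driver[32]; char version[32]; char bus_info[32]; };

    /* Bounded copy that always NUL-terminates and reports the source length. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = (len >= size) ? size - 1 : len;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    static void fill_drvinfo(struct drvinfo *info)
    {
        /* Every copy is bounded by the size of its destination field. */
        my_strlcpy(info->driver, "example_mac", sizeof(info->driver));
        my_strlcpy(info->version, "1.1", sizeof(info->version));
        my_strlcpy(info->bus_info, "platform device", sizeof(info->bus_info));
    }

    int main(void)
    {
        struct drvinfo info;

        fill_drvinfo(&info);
        printf("%s %s (%s)\n", info.driver, info.version, info.bus_info);
        return 0;
    }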
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index aa53115bb38..0be2195e503 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1127,10 +1127,11 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strncpy(info->driver, dev_driver_string(greth->dev), 32);
- strncpy(info->version, "revision: 1.0", 32);
- strncpy(info->bus_info, greth->dev->bus->name, 32);
- strncpy(info->fw_version, "N/A", 32);
+ strlcpy(info->driver, dev_driver_string(greth->dev),
+ sizeof(info->driver));
+ strlcpy(info->version, "revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
info->eedump_len = 0;
info->regdump_len = sizeof(struct greth_regs);
}
@@ -1287,9 +1288,7 @@ static int greth_mdio_probe(struct net_device *dev)
}
ret = phy_connect_direct(dev, phy, &greth_link_change,
- 0, greth->gbit_mac ?
- PHY_INTERFACE_MODE_GMII :
- PHY_INTERFACE_MODE_MII);
+ greth->gbit_mac ? PHY_INTERFACE_MODE_GMII : PHY_INTERFACE_MODE_MII);
if (ret) {
if (netif_msg_ifup(greth))
dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 8350f4b37a8..13d74aa4033 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -105,19 +105,6 @@ config DECLANCE
DEC (now Compaq) based on the AMD LANCE chipset, including the
DEPCA series. (This chipset is better known via the NE2100 cards.)
-config DEPCA
- tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
- depends on (ISA || EISA || MCA)
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto> as well as
- <file:drivers/net/ethernet/amd/depca.c>.
-
- To compile this driver as a module, choose M here. The module
- will be called depca.
-
config HPLANCE
bool "HP on-board LANCE support"
depends on DIO
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 175caa5328c..cdd4301a973 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_DECLANCE) += declance.o
-obj-$(CONFIG_DEPCA) += depca.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_LANCE) += lance.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 2ea221ed477..de774d41914 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -437,8 +437,8 @@ static int au1000_mii_probe(struct net_device *dev)
/* now we are supposed to have a proper phydev, to attach to... */
BUG_ON(phydev->attached_dev);
- phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -587,10 +587,10 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
+ aup->mac_id);
info->regdump_len = 0;
}
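
Reviewer note: the phy_connect() and phy_connect_direct() hunks here, in bfin_mac.c and in greth.c all track the same PHY-library change: the flags argument, which every caller in this patch passed as 0, is dropped, so the interface type now follows the link-change handler directly. The sketch below only mirrors the new argument order with stub types so it compiles on its own; it is not the kernel API.

    #include <stdio.h>

    struct net_device { const char *name; };

    typedef void (*adjust_link_t)(struct net_device *dev);

    enum phy_interface { PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_GMII };

    /* Stand-in with the post-change argument order:
     * (dev, bus_id, handler, interface) -- no flags parameter. */
    static int phy_connect_stub(struct net_device *dev, const char *bus_id,
                                adjust_link_t handler, enum phy_interface mode)
    {
        printf("%s: attach PHY %s, mode %d\n", dev->name, bus_id, mode);
        handler(dev);
        return 0;
    }

    static void adjust_link(struct net_device *dev)
    {
        printf("%s: link state re-evaluated\n", dev->name);
    }

    int main(void)
    {
        struct net_device dev = { "eth0" };

        /* Old call sites passed a literal 0 between the handler and the mode. */
        return phy_connect_stub(&dev, "mdio-bus:01", adjust_link,
                                PHY_INTERFACE_MODE_MII);
    }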
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
deleted file mode 100644
index 34a485363d5..00000000000
--- a/drivers/net/ethernet/amd/depca.c
+++ /dev/null
@@ -1,1910 +0,0 @@
-/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
-
- Written 1994, 1995 by David C. Davies.
-
-
- Copyright 1994 David C. Davies
- and
- United States Government
- (as represented by the Director, National Security Agency).
-
- Copyright 1995 Digital Equipment Corporation.
-
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- This driver is written for the Digital Equipment Corporation series
- of DEPCA and EtherWORKS ethernet cards:
-
- DEPCA (the original)
- DE100
- DE101
- DE200 Turbo
- DE201 Turbo
- DE202 Turbo (TP BNC)
- DE210
- DE422 (EISA)
-
- The driver has been tested on DE100, DE200 and DE202 cards in a
- relatively busy network. The DE422 has been tested a little.
-
- This driver will NOT work for the DE203, DE204 and DE205 series of
- cards, since they have a new custom ASIC in place of the AMD LANCE
- chip. See the 'ewrk3.c' driver in the Linux source tree for running
- those cards.
-
- I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
- a DECstation 5000/200.
-
- The author may be reached at davies@maniac.ultranet.com
-
- =========================================================================
-
- The driver was originally based on the 'lance.c' driver from Donald
- Becker which is included with the standard driver distribution for
- linux. V0.4 is a complete re-write with only the kernel interface
- remaining from the original code.
-
- 1) Lance.c code in /linux/drivers/net/
- 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
- AMD, 1992 [(800) 222-9323].
- 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
- AMD, Pub. #17881, May 1993.
- 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
- AMD, Pub. #16907, May 1992
- 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
- 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
- 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
- Digital Equipment Corporation, 1989
- 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
- Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
-
-
- Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
- driver.
-
- The original DEPCA card requires that the ethernet ROM address counter
- be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
- only done when a 0x08 is read as the first address octet (to minimise
- the chances of writing over some other hardware's I/O register). The
- NICSR accesses have been changed to byte accesses for all the cards
- supported by this driver, since there is only one useful bit in the MSB
- (remote boot timeout) and it is not used. Also, there is a maximum of
- only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
- help debugging all this (and holding my feet to the fire until I got it
- right).
-
- The DE200 series boards have on-board 64kB RAM for use as a shared
- memory network buffer. Only the DE100 cards make use of a 2kB buffer
- mode which has not been implemented in this driver (only the 32kB and
- 64kB modes are supported [16kB/48kB for the original DEPCA]).
-
- At the most only 2 DEPCA cards can be supported on the ISA bus because
- there is only provision for two I/O base addresses on each card (0x300
- and 0x200). The I/O address is detected by searching for a byte sequence
- in the Ethernet station address PROM at the expected I/O address for the
- Ethernet PROM. The shared memory base address is 'autoprobed' by
- looking for the self test PROM and detecting the card name. When a
- second DEPCA is detected, information is placed in the base_addr
- variable of the next device structure (which is created if necessary),
- thus enabling ethif_probe initialization for the device. More than 2
- EISA cards can be supported, but care will be needed assigning the
- shared memory to ensure that each slot has the correct IRQ, I/O address
- and shared memory address assigned.
-
- ************************************************************************
-
- NOTE: If you are using two ISA DEPCAs, it is important that you assign
- the base memory addresses correctly. The driver autoprobes I/O 0x300
- then 0x200. The base memory address for the first device must be less
- than that of the second so that the auto probe will correctly assign the
- I/O and memory addresses on the same card. I can't think of a way to do
- this unambiguously at the moment, since there is nothing on the cards to
- tie I/O and memory information together.
-
- I am unable to test 2 cards together for now, so this code is
- unchecked. All reports, good or bad, are welcome.
-
- ************************************************************************
-
- The board IRQ setting must be at an unused IRQ which is auto-probed
- using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
- {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
- really IRQ9 in machines with 16 IRQ lines.
-
- No 16MB memory limitation should exist with this driver as DMA is not
- used and the common memory area is in low memory on the network card (my
- current system has 20MB and I've not had problems yet).
-
- The ability to load this driver as a loadable module has been added. To
- utilise this ability, you have to do <8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy depca.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) if you wish, edit the source code near line 1530 to reflect the I/O
- address and IRQ you're using (see also 5).
- 3) compile depca.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the depca configuration turned off and reboot.
- 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
- [Alan Cox: Changed the code to allow command line irq/io assignments]
- [Dave Davies: Changed the code to allow command line mem/name
- assignments]
- 6) run the net startup bits for your eth?? interface manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- Note that autoprobing is not allowed in loadable modules - the system is
- already up and running and you're messing with interrupts.
-
- To unload a module, turn off the associated interface
- 'ifconfig eth?? down' then 'rmmod depca'.
-
- To assign a base memory address for the shared memory when running as a
- loadable module, see 5 above. To include the adapter name (if you have
- no PROM but know the card name) also see 5 above. Note that this last
- option will not work with kernel built-in depca's.
-
- The shared memory assignment for a loadable module makes sense to avoid
- the 'memory autoprobe' picking the wrong shared memory (for the case of
- 2 depca's in a PC).
-
- ************************************************************************
- Support for MCA EtherWORKS cards added 11-3-98. (MCA since deleted)
- Verified to work with up to 2 DE212 cards in a system (although not
- fully stress-tested).
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 25-jan-94 Initial writing.
- 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
- 0.3 1-feb-94 Added multiple DEPCA support.
- 0.31 4-feb-94 Added DE202 recognition.
- 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
- 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
- Add jabber packet fix from murf@perftech.com
- and becker@super.org
- 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
- 0.35 8-mar-94 Added DE201 recognition. Tidied up.
- 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
- 0.36 16-may-94 DE422 fix released.
- 0.37 22-jul-94 Added MODULE support
- 0.38 15-aug-94 Added DBR ROM switch in depca_close().
- Multi DEPCA bug fix.
- 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
- 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
- 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
- <stromain@alf.dec.com>
- 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
- 0.385 3-apr-95 Fix a recognition bug reported by
- <ryan.niemi@lastfrontier.com>
- 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
- 0.40 25-May-95 Rewrite for portability & updated.
- ALPHA support from <jestabro@amt.tay1.dec.com>
- 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
- suggestion by <heiko@colossus.escape.de>
- 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
- modules.
- Add 'adapter_name' for loadable modules when no PROM.
- Both above from a suggestion by
- <pchen@woodruffs121.residence.gatech.edu>.
- Add new multicasting code.
- 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
- 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
- reported by <mmogilvi@elbert.uccs.edu>
- 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
- by <tymm@computer.org>
- 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
- 0.5 14-Nov-98 Re-spin for 2.1.x kernels.
- 0.51 27-Jun-99 Correct received packet length for CRC from
- report by <worm@dkik.dk>
- 0.52 16-Oct-00 Fixes for 2.3 io memory accesses
- Fix show-stopper (ints left masked) in depca_interrupt
- by <peterd@pnd-pc.demon.co.uk>
- 0.53 12-Jan-01 Release resources on failure, bss tidbits
- by acme@conectiva.com.br
- 0.54 08-Nov-01 use library crc32 functions
- by Matt_Domsch@dell.com
- 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
-
- =========================================================================
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#ifdef CONFIG_EISA
-#include <linux/eisa.h>
-#endif
-
-#include "depca.h"
-
-static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
-
-#ifdef DEPCA_DEBUG
-static int depca_debug = DEPCA_DEBUG;
-#else
-static int depca_debug = 1;
-#endif
-
-#define DEPCA_NDA 0xffe0 /* No Device Address */
-
-#define TX_TIMEOUT (1*HZ)
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Set the number of Tx and Rx buffers. Ensure that the memory requested
-** here is <= to the amount of shared memory set up by the board switches.
-** The number of descriptors MUST BE A POWER OF 2.
-**
-** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 8 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
-#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
-
-/*
-** EISA bus defines
-*/
-#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-
-/*
-** ISA Bus defines
-*/
-#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
-#define DEPCA_TOTAL_SIZE 0x10
-
-static struct {
- u_long iobase;
- struct platform_device *device;
-} depca_io_ports[] = {
- { 0x300, NULL },
- { 0x200, NULL },
- { 0 , NULL },
-};
-
-/*
-** Name <-> Adapter mapping
-*/
-#define DEPCA_SIGNATURE {"DEPCA",\
- "DE100","DE101",\
- "DE200","DE201","DE202",\
- "DE210","DE212",\
- "DE422",\
- ""}
-
-static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
-
-enum depca_type {
- DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
-};
-
-static char depca_string[] = "depca";
-
-static int depca_device_remove (struct device *device);
-
-#ifdef CONFIG_EISA
-static struct eisa_device_id depca_eisa_ids[] = {
- { "DEC4220", de422 },
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
-
-static int depca_eisa_probe (struct device *device);
-
-static struct eisa_driver depca_eisa_driver = {
- .id_table = depca_eisa_ids,
- .driver = {
- .name = depca_string,
- .probe = depca_eisa_probe,
- .remove = depca_device_remove
- }
-};
-#endif
-
-static int depca_isa_probe (struct platform_device *);
-
-static int depca_isa_remove(struct platform_device *pdev)
-{
- return depca_device_remove(&pdev->dev);
-}
-
-static struct platform_driver depca_isa_driver = {
- .probe = depca_isa_probe,
- .remove = depca_isa_remove,
- .driver = {
- .name = depca_string,
- },
-};
-
-/*
-** Miscellaneous info...
-*/
-#define DEPCA_STRLEN 16
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
-#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
-
-/*
-** The DEPCA Rx and Tx ring descriptors.
-*/
-struct depca_rx_desc {
- volatile s32 base;
- s16 buf_length; /* This length is negative 2's complement! */
- s16 msg_length; /* This length is "normal". */
-};
-
-struct depca_tx_desc {
- volatile s32 base;
- s16 length; /* This length is negative 2's complement! */
- s16 misc; /* Errors and TDR info */
-};
-
-#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
- to LANCE memory address space */
-
-/*
-** The Lance initialization block, described in databook, in common memory.
-*/
-struct depca_init {
- u16 mode; /* Mode register */
- u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
- u8 mcast_table[8]; /* Multicast Hash Table. */
- u32 rx_ring; /* Rx ring base pointer & ring length */
- u32 tx_ring; /* Tx ring base pointer & ring length */
-};
-
-#define DEPCA_PKT_STAT_SZ 16
-#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DEPCA_PKT_STAT_SZ */
-struct depca_private {
- char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
- enum depca_type adapter; /* Adapter type */
- enum {
- DEPCA_BUS_ISA = 1,
- DEPCA_BUS_EISA,
- } depca_bus; /* type of bus */
- struct depca_init init_block; /* Shadow Initialization block */
-/* CPU address space fields */
- struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
- struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
- void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
- void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
- void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
- u_long mem_start; /* Bus address of device RAM (before remap) */
- u_long mem_len; /* device memory size */
-/* Device address space fields */
- u_long device_ram_start; /* Start of RAM in device addr space */
-/* Offsets used in both address spaces */
- u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
- u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
- u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
-/* Kernel-only (not device) fields */
- int rx_new, tx_new; /* The next free ring entry */
- int rx_old, tx_old; /* The ring entries to be free()ed. */
- spinlock_t lock;
- struct { /* Private stats counters */
- u32 bins[DEPCA_PKT_STAT_SZ];
- u32 unicast;
- u32 multicast;
- u32 broadcast;
- u32 excessive_collisions;
- u32 tx_underruns;
- u32 excessive_underruns;
- } pktStats;
- int txRingMask; /* TX ring mask */
- int rxRingMask; /* RX ring mask */
- s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
- s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
-};
-
-/*
-** The transmit ring full condition is described by the tx_old and tx_new
-** pointers by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingMask = tx_new Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingMask-lp->tx_new:\
- lp->tx_old -lp->tx_new-1)
-
-/*
-** Public Functions
-*/
-static int depca_open(struct net_device *dev);
-static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t depca_interrupt(int irq, void *dev_id);
-static int depca_close(struct net_device *dev);
-static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static void depca_tx_timeout(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-/*
-** Private functions
-*/
-static void depca_init_ring(struct net_device *dev);
-static int depca_rx(struct net_device *dev);
-static int depca_tx(struct net_device *dev);
-
-static void LoadCSRs(struct net_device *dev);
-static int InitRestartDepca(struct net_device *dev);
-static int DepcaSignature(char *name, u_long paddr);
-static int DevicePresent(u_long ioaddr);
-static int get_hw_addr(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int load_packet(struct net_device *dev, struct sk_buff *skb);
-static void depca_dbg_open(struct net_device *dev);
-
-static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
-static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
-static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
-static u_char *depca_irq;
-
-static int irq;
-static int io;
-static char *adapter_name;
-static int mem; /* For loadable module assignment
- use insmod mem=0x????? .... */
-module_param (irq, int, 0);
-module_param (io, int, 0);
-module_param (adapter_name, charp, 0);
-module_param (mem, int, 0);
-MODULE_PARM_DESC(irq, "DEPCA IRQ number");
-MODULE_PARM_DESC(io, "DEPCA I/O base address");
-MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
-MODULE_PARM_DESC(mem, "DEPCA shared memory address");
-MODULE_LICENSE("GPL");
-
-/*
-** Miscellaneous defines...
-*/
-#define STOP_DEPCA \
- outw(CSR0, DEPCA_ADDR);\
- outw(STOP, DEPCA_DATA)
-
-static const struct net_device_ops depca_netdev_ops = {
- .ndo_open = depca_open,
- .ndo_start_xmit = depca_start_xmit,
- .ndo_stop = depca_close,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = depca_ioctl,
- .ndo_tx_timeout = depca_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init depca_hw_init (struct net_device *dev, struct device *device)
-{
- struct depca_private *lp;
- int i, j, offset, netRAM, mem_len, status = 0;
- s16 nicsr;
- u_long ioaddr;
- u_long mem_start;
-
- /*
- * We are now supposed to enter this function with the
- * following fields filled with proper values :
- *
- * dev->base_addr
- * lp->mem_start
- * lp->depca_bus
- * lp->adapter
- *
- * dev->irq can be set if known from device configuration (on
- * MCA or EISA) or module option. Otherwise, it will be auto
- * detected.
- */
-
- ioaddr = dev->base_addr;
-
- STOP_DEPCA;
-
- nicsr = inb(DEPCA_NICSR);
- nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
- outb(nicsr, DEPCA_NICSR);
-
- if (inw(DEPCA_DATA) != STOP) {
- return -ENXIO;
- }
-
- lp = netdev_priv(dev);
- mem_start = lp->mem_start;
-
- if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
- return -ENXIO;
-
- printk("%s: %s at 0x%04lx",
- dev_name(device), depca_signature[lp->adapter], ioaddr);
-
- switch (lp->depca_bus) {
-#ifdef CONFIG_EISA
- case DEPCA_BUS_EISA:
- printk(" (EISA slot %d)", to_eisa_device(device)->slot);
- break;
-#endif
-
- case DEPCA_BUS_ISA:
- break;
-
- default:
- printk("Unknown DEPCA bus %d\n", lp->depca_bus);
- return -ENXIO;
- }
-
- printk(", h/w address ");
- status = get_hw_addr(dev);
- printk("%pM", dev->dev_addr);
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- }
-
- /* Set up the maximum amount of network RAM(kB) */
- netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
- if ((nicsr & _128KB) && (lp->adapter == de422))
- netRAM = 128;
-
- /* Shared Memory Base Address */
- if (nicsr & BUF) {
- nicsr &= ~BS; /* DEPCA RAM in top 32k */
- netRAM -= 32;
- mem_start += 0x8000;
- }
-
- if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
- > (netRAM << 10)) {
- printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
- return -ENXIO;
- }
-
- printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
-
- /* Enable the shadow RAM. */
- if (lp->adapter != DEPCA) {
- nicsr |= SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- spin_lock_init(&lp->lock);
- sprintf(lp->adapter_name, "%s (%s)",
- depca_signature[lp->adapter], dev_name(device));
- status = -EBUSY;
-
- /* Initialisation Block */
- if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
- printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
- goto out_priv;
- }
-
- status = -EIO;
- lp->sh_mem = ioremap(mem_start, mem_len);
- if (lp->sh_mem == NULL) {
- printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
- goto out1;
- }
-
- lp->mem_start = mem_start;
- lp->mem_len = mem_len;
- lp->device_ram_start = mem_start & LA_MASK;
-
- offset = 0;
- offset += sizeof(struct depca_init);
-
- /* Tx & Rx descriptors (aligned to a quadword boundary) */
- offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
- lp->rx_ring = lp->sh_mem + offset;
- lp->rx_ring_offset = offset;
-
- offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
- lp->tx_ring = lp->sh_mem + offset;
- lp->tx_ring_offset = offset;
-
- offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
-
- lp->buffs_offset = offset;
-
- /* Finish initialising the ring information. */
- lp->rxRingMask = NUM_RX_DESC - 1;
- lp->txRingMask = NUM_TX_DESC - 1;
-
- /* Calculate Tx/Rx RLEN size for the descriptors. */
- for (i = 0, j = lp->rxRingMask; j > 0; i++) {
- j >>= 1;
- }
- lp->rx_rlen = (s32) (i << 29);
- for (i = 0, j = lp->txRingMask; j > 0; i++) {
- j >>= 1;
- }
- lp->tx_rlen = (s32) (i << 29);
-
- /* Load the initialisation block */
- depca_init_ring(dev);
-
- /* Initialise the control and status registers */
- LoadCSRs(dev);
-
- /* Enable DEPCA board interrupts for autoprobing */
- nicsr = ((nicsr & ~IM) | IEN);
- outb(nicsr, DEPCA_NICSR);
-
- /* To auto-IRQ we enable the initialization-done and DMA err,
- interrupts. For now we will always get a DMA error. */
- if (dev->irq < 2) {
- unsigned char irqnum;
- unsigned long irq_mask, delay;
-
- irq_mask = probe_irq_on();
-
- /* Assign the correct irq list */
- switch (lp->adapter) {
- case DEPCA:
- case de100:
- case de101:
- depca_irq = de1xx_irq;
- break;
- case de200:
- case de201:
- case de202:
- case de210:
- case de212:
- depca_irq = de2xx_irq;
- break;
- case de422:
- depca_irq = de422_irq;
- break;
-
- default:
- break; /* Not reached */
- }
-
- /* Trigger an initialization just for the interrupt. */
- outw(INEA | INIT, DEPCA_DATA);
-
- delay = jiffies + HZ/50;
- while (time_before(jiffies, delay))
- yield();
-
- irqnum = probe_irq_off(irq_mask);
-
- status = -ENXIO;
- if (!irqnum) {
- printk(" and failed to detect IRQ line.\n");
- goto out2;
- } else {
- for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
- if (irqnum == depca_irq[i]) {
- dev->irq = irqnum;
- printk(" and uses IRQ%d.\n", dev->irq);
- }
-
- if (!dev->irq) {
- printk(" but incorrect IRQ line detected.\n");
- goto out2;
- }
- }
- } else {
- printk(" and assigned IRQ%d.\n", dev->irq);
- }
-
- if (depca_debug > 1) {
- printk(version);
- }
-
- /* The DEPCA-specific entries in the device structure. */
- dev->netdev_ops = &depca_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- dev->mem_start = 0;
-
- dev_set_drvdata(device, dev);
- SET_NETDEV_DEV (dev, device);
-
- status = register_netdev(dev);
- if (status == 0)
- return 0;
-out2:
- iounmap(lp->sh_mem);
-out1:
- release_mem_region (mem_start, mem_len);
-out_priv:
- return status;
-}
-
-
-static int depca_open(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- s16 nicsr;
- int status = 0;
-
- STOP_DEPCA;
- nicsr = inb(DEPCA_NICSR);
-
- /* Make sure the shadow RAM is enabled */
- if (lp->adapter != DEPCA) {
- nicsr |= SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- /* Re-initialize the DEPCA... */
- depca_init_ring(dev);
- LoadCSRs(dev);
-
- depca_dbg_open(dev);
-
- if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
- printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
- status = -EAGAIN;
- } else {
-
- /* Enable DEPCA board interrupts and turn off LED */
- nicsr = ((nicsr & ~IM & ~LED) | IEN);
- outb(nicsr, DEPCA_NICSR);
- outw(CSR0, DEPCA_ADDR);
-
- netif_start_queue(dev);
-
- status = InitRestartDepca(dev);
-
- if (depca_debug > 1) {
- printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
- printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
- }
- }
- return status;
-}
-
-/* Initialize the lance Rx and Tx descriptor rings. */
-static void depca_init_ring(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_int i;
- u_long offset;
-
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- lp->rx_new = lp->tx_new = 0;
- lp->rx_old = lp->tx_old = 0;
-
- /* Initialize the base address and length of each buffer in the ring */
- for (i = 0; i <= lp->rxRingMask; i++) {
- offset = lp->buffs_offset + i * RX_BUFF_SZ;
- writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
- writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
- lp->rx_buff[i] = lp->sh_mem + offset;
- }
-
- for (i = 0; i <= lp->txRingMask; i++) {
- offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
- writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
- lp->tx_buff[i] = lp->sh_mem + offset;
- }
-
- /* Set up the initialization block */
- lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
- lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
-
- SetMulticastFilter(dev);
-
- for (i = 0; i < ETH_ALEN; i++) {
- lp->init_block.phys_addr[i] = dev->dev_addr[i];
- }
-
- lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
-}
-
-
-static void depca_tx_timeout(struct net_device *dev)
-{
- u_long ioaddr = dev->base_addr;
-
- printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
-
- STOP_DEPCA;
- depca_init_ring(dev);
- LoadCSRs(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
- InitRestartDepca(dev);
-}
-
-
-/*
-** Writes a socket buffer to TX descriptor ring and starts transmission
-*/
-static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- int status = 0;
-
- /* Transmitter timeout, serious problems. */
- if (skb->len < 1)
- goto out;
-
- if (skb_padto(skb, ETH_ZLEN))
- goto out;
-
- netif_stop_queue(dev);
-
- if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
- status = load_packet(dev, skb);
-
- if (!status) {
- /* Trigger an immediate send demand. */
- outw(CSR0, DEPCA_ADDR);
- outw(INEA | TDMD, DEPCA_DATA);
-
- dev_kfree_skb(skb);
- }
- if (TX_BUFFS_AVAIL)
- netif_start_queue(dev);
- } else
- status = NETDEV_TX_LOCKED;
-
- out:
- return status;
-}
-
-/*
-** The DEPCA interrupt handler.
-*/
-static irqreturn_t depca_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct depca_private *lp;
- s16 csr0, nicsr;
- u_long ioaddr;
-
- if (dev == NULL) {
- printk("depca_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- /* mask the DEPCA board interrupts and turn on the LED */
- nicsr = inb(DEPCA_NICSR);
- nicsr |= (IM | LED);
- outb(nicsr, DEPCA_NICSR);
-
- outw(CSR0, DEPCA_ADDR);
- csr0 = inw(DEPCA_DATA);
-
- /* Acknowledge all of the current interrupt sources ASAP. */
- outw(csr0 & INTE, DEPCA_DATA);
-
- if (csr0 & RINT) /* Rx interrupt (packet arrived) */
- depca_rx(dev);
-
- if (csr0 & TINT) /* Tx interrupt (packet sent) */
- depca_tx(dev);
-
- /* Any resources available? */
- if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
- netif_wake_queue(dev);
- }
-
- /* Unmask the DEPCA board interrupts and turn off the LED */
- nicsr = (nicsr & ~IM & ~LED);
- outb(nicsr, DEPCA_NICSR);
-
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-/* Called with lp->lock held */
-static int depca_rx(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- int i, entry;
- s32 status;
-
- for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
- status = readl(&lp->rx_ring[entry].base) >> 16;
- if (status & R_STP) { /* Remember start of frame */
- lp->rx_old = entry;
- }
- if (status & R_ENP) { /* Valid frame status */
- if (status & R_ERR) { /* There was an error. */
- dev->stats.rx_errors++; /* Update the error stats. */
- if (status & R_FRAM)
- dev->stats.rx_frame_errors++;
- if (status & R_OFLO)
- dev->stats.rx_over_errors++;
- if (status & R_CRC)
- dev->stats.rx_crc_errors++;
- if (status & R_BUFF)
- dev->stats.rx_fifo_errors++;
- } else {
- short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
- unsigned char *buf;
- skb_reserve(skb, 2); /* 16 byte align the IP header */
- buf = skb_put(skb, pkt_len);
- if (entry < lp->rx_old) { /* Wrapped buffer */
- len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
- memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
- memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
- } else { /* Linear buffer */
- memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
- }
-
- /*
- ** Notify the upper protocol layers that there is another
- ** packet to handle
- */
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-
- /*
- ** Update stats
- */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
- if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DEPCA_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf,
- dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
- }
- } else {
- printk("%s: Memory squeeze, deferring packet.\n", dev->name);
- dev->stats.rx_dropped++; /* Really, deferred. */
- break;
- }
- }
- /* Change buffer ownership for this last frame, back to the adapter */
- for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
- writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
- }
- writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
- }
-
- return 0;
-}
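
A frame received by depca_rx() can wrap past the last RX buffer back to buffer 0, which is the `entry < lp->rx_old` branch above. A standalone sketch of just that copy, assuming the RX buffers form one contiguous block (as they do in the driver's shared RAM); ring_base, buf_sz and the index names are illustrative, not driver symbols:

#include <stdint.h>
#include <string.h>

/* Copy a pkt_len byte frame that starts in buffer 'first' of a ring of
 * (ring_mask + 1) fixed-size, contiguously laid out buffers.  Mirrors the
 * wrapped/linear branches of depca_rx() above; like the driver, it assumes
 * pkt_len exceeds the head portion whenever last < first.
 */
void copy_ring_frame(uint8_t *dst, const uint8_t *ring_base,
		     unsigned int first, unsigned int last,
		     unsigned int ring_mask, size_t buf_sz, size_t pkt_len)
{
	if (last < first) {	/* frame wraps past the end of the ring */
		size_t head = (ring_mask - first + 1) * buf_sz;

		memcpy(dst, ring_base + first * buf_sz, head);
		memcpy(dst + head, ring_base, pkt_len - head);
	} else {		/* frame is contiguous */
		memcpy(dst, ring_base + first * buf_sz, pkt_len);
	}
}
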
-
-/*
-** Buffer sent - check for buffer errors.
-** Called with lp->lock held
-*/
-static int depca_tx(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- int entry;
- s32 status;
- u_long ioaddr = dev->base_addr;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = readl(&lp->tx_ring[entry].base) >> 16;
-
- if (status < 0) { /* Packet not yet sent! */
- break;
- } else if (status & T_ERR) { /* An error occurred. */
- status = readl(&lp->tx_ring[entry].misc);
- dev->stats.tx_errors++;
- if (status & TMD3_RTRY)
- dev->stats.tx_aborted_errors++;
- if (status & TMD3_LCAR)
- dev->stats.tx_carrier_errors++;
- if (status & TMD3_LCOL)
- dev->stats.tx_window_errors++;
- if (status & TMD3_UFLO)
- dev->stats.tx_fifo_errors++;
- if (status & (TMD3_BUFF | TMD3_UFLO)) {
- /* Trigger an immediate send demand. */
- outw(CSR0, DEPCA_ADDR);
- outw(INEA | TDMD, DEPCA_DATA);
- }
- } else if (status & (T_MORE | T_ONE)) {
- dev->stats.collisions++;
- } else {
- dev->stats.tx_packets++;
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
- }
-
- return 0;
-}
-
-static int depca_close(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- s16 nicsr;
- u_long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- outw(CSR0, DEPCA_ADDR);
-
- if (depca_debug > 1) {
- printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
- }
-
- /*
- ** We stop the DEPCA here -- it occasionally polls
- ** memory if we don't.
- */
- outw(STOP, DEPCA_DATA);
-
- /*
- ** Give back the ROM in case the user wants to go to DOS
- */
- if (lp->adapter != DEPCA) {
- nicsr = inb(DEPCA_NICSR);
- nicsr &= ~SHE;
- outb(nicsr, DEPCA_NICSR);
- }
-
- /*
- ** Free the associated irq
- */
- free_irq(dev->irq, dev);
- return 0;
-}
-
-static void LoadCSRs(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
-
- outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
- outw((u16) lp->device_ram_start, DEPCA_DATA);
- outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
- outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
- outw(CSR3, DEPCA_ADDR); /* ALE control */
- outw(ACON, DEPCA_DATA);
-
- outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
-}
-
-static int InitRestartDepca(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- int i, status = 0;
-
- /* Copy the shadow init_block to shared memory */
- memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
-
- outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
- outw(INIT, DEPCA_DATA); /* initialize DEPCA */
-
- /* wait for lance to complete initialisation */
- for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
-
- if (i != 100) {
- /* clear IDON by writing a "1", enable interrupts and start lance */
- outw(IDON | INEA | STRT, DEPCA_DATA);
- if (depca_debug > 2) {
- printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
- }
- } else {
- printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
- status = -1;
- }
-
- return status;
-}
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void set_multicast_list(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
-
- if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
- lp->init_block.mode |= PROM;
- } else {
- SetMulticastFilter(dev);
- lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
- }
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Big endian crc one liner is mine, all mine, ha ha ha ha!
-** LANCE calculates its hash codes big endian.
-*/
-static void SetMulticastFilter(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i, j, bit, byte;
- u16 hashcode;
- u32 crc;
-
- if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- lp->init_block.mcast_table[i] = (char) 0xff;
- }
- } else {
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
- lp->init_block.mcast_table[i] = 0;
- }
- /* Add multicast addresses */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc(ETH_ALEN, ha->addr);
- hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
- for (j = 0; j < 5; j++) { /* ... in reverse order. */
- hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
- }
-
- byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
- lp->init_block.mcast_table[byte] |= bit;
- }
- }
-}
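
The hash described in the comment above can be reproduced outside the driver. The sketch below uses a bit-serial big-endian CRC-32 (polynomial 0x04C11DB7, LSB of each byte first), which is what the kernel's ether_crc() computes, then takes the six low CRC bits in reverse order exactly as SetMulticastFilter() does. The multicast address and helper name are just examples:

#include <stdint.h>
#include <stdio.h>

/* Bit-serial big-endian CRC-32 (poly 0x04C11DB7), LSB of each byte first. */
static uint32_t crc32_be_bitwise(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t octet = data[i];

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      (((crc >> 31) ^ (octet & 1)) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_be_bitwise(mc_addr, 6);
	unsigned int hashcode = crc & 1;	/* 6 LSBs of the CRC ... */
	int j;

	for (j = 0; j < 5; j++)			/* ... taken in reverse order */
		hashcode = (hashcode << 1) | ((crc >>= 1) & 1);

	printf("hash %u -> filter byte %u, bit mask 0x%02x\n",
	       hashcode, hashcode >> 3, 1u << (hashcode & 0x07));
	return 0;
}
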
-
-static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
-{
- int status = 0;
-
- if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
- status = -EBUSY;
- goto out;
- }
-
- if (DevicePresent(ioaddr)) {
- status = -ENODEV;
- goto out_release;
- }
-
- if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
- status = -ENOMEM;
- goto out_release;
- }
-
- return 0;
-
- out_release:
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-
-/*
-** ISA bus I/O device probe
-*/
-
-static void __init depca_platform_probe (void)
-{
- int i;
- struct platform_device *pldev;
-
- for (i = 0; depca_io_ports[i].iobase; i++) {
- depca_io_ports[i].device = NULL;
-
- /* if an address has been specified on the command
- * line, use it (if valid) */
- if (io && io != depca_io_ports[i].iobase)
- continue;
-
- pldev = platform_device_alloc(depca_string, i);
- if (!pldev)
- continue;
-
- pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
- depca_io_ports[i].device = pldev;
-
- if (platform_device_add(pldev)) {
- depca_io_ports[i].device = NULL;
- pldev->dev.platform_data = NULL;
- platform_device_put(pldev);
- continue;
- }
-
- if (!pldev->dev.driver) {
- /* The driver was not bound to this device, there was
- * no hardware at this address. Unregister it, as the
- * release function will take care of freeing the
- * allocated structure */
-
- depca_io_ports[i].device = NULL;
- pldev->dev.platform_data = NULL;
- platform_device_unregister (pldev);
- }
- }
-}
-
-static enum depca_type __init depca_shmem_probe (ulong *mem_start)
-{
- u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
- enum depca_type adapter = unknown;
- int i;
-
- for (i = 0; mem_base[i]; i++) {
- *mem_start = mem ? mem : mem_base[i];
- adapter = DepcaSignature (adapter_name, *mem_start);
- if (adapter != unknown)
- break;
- }
-
- return adapter;
-}
-
-static int depca_isa_probe(struct platform_device *device)
-{
- struct net_device *dev;
- struct depca_private *lp;
- u_long ioaddr, mem_start = 0;
- enum depca_type adapter = unknown;
- int status = 0;
-
- ioaddr = (u_long) device->dev.platform_data;
-
- if ((status = depca_common_init (ioaddr, &dev)))
- goto out;
-
- adapter = depca_shmem_probe (&mem_start);
-
- if (adapter == unknown) {
- status = -ENODEV;
- goto out_free;
- }
-
- dev->base_addr = ioaddr;
- dev->irq = irq; /* Use whatever value the user gave
- * us, and 0 if he didn't. */
- lp = netdev_priv(dev);
- lp->depca_bus = DEPCA_BUS_ISA;
- lp->adapter = adapter;
- lp->mem_start = mem_start;
-
- if ((status = depca_hw_init(dev, &device->dev)))
- goto out_free;
-
- return 0;
-
- out_free:
- free_netdev (dev);
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-
-/*
-** EISA callbacks from sysfs.
-*/
-
-#ifdef CONFIG_EISA
-static int __init depca_eisa_probe (struct device *device)
-{
- enum depca_type adapter = unknown;
- struct eisa_device *edev;
- struct net_device *dev;
- struct depca_private *lp;
- u_long ioaddr, mem_start;
- int status = 0;
-
- edev = to_eisa_device (device);
- ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
-
- if ((status = depca_common_init (ioaddr, &dev)))
- goto out;
-
- /* It would have been nice to get card configuration from the
- * card. Unfortunately, this register is write-only (shares
- * its address with the ethernet prom)... As we don't parse
- * the EISA configuration structures (yet... :-), just rely on
- * the ISA probing to sort it out... */
-
- adapter = depca_shmem_probe (&mem_start);
- if (adapter == unknown) {
- status = -ENODEV;
- goto out_free;
- }
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
- lp = netdev_priv(dev);
- lp->depca_bus = DEPCA_BUS_EISA;
- lp->adapter = edev->id.driver_data;
- lp->mem_start = mem_start;
-
- if ((status = depca_hw_init(dev, device)))
- goto out_free;
-
- return 0;
-
- out_free:
- free_netdev (dev);
- release_region (ioaddr, DEPCA_TOTAL_SIZE);
- out:
- return status;
-}
-#endif
-
-static int depca_device_remove(struct device *device)
-{
- struct net_device *dev;
- struct depca_private *lp;
- int bus;
-
- dev = dev_get_drvdata(device);
- lp = netdev_priv(dev);
-
- unregister_netdev (dev);
- iounmap (lp->sh_mem);
- release_mem_region (lp->mem_start, lp->mem_len);
- release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
- bus = lp->depca_bus;
- free_netdev (dev);
-
- return 0;
-}
-
-/*
-** Look for a particular board name in the on-board Remote Diagnostics
-** and Boot (readb) ROM. This will also give us a clue to the network RAM
-** base address.
-*/
-static int __init DepcaSignature(char *name, u_long base_addr)
-{
- u_int i, j, k;
- void __iomem *ptr;
- char tmpstr[16];
- u_long prom_addr = base_addr + 0xc000;
- u_long mem_addr = base_addr + 0x8000; /* 32KB */
-
- /* Can't reserve the prom region, it is already marked as
- * used, at least on x86. Instead, reserve a memory region a
- * board would certainly use. If it works, go ahead. If not,
- * run like hell... */
-
- if (!request_mem_region (mem_addr, 16, depca_string))
- return unknown;
-
- /* Copy the first 16 bytes of ROM */
-
- ptr = ioremap(prom_addr, 16);
- if (ptr == NULL) {
- printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
- return unknown;
- }
- for (i = 0; i < 16; i++) {
- tmpstr[i] = readb(ptr + i);
- }
- iounmap(ptr);
-
- release_mem_region (mem_addr, 16);
-
- /* Check if PROM contains a valid string */
- for (i = 0; *depca_signature[i] != '\0'; i++) {
- for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
- if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
- k++;
- } else { /* lost signature; begin search again */
- k = 0;
- }
- }
- if (k == strlen(depca_signature[i]))
- break;
- }
-
- /* Check if name string is valid, provided there's no PROM */
- if (name && *name && (i == unknown)) {
- for (i = 0; *depca_signature[i] != '\0'; i++) {
- if (strcmp(name, depca_signature[i]) == 0)
- break;
- }
- }
-
- return i;
-}
-
-/*
-** Look for a special sequence in the Ethernet station address PROM that
-** is common across all DEPCA products. Note that the original DEPCA needs
-** its ROM address counter to be initialized and enabled. Only enable
-** if the first address octet is a 0x08 - this minimises the chances of
-** messing around with some other hardware, but it assumes that this DEPCA
-** card initialized itself correctly.
-**
-** Search the Ethernet address ROM for the signature. Since the ROM address
-** counter can start at an arbitrary point, the search must include the entire
-** probe sequence length plus the (length_of_the_signature - 1).
-** Stop the search IMMEDIATELY after the signature is found so that the
-** PROM address counter is correctly positioned at the start of the
-** ethernet address for later read out.
-*/
-static int __init DevicePresent(u_long ioaddr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- }
- dev;
- short sigLength = 0;
- s8 data;
- s16 nicsr;
- int i, j, status = 0;
-
- data = inb(DEPCA_PROM); /* clear counter on DEPCA */
- data = inb(DEPCA_PROM); /* read data */
-
- if (data == 0x08) { /* Enable counter on DEPCA */
- nicsr = inb(DEPCA_NICSR);
- nicsr |= AAC;
- outb(nicsr, DEPCA_NICSR);
- }
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
- data = inb(DEPCA_PROM);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j = 1;
- } else {
- j = 0;
- }
- }
- }
-
- if (j != sigLength) {
- status = -ENODEV; /* search failed */
- }
-
- return status;
-}
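
The PROM scan above is a simple sliding match: keep a position in the signature, advance on a matching byte, and fall back to position 1 or 0 on a mismatch. The same matcher over an in-memory byte stream, as a standalone sketch with made-up function and parameter names (and the same simplified restart rule as the driver):

#include <stddef.h>

/* Return the number of bytes consumed up to and including the end of the
 * first match of 'sig' in 'stream', or -1 if no match is found within
 * 'limit' bytes.  Mirrors the j/data tracking in DevicePresent() above.
 */
int find_signature(const unsigned char *stream, size_t limit,
		   const unsigned char *sig, size_t sig_len)
{
	size_t i, j = 0;

	for (i = 0; i < limit && j < sig_len; i++) {
		if (stream[i] == sig[j])
			j++;					/* track the signature */
		else
			j = (stream[i] == sig[0]) ? 1 : 0;	/* lost it; restart */
	}

	return (j == sig_len) ? (int)i : -1;
}
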
-
-/*
-** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
-** reason: access the upper half of the PROM with x=0; access the lower half
-** with x=1.
-*/
-static int __init get_hw_addr(struct net_device *dev)
-{
- u_long ioaddr = dev->base_addr;
- struct depca_private *lp = netdev_priv(dev);
- int i, k, tmp, status = 0;
- u_short j, x, chksum;
-
- x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
-
- for (i = 0, k = 0, j = 0; j < 3; j++) {
- k <<= 1;
- if (k > 0xffff)
- k -= 0xffff;
-
- k += (u_char) (tmp = inb(DEPCA_PROM + x));
- dev->dev_addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
-
- if (k > 0xffff)
- k -= 0xffff;
- }
- if (k == 0xffff)
- k = 0;
-
- chksum = (u_char) inb(DEPCA_PROM + x);
- chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
- if (k != chksum)
- status = -1;
-
- return status;
-}
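
The checksum in get_hw_addr() rotates the running 16-bit sum left by one (wrapping at 0xffff) before adding each address word, then compares it with the two checksum bytes that follow the address in the PROM. The same arithmetic over an in-memory copy of the eight PROM bytes, as an illustrative sketch:

#include <stdint.h>

/* Verify a DEPCA-style station address checksum over prom[0..5], with the
 * 16-bit checksum stored little-endian in prom[6..7].  Returns 0 on success,
 * -1 on mismatch, mirroring get_hw_addr() above.
 */
int check_station_address(const uint8_t prom[8])
{
	unsigned int k = 0;
	int i, j;

	for (i = 0, j = 0; j < 3; j++) {
		k <<= 1;
		if (k > 0xffff)
			k -= 0xffff;

		k += prom[i++];
		k += prom[i++] << 8;

		if (k > 0xffff)
			k -= 0xffff;
	}
	if (k == 0xffff)
		k = 0;

	return (k == (unsigned int)(prom[6] | (prom[7] << 8))) ? 0 : -1;
}
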
-
-/*
-** Load a packet into the shared memory
-*/
-static int load_packet(struct net_device *dev, struct sk_buff *skb)
-{
- struct depca_private *lp = netdev_priv(dev);
- int i, entry, end, len, status = NETDEV_TX_OK;
-
- entry = lp->tx_new; /* Ring around buffer number. */
- end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
- if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
- /*
- ** Caution: the write order is important here... don't set up the
- ** ownership rights until all the other information is in place.
- */
- if (end < entry) { /* wrapped buffer */
- len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
- memcpy_toio(lp->tx_buff[entry], skb->data, len);
- memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
- } else { /* linear buffer */
- memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
- }
-
- /* set up the buffer descriptors */
- len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
- for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
- /* clean out flags */
- writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
- writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
- writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
- len -= TX_BUFF_SZ;
- }
- /* clean out flags */
- writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
- writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
- writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
-
- /* start of packet */
- writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
- /* end of packet */
- writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
-
- for (i = end; i != entry; --i) {
- /* ownership of packet */
- writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
- if (i == 0)
- i = lp->txRingMask + 1;
- }
- writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
-
- lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
- } else {
- status = NETDEV_TX_LOCKED;
- }
-
- return status;
-}
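
The first thing load_packet() does is work out which descriptor the frame ends on: (entry + (len - 1) / TX_BUFF_SZ) masked back into the ring. That calculation in isolation, with made-up buffer and ring sizes:

#include <stdio.h>

/* Index of the last descriptor used by a frame of 'len' bytes that starts
 * at descriptor 'entry', with fixed TX buffers of 'buf_sz' bytes and a
 * power-of-two ring described by 'ring_mask' (slots - 1).
 */
static unsigned int last_tx_slot(unsigned int entry, unsigned int len,
				 unsigned int buf_sz, unsigned int ring_mask)
{
	return (entry + (len - 1) / buf_sz) & ring_mask;
}

int main(void)
{
	/* 1514-byte frame, 1024-byte buffers, 8-slot ring, starting at slot 7:
	 * it needs two buffers and the last one wraps around to slot 0.
	 */
	printf("last slot = %u\n", last_tx_slot(7, 1514, 1024, 7));
	return 0;
}
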
-
-static void depca_dbg_open(struct net_device *dev)
-{
- struct depca_private *lp = netdev_priv(dev);
- u_long ioaddr = dev->base_addr;
- struct depca_init *p = &lp->init_block;
- int i;
-
- if (depca_debug > 1) {
- /* Do not copy the shadow init block into shared memory */
- /* Debugging should not affect normal operation! */
- /* The shadow init block will get copied across during InitRestartDepca */
- printk("%s: depca open with irq %d\n", dev->name, dev->irq);
- printk("Descriptor head addresses (CPU):\n");
- printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
- printk("Descriptor addresses (CPU):\nRX: ");
- for (i = 0; i < lp->rxRingMask; i++) {
- if (i < 3) {
- printk("%p ", &lp->rx_ring[i].base);
- }
- }
- printk("...%p\n", &lp->rx_ring[i].base);
- printk("TX: ");
- for (i = 0; i < lp->txRingMask; i++) {
- if (i < 3) {
- printk("%p ", &lp->tx_ring[i].base);
- }
- }
- printk("...%p\n", &lp->tx_ring[i].base);
- printk("\nDescriptor buffers (Device):\nRX: ");
- for (i = 0; i < lp->rxRingMask; i++) {
- if (i < 3) {
- printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
- }
- }
- printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
- printk("TX: ");
- for (i = 0; i < lp->txRingMask; i++) {
- if (i < 3) {
- printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
- }
- }
- printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
- printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
- printk(" mode: 0x%4.4x\n", p->mode);
- printk(" physical address: %pM\n", p->phys_addr);
- printk(" multicast hash table: ");
- for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
- printk("%2.2x:", p->mcast_table[i]);
- }
- printk("%2.2x\n", p->mcast_table[i]);
- printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
- printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
- printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
- printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
- printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
- outw(CSR2, DEPCA_ADDR);
- printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
- outw(CSR1, DEPCA_ADDR);
- printk("%4.4x\n", inw(DEPCA_DATA));
- outw(CSR3, DEPCA_ADDR);
- printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
- }
-}
-
-/*
-** Perform IOCTL call functions here. Some are privileged operations and the
-** effective uid is checked in those cases.
-** The multicast IOCTLs do not work here and are for testing purposes only.
-*/
-static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct depca_private *lp = netdev_priv(dev);
- struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
- int i, status = 0;
- u_long ioaddr = dev->base_addr;
- union {
- u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
- u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
- u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
- } tmp;
- unsigned long flags;
- void *buf;
-
- switch (ioc->cmd) {
- case DEPCA_GET_HWADDR: /* Get the hardware address */
- for (i = 0; i < ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- ioc->len = ETH_ALEN;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len))
- return -EFAULT;
- break;
-
- case DEPCA_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
- return -EFAULT;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_SET_PROM: /* Set Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- lp->init_block.mode |= PROM; /* Set promiscuous mode */
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- netif_stop_queue(dev);
- while (lp->tx_old != lp->tx_new)
- cpu_relax(); /* Wait for the ring to empty */
-
- STOP_DEPCA; /* Temporarily stop the depca. */
- depca_init_ring(dev); /* Initialize the descriptor rings */
- lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
-
- LoadCSRs(dev); /* Reload CSR3 */
- InitRestartDepca(dev); /* Resume normal operation. */
- netif_start_queue(dev); /* Unlock the TX ring */
- break;
-
- case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if(!capable(CAP_NET_ADMIN))
- return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DEPCA_GET_MCA: /* Get the multicast address table */
- ioc->len = (HASH_TABLE_LEN >> 3);
- if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
- return -EFAULT;
- break;
-
- case DEPCA_SET_MCA: /* Set a multicast address */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- if (ioc->len >= HASH_TABLE_LEN)
- return -EINVAL;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
- return -EFAULT;
- set_multicast_list(dev);
- break;
-
- case DEPCA_CLR_MCA: /* Clear all multicast addresses */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- set_multicast_list(dev);
- break;
-
- case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- set_multicast_list(dev);
- break;
-
- case DEPCA_GET_STATS: /* Get the driver statistics */
- ioc->len = sizeof(lp->pktStats);
- buf = kmalloc(ioc->len, GFP_KERNEL);
- if(!buf)
- return -ENOMEM;
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(buf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, buf, ioc->len))
- status = -EFAULT;
- kfree(buf);
- break;
-
- case DEPCA_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DEPCA_GET_REG: /* Get the DEPCA Registers */
- i = 0;
- tmp.sval[i++] = inw(DEPCA_NICSR);
- outw(CSR0, DEPCA_ADDR); /* status register */
- tmp.sval[i++] = inw(DEPCA_DATA);
- memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
- ioc->len = i + sizeof(struct depca_init);
- if (copy_to_user(ioc->data, tmp.addr, ioc->len))
- return -EFAULT;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
-
-static int __init depca_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_EISA
- err = eisa_driver_register(&depca_eisa_driver);
- if (err)
- goto err_eisa;
-#endif
- err = platform_driver_register(&depca_isa_driver);
- if (err)
- goto err_eisa;
-
- depca_platform_probe();
- return 0;
-
-err_eisa:
-#ifdef CONFIG_EISA
- eisa_driver_unregister(&depca_eisa_driver);
-#endif
- return err;
-}
-
-static void __exit depca_module_exit (void)
-{
- int i;
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&depca_eisa_driver);
-#endif
- platform_driver_unregister (&depca_isa_driver);
-
- for (i = 0; depca_io_ports[i].iobase; i++) {
- if (depca_io_ports[i].device) {
- depca_io_ports[i].device->dev.platform_data = NULL;
- platform_device_unregister (depca_io_ports[i].device);
- depca_io_ports[i].device = NULL;
- }
- }
-}
-
-module_init (depca_module_init);
-module_exit (depca_module_exit);
diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h
deleted file mode 100644
index cdcfe4252c1..00000000000
--- a/drivers/net/ethernet/amd/depca.h
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- Written 1994 by David C. Davies.
-
- Copyright 1994 David C. Davies. This software may be used and distributed
- according to the terms of the GNU General Public License, incorporated herein by
- reference.
-*/
-
-/*
-** I/O addresses. Note that the 2k buffer option is not supported in
-** this driver.
-*/
-#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
-#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
-#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
-#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
-#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
-#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
-#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
-#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
-
-/*
-** These are LANCE registers addressable through DEPCA_ADDR
-*/
-#define CSR0 0
-#define CSR1 1
-#define CSR2 2
-#define CSR3 3
-
-/*
-** NETWORK INTERFACE CSR (NI_CSR) bit definitions
-*/
-
-#define TO 0x0100 /* Time Out for remote boot */
-#define SHE 0x0080 /* SHadow memory Enable */
-#define BS 0x0040 /* Bank Select */
-#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
-#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
-#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
-#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
-#define IM 0x0004 /* Interrupt Mask (1->mask) */
-#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
-#define LED 0x0001 /* LED control */
-
-/*
-** Control and Status Register 0 (CSR0) bit definitions
-*/
-
-#define ERR 0x8000 /* Error summary */
-#define BABL 0x4000 /* Babble transmitter timeout error */
-#define CERR 0x2000 /* Collision Error */
-#define MISS 0x1000 /* Missed packet */
-#define MERR 0x0800 /* Memory Error */
-#define RINT 0x0400 /* Receiver Interrupt */
-#define TINT 0x0200 /* Transmit Interrupt */
-#define IDON 0x0100 /* Initialization Done */
-#define INTR 0x0080 /* Interrupt Flag */
-#define INEA 0x0040 /* Interrupt Enable */
-#define RXON 0x0020 /* Receiver on */
-#define TXON 0x0010 /* Transmitter on */
-#define TDMD 0x0008 /* Transmit Demand */
-#define STOP 0x0004 /* Stop */
-#define STRT 0x0002 /* Start */
-#define INIT 0x0001 /* Initialize */
-#define INTM 0xff00 /* Interrupt Mask */
-#define INTE 0xfff0 /* Interrupt Enable */
-
-/*
-** CONTROL AND STATUS REGISTER 3 (CSR3)
-*/
-
-#define BSWP 0x0004 /* Byte SWaP */
-#define ACON 0x0002 /* ALE control */
-#define BCON 0x0001 /* Byte CONtrol */
-
-/*
-** Initialization Block Mode Register
-*/
-
-#define PROM 0x8000 /* Promiscuous Mode */
-#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
-#define INTL 0x0040 /* Internal Loopback */
-#define DRTY 0x0020 /* Disable Retry */
-#define COLL 0x0010 /* Force Collision */
-#define DTCR 0x0008 /* Disable Transmit CRC */
-#define LOOP 0x0004 /* Loopback */
-#define DTX 0x0002 /* Disable the Transmitter */
-#define DRX 0x0001 /* Disable the Receiver */
-
-/*
-** Receive Message Descriptor 1 (RMD1) bit definitions.
-*/
-
-#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
-#define R_ERR 0x4000 /* Error Summary */
-#define R_FRAM 0x2000 /* Framing Error */
-#define R_OFLO 0x1000 /* Overflow Error */
-#define R_CRC 0x0800 /* CRC Error */
-#define R_BUFF 0x0400 /* Buffer Error */
-#define R_STP 0x0200 /* Start of Packet */
-#define R_ENP 0x0100 /* End of Packet */
-
-/*
-** Transmit Message Descriptor 1 (TMD1) bit definitions.
-*/
-
-#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
-#define T_ERR 0x4000 /* Error Summary */
-#define T_ADD_FCS 0x2000 /* Controls FCS generation for this Tx frame */
-#define T_MORE 0x1000 /* >1 retry to transmit packet */
-#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
-#define T_DEF 0x0400 /* Deferred */
-#define T_STP 0x02000000 /* Start of Packet */
-#define T_ENP 0x01000000 /* End of Packet */
-#define T_FLAGS 0xff000000 /* TX Flags Field */
-
-/*
-** Transmit Message Descriptor 3 (TMD3) bit definitions.
-*/
-
-#define TMD3_BUFF 0x8000 /* BUFFer error */
-#define TMD3_UFLO 0x4000 /* UnderFLOw error */
-#define TMD3_RES 0x2000 /* REServed */
-#define TMD3_LCOL 0x1000 /* Late COLlision */
-#define TMD3_LCAR 0x0800 /* Loss of CARrier */
-#define TMD3_RTRY 0x0400 /* ReTRY error */
-
-/*
-** EISA configuration Register (CNFG) bit definitions
-*/
-
-#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
-#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
-#define IRQ11 0x0040 /* Enable -> 1 */
-#define IRQ10 0x0020 /* Enable -> 1 */
-#define IRQ9 0x0010 /* Enable -> 1 */
-#define IRQ5 0x0008 /* Enable -> 1 */
-#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
-#define PADR16 0x0002 /* RAM on 64kB boundary */
-#define PADR17 0x0001 /* RAM on 128kB boundary */
-
-/*
-** Miscellaneous
-*/
-#define HASH_TABLE_LEN 64 /* Bits */
-#define HASH_BITS 0x003f /* 6 LS bits */
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define EISA_EN 0x0001 /* Enable EISA bus buffers */
-#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
-#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct depca_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
-#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
-#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
-#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DEPCA_GET_MCA 0x06 /* Get a multicast address */
-#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
-#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
-#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
-#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
-#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DEPCA_GET_REG 0x0c /* Get the Register contents */
-#define DEPCA_SET_REG 0x0d /* Set the Register contents */
-#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
-
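
For completeness, a hypothetical userspace caller of these ioctls might look like the sketch below. It assumes the driver reaches depca_ioctl() through the device-private ioctl path (SIOCDEVPRIVATE) and that "eth0" is the DEPCA interface; neither assumption comes from this header, and struct depca_ioctl is simply re-declared from it:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>

struct depca_ioctl {
	unsigned short cmd;	/* Command to run */
	unsigned short len;	/* Length of the data buffer */
	unsigned char *data;	/* Pointer to the data buffer */
};

int main(void)
{
	unsigned char hwaddr[6];
	struct ifreq ifr;
	struct depca_ioctl *ioc = (struct depca_ioctl *)&ifr.ifr_ifru;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */
	ioc->cmd = 0x01;				/* DEPCA_GET_HWADDR */
	ioc->data = hwaddr;

	if (fd < 0 || ioctl(fd, SIOCDEVPRIVATE, &ifr) < 0) {
		perror("DEPCA_GET_HWADDR");
		return 1;
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n", hwaddr[0], hwaddr[1],
	       hwaddr[2], hwaddr[3], hwaddr[4], hwaddr[5]);
	return 0;
}
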
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index a227ccdcb9b..797f847edf1 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -494,19 +494,15 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_tx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
@@ -564,19 +560,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
- new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
- GFP_ATOMIC);
- if (!new_dma_addr_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
goto free_new_rx_ring;
- }
- new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
- GFP_ATOMIC);
- if (!new_skb_list) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!new_skb_list)
goto free_new_lists;
- }
/* first copy the current receive buffers */
overlap = min(size, lp->rx_ring_size);
@@ -1688,10 +1679,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memcpy(dev->dev_addr, promaddr, 6);
}
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->perm_addr))
+ if (!is_valid_ether_addr(dev->dev_addr))
memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1934,31 +1924,23 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->tx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_dma_addr)
return -ENOMEM;
- }
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
- if (!lp->rx_dma_addr) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_dma_addr)
return -ENOMEM;
- }
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->tx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->tx_skbuff)
return -ENOMEM;
- }
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
- if (!lp->rx_skbuff) {
- netif_err(lp, drv, dev, "Memory allocation failed\n");
+ if (!lp->rx_skbuff)
return -ENOMEM;
- }
return 0;
}
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index c2d696c88e4..6a40290d372 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1284,8 +1284,8 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "sunlance");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 0035c01660b..1f07fc633ab 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -472,7 +472,6 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -983,11 +982,9 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
rfd_ring->count);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
- if (unlikely(!tpd_ring->buffer_info)) {
- dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
- size);
+ if (unlikely(!tpd_ring->buffer_info))
goto err_nomem;
- }
+
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
(tpd_ring->buffer_info + count);
@@ -2075,7 +2072,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
if (unlikely(pci_dma_mapping_error(adapter->pdev,
buffer_info->dma)))
goto err_dma;
-
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
mapped_len += map_len;
@@ -2597,10 +2594,9 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (atl1c_read_mac_addr(&adapter->hw)) {
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e4466a36d10..92f4734f860 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -819,8 +819,6 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
- size);
err = -ENOMEM;
goto failed;
}
@@ -2342,7 +2340,6 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 71b3d7daa21..5b0d9931c72 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3053,7 +3053,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
if (atl1_read_mac_addr(&adapter->hw)) {
/* mark random mac */
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index aab83a2d4e0..1278b47022e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1433,14 +1433,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
-/* FIXME: do we still need this? */
-#ifdef ETHTOOL_GPERMADDR
- memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
-#else
if (!is_valid_ether_addr(netdev->dev_addr)) {
-#endif
err = -EIO;
goto err_eeprom;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 77ffbc4a507..f82eb169946 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -84,7 +84,6 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
atlx_set_mac_addr(&adapter->hw);
return 0;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f55267363f3..3e69b3f8809 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -121,4 +121,22 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config BNX2X_SRIOV
+ bool "Broadcom 578xx and 57712 SR-IOV support"
+ depends on BNX2X && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support in the 578xx and 57712 products. This
+ allows for virtual function acceleration in virtual environments.
+
+config BGMAC
+ tristate "BCMA bus GBit core support"
+ depends on BCMA_HOST_SOC && HAS_DMA
+ ---help---
+ This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
+ They can be found on BCM47xx SoCs and provide gigabit ethernet.
+	  When using this driver on a BCM4706 you also need to enable
+	  BCMA_DRIVER_GMAC_CMN for it to work.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index b7896051d54..68efa1a3fb8 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
+obj-$(CONFIG_BGMAC) += bgmac.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 219f6226fcb..a7efec29303 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -809,11 +809,10 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = netdev_alloc_skb(bp->dev, len + 2);
+ copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
if (copy_skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
/* DMA sync done above, copy just the actual packet */
skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
@@ -1518,10 +1517,8 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
u8 pwol_mask[B44_PMASK_SIZE];
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
- if (!pwol_pattern) {
- pr_err("Memory not available for WOL\n");
+ if (!pwol_pattern)
return;
- }
/* Ipv4 magic packet pattern - pattern 0.*/
memset(pwol_mask, 0, B44_PMASK_SIZE);
@@ -2111,8 +2108,6 @@ static int b44_get_invariants(struct b44 *bp)
return -EINVAL;
}
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
-
bp->imask = IMASK_DEF;
/* XXX - really required?
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 39387d67b72..7d81e059e81 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -799,7 +799,7 @@ static int bcm_enet_open(struct net_device *dev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
priv->mii_bus->id, priv->phy_id);
- phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
+ phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
@@ -886,10 +886,9 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
- priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+ priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -900,10 +899,9 @@ static int bcm_enet_open(struct net_device *dev)
spin_lock_init(&priv->tx_lock);
/* init & fill rx ring with skbs */
- priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+ priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -1227,10 +1225,11 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
- strncpy(drvinfo->version, bcm_enet_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "bcm63xx", 32);
+ strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, bcm_enet_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
drvinfo->n_stats = BCM_ENET_STATS_LEN;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
new file mode 100644
index 00000000000..3fd32880e52
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -0,0 +1,1461 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bgmac.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <asm/mach-bcm47xx/nvram.h>
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+ u32 value, int timeout)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < timeout / 10; i++) {
+ val = bcma_read32(core, reg);
+ if ((val & mask) == value)
+ return true;
+ udelay(10);
+ }
+ pr_err("Timeout waiting for reg 0x%X\n", reg);
+ return false;
+}
+
+/**************************************************
+ * DMA
+ **************************************************/
+
+static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ u32 val;
+ int i;
+
+ if (!ring->mmio_base)
+ return;
+
+ /* Suspend DMA TX ring first.
+	 * bgmac_wait_value() doesn't support waiting for any one of several
+	 * values, so implement the whole loop here.
+ */
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
+ BGMAC_DMA_TX_SUSPEND);
+ for (i = 0; i < 10000 / 10; i++) {
+ val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ val &= BGMAC_DMA_TX_STAT;
+ if (val == BGMAC_DMA_TX_STAT_DISABLED ||
+ val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
+ val == BGMAC_DMA_TX_STAT_STOPPED) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
+ ring->mmio_base, val);
+
+ /* Remove SUSPEND bit */
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
+ if (!bgmac_wait_value(bgmac->core,
+ ring->mmio_base + BGMAC_DMA_TX_STATUS,
+ BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
+ 10000)) {
+ bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
+ ring->mmio_base);
+ udelay(300);
+ val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
+ bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
+ ring->mmio_base);
+ }
+}
+
+static void bgmac_dma_tx_enable(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ u32 ctl;
+
+ ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
+ ctl |= BGMAC_DMA_TX_ENABLE;
+ ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
+}
+
+static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct net_device *net_dev = bgmac->net_dev;
+ struct bgmac_dma_desc *dma_desc;
+ struct bgmac_slot_info *slot;
+ u32 ctl0, ctl1;
+ int free_slots;
+
+ if (skb->len > BGMAC_DESC_CTL1_LEN) {
+ bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
+ goto err_stop_drop;
+ }
+
+ if (ring->start <= ring->end)
+ free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
+ else
+ free_slots = ring->start - ring->end;
+ if (free_slots == 1) {
+ bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
+ netif_stop_queue(net_dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ slot = &ring->slots[ring->end];
+ slot->skb = skb;
+ slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+ ring->mmio_base);
+ goto err_stop_drop;
+ }
+
+ ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
+ if (ring->end == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+
+ dma_desc = ring->cpu_base;
+ dma_desc += ring->end;
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+
+ wmb();
+
+ /* Increase ring->end to point empty slot. We tell hardware the first
+ * slot it should *not* read.
+ */
+ if (++ring->end >= BGMAC_TX_RING_SLOTS)
+ ring->end = 0;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+ ring->end * sizeof(struct bgmac_dma_desc));
+
+ /* Always keep one slot free to allow detecting bugged calls. */
+ if (--free_slots == 1)
+ netif_stop_queue(net_dev);
+
+ return NETDEV_TX_OK;
+
+err_stop_drop:
+ netif_stop_queue(net_dev);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
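
bgmac_dma_tx_add() deliberately keeps one descriptor unused so that start == end always means an empty ring; the free-slot count above reflects that. The same bookkeeping as a standalone sketch, with an illustrative slot count standing in for BGMAC_TX_RING_SLOTS:

#include <stdio.h>

#define RING_SLOTS 128	/* illustrative; the driver uses BGMAC_TX_RING_SLOTS */

/* Number of currently unused descriptors between the reclaim pointer
 * ('start', first slot not yet freed) and the fill pointer ('end', next
 * slot to use), mirroring the computation in bgmac_dma_tx_add() above.
 */
static int tx_free_slots(int start, int end)
{
	if (start <= end)
		return start - end + RING_SLOTS;
	return start - end;
}

int main(void)
{
	printf("empty ring:     %d free\n", tx_free_slots(0, 0));	/* 128 */
	printf("one in flight:  %d free\n", tx_free_slots(5, 6));	/* 127 */
	printf("nearly full:    %d free\n", tx_free_slots(7, 6));	/* 1 -> stop queue */
	return 0;
}
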
+
+/* Free transmitted packets */
+static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ int empty_slot;
+ bool freed = false;
+
+ /* The last slot that hardware didn't consume yet */
+ empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ empty_slot /= sizeof(struct bgmac_dma_desc);
+
+ while (ring->start != empty_slot) {
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+
+ if (slot->skb) {
+ /* Unmap no longer used buffer */
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ slot->skb->len, DMA_TO_DEVICE);
+ slot->dma_addr = 0;
+
+ /* Free memory! :) */
+ dev_kfree_skb(slot->skb);
+ slot->skb = NULL;
+ } else {
+ bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
+ ring->start, ring->end);
+ }
+
+ if (++ring->start >= BGMAC_TX_RING_SLOTS)
+ ring->start = 0;
+ freed = true;
+ }
+
+ if (freed && netif_queue_stopped(bgmac->net_dev))
+ netif_wake_queue(bgmac->net_dev);
+}
+
+static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+{
+ if (!ring->mmio_base)
+ return;
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
+ if (!bgmac_wait_value(bgmac->core,
+ ring->mmio_base + BGMAC_DMA_RX_STATUS,
+ BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
+ 10000))
+ bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
+ ring->mmio_base);
+}
+
+static void bgmac_dma_rx_enable(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ u32 ctl;
+
+ ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
+ ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
+ ctl |= BGMAC_DMA_RX_ENABLE;
+ ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
+ ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
+ ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
+}
+
+static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
+ struct bgmac_slot_info *slot)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_rx_header *rx;
+
+ /* Alloc skb */
+ slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+ if (!slot->skb) {
+ bgmac_err(bgmac, "Allocation of skb failed!\n");
+ return -ENOMEM;
+ }
+
+ /* Poison - if everything goes fine, hardware will overwrite it */
+ rx = (struct bgmac_rx_header *)slot->skb->data;
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ /* Map skb for the DMA */
+ slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dma_dev, slot->dma_addr)) {
+ bgmac_err(bgmac, "DMA mapping error\n");
+ return -ENOMEM;
+ }
+ if (slot->dma_addr & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ return 0;
+}
+
+static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+ int weight)
+{
+ u32 end_slot;
+ int handled = 0;
+
+ end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
+ end_slot &= BGMAC_DMA_RX_STATDPTR;
+ end_slot /= sizeof(struct bgmac_dma_desc);
+
+ ring->end = end_slot;
+
+ while (ring->start != ring->end) {
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+ struct sk_buff *skb = slot->skb;
+ struct sk_buff *new_skb;
+ struct bgmac_rx_header *rx;
+ u16 len, flags;
+
+ /* Unmap buffer to make it accessible to the CPU */
+ dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ /* Get info from the header */
+ rx = (struct bgmac_rx_header *)skb->data;
+ len = le16_to_cpu(rx->len);
+ flags = le16_to_cpu(rx->flags);
+
+ /* Check for poison and drop or pass the packet */
+ if (len == 0xdead && flags == 0xbeef) {
+ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
+ } else {
+ new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
+ if (new_skb) {
+ skb_put(new_skb, len);
+ skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
+ new_skb->data,
+ len);
+ new_skb->protocol =
+ eth_type_trans(new_skb, bgmac->net_dev);
+ netif_receive_skb(new_skb);
+ handled++;
+ } else {
+ bgmac->net_dev->stats.rx_dropped++;
+ bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
+ }
+
+ /* Poison the old skb */
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+ }
+
+ /* Make it back accessible to the hardware */
+ dma_sync_single_for_device(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+
+ if (++ring->start >= BGMAC_RX_RING_SLOTS)
+ ring->start = 0;
+
+ if (handled >= weight) /* Should never be greater */
+ break;
+ }
+
+ return handled;
+}
+
+/* Does ring support unaligned addressing? */
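+/* This is a probe rather than a capability bit: 0xff0 is written to the
+ * ring-base-low register and read back. Reading a non-zero value suggests the
+ * core kept the low bits, i.e. presumably it does not force the ring base to
+ * be aligned. The callers only warn about it; aligned rings are always used.
+ */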
+static bool bgmac_dma_unaligned(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring,
+ enum bgmac_dma_ring_type ring_type)
+{
+ switch (ring_type) {
+ case BGMAC_DMA_RING_TX:
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ 0xff0);
+ if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
+ return true;
+ break;
+ case BGMAC_DMA_RING_RX:
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ 0xff0);
+ if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
+ return true;
+ break;
+ }
+ return false;
+}
+
+static void bgmac_dma_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot;
+ int size;
+ int i;
+
+ for (i = 0; i < ring->num_slots; i++) {
+ slot = &ring->slots[i];
+ if (slot->skb) {
+ if (slot->dma_addr)
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ slot->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ }
+ }
+
+ if (ring->cpu_base) {
+ /* Free ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ dma_free_coherent(dma_dev, size, ring->cpu_base,
+ ring->dma_base);
+ }
+}
+
+static void bgmac_dma_free(struct bgmac *bgmac)
+{
+ int i;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+}
+
+static int bgmac_dma_alloc(struct bgmac *bgmac)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_dma_ring *ring;
+ static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
+ BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
+ int size; /* ring size: different for Tx and Rx */
+ int err;
+ int i, j;
+
+ BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
+ BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
+
+ if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ ring = &bgmac->tx_ring[i];
+ ring->num_slots = BGMAC_TX_RING_SLOTS;
+ ring->mmio_base = ring_base[i];
+ if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
+ bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+ ring->mmio_base);
+
+ /* Alloc ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
+ if (!ring->cpu_base) {
+ bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
+ ring->mmio_base);
+ goto err_dma_free;
+ }
+ if (ring->dma_base & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ /* No need to alloc TX slots yet */
+ }
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+ ring->num_slots = BGMAC_RX_RING_SLOTS;
+ ring->mmio_base = ring_base[i];
+ if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
+ bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
+ ring->mmio_base);
+
+ /* Alloc ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
+ &ring->dma_base,
+ GFP_KERNEL);
+ if (!ring->cpu_base) {
+ bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
+ ring->mmio_base);
+ err = -ENOMEM;
+ goto err_dma_free;
+ }
+ if (ring->dma_base & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ /* Alloc RX slots */
+ for (j = 0; j < ring->num_slots; j++) {
+ err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
+ if (err) {
+ bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
+ goto err_dma_free;
+ }
+ }
+ }
+
+ return 0;
+
+err_dma_free:
+ bgmac_dma_free(bgmac);
+ return -ENOMEM;
+}
+
+static void bgmac_dma_init(struct bgmac *bgmac)
+{
+ struct bgmac_dma_ring *ring;
+ struct bgmac_dma_desc *dma_desc;
+ u32 ctl0, ctl1;
+ int i, j;
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ ring = &bgmac->tx_ring[i];
+
+ /* We don't implement unaligned addressing, so enable first */
+ bgmac_dma_tx_enable(bgmac, ring);
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
+ lower_32_bits(ring->dma_base));
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
+ upper_32_bits(ring->dma_base));
+
+ ring->start = 0;
+ ring->end = 0; /* Points to the slot that should *not* be read */
+ }
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+
+ /* We don't implement unaligned addressing, so enable first */
+ bgmac_dma_rx_enable(bgmac, ring);
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
+ lower_32_bits(ring->dma_base));
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
+ upper_32_bits(ring->dma_base));
+
+ for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
+ j++, dma_desc++) {
+ ctl0 = ctl1 = 0;
+
+ if (j == ring->num_slots - 1)
+ ctl0 |= BGMAC_DESC_CTL0_EOT;
+ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+ /* Is there any BGMAC device that requires extension? */
+ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+ * B43_DMA64_DCTL1_ADDREXT_MASK;
+ */
+
+ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
+ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
+ dma_desc->ctl0 = cpu_to_le32(ctl0);
+ dma_desc->ctl1 = cpu_to_le32(ctl1);
+ }
+
+ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->num_slots * sizeof(struct bgmac_dma_desc));
+
+ ring->start = 0;
+ ring->end = 0;
+ }
+}
+
+/**************************************************
+ * PHY ops
+ **************************************************/
+
+static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
+ BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
+ BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
+ BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
+ BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bgmac->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ tmp = BGMAC_PA_START;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
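+ /* Example: reading register 0x02 of PHY 0x1e composes BGMAC_PA_START |
+ * (0x1e << BGMAC_PA_ADDR_SHIFT) | (0x02 << BGMAC_PA_REG_SHIFT),
+ * i.e. 0x421e0000.
+ */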
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+ bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return 0xffff;
+ }
+
+ return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
+static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bgmac->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ bgmac_warn(bgmac, "Error setting MDIO int\n");
+
+ tmp = BGMAC_PA_START;
+ tmp |= BGMAC_PA_WRITE;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ tmp |= value;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
+ bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
+static void bgmac_phy_force(struct bgmac *bgmac)
+{
+ u16 ctl;
+ u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
+ BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
+
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ if (bgmac->autoneg)
+ return;
+
+ ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
+ ctl &= mask;
+ if (bgmac->full_duplex)
+ ctl |= BGMAC_PHY_CTL_DUPLEX;
+ if (bgmac->speed == BGMAC_SPEED_100)
+ ctl |= BGMAC_PHY_CTL_SPEED_100;
+ else if (bgmac->speed == BGMAC_SPEED_1000)
+ ctl |= BGMAC_PHY_CTL_SPEED_1000;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
+static void bgmac_phy_advertise(struct bgmac *bgmac)
+{
+ u16 adv;
+
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ if (!bgmac->autoneg)
+ return;
+
+ /* Adv selected 10/100 speeds */
+ adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
+ adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
+ BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
+ adv |= BGMAC_PHY_ADV_10HALF;
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
+ adv |= BGMAC_PHY_ADV_100HALF;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
+ adv |= BGMAC_PHY_ADV_10FULL;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
+ adv |= BGMAC_PHY_ADV_100FULL;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
+
+ /* Adv selected 1000 speeds */
+ adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
+ adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
+ if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
+ adv |= BGMAC_PHY_ADV2_1000HALF;
+ if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
+ adv |= BGMAC_PHY_ADV2_1000FULL;
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
+
+ /* Restart */
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
+ bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
+ BGMAC_PHY_CTL_RESTART);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
+static void bgmac_phy_init(struct bgmac *bgmac)
+{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ u8 i;
+
+ if (ci->id == BCMA_CHIP_ID_BCM5356) {
+ for (i = 0; i < 5; i++) {
+ bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
+ bgmac_phy_write(bgmac, i, 0x15, 0x0100);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ }
+ }
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
+ bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
+ bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
+ for (i = 0; i < 5; i++) {
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x16, 0x5284);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ bgmac_phy_write(bgmac, i, 0x17, 0x0010);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
+ bgmac_phy_write(bgmac, i, 0x16, 0x5296);
+ bgmac_phy_write(bgmac, i, 0x17, 0x1073);
+ bgmac_phy_write(bgmac, i, 0x17, 0x9073);
+ bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
+ bgmac_phy_write(bgmac, i, 0x17, 0x9273);
+ bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
+static void bgmac_phy_reset(struct bgmac *bgmac)
+{
+ if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
+ return;
+
+ bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
+ BGMAC_PHY_CTL_RESET);
+ udelay(100);
+ if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
+ BGMAC_PHY_CTL_RESET)
+ bgmac_err(bgmac, "PHY reset failed\n");
+ bgmac_phy_init(bgmac);
+}
+
+/**************************************************
+ * Chip ops
+ **************************************************/
+
+/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
+ * there is nothing to change? Try it after stabilizing the driver.
+ */
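+/* Read-modify-write of BGMAC_CMDCFG with the MAC held in software reset:
+ * BGMAC_CMDCFG_SR is set around the update and the new value is only written
+ * if it differs from the current one (or if @force is set).
+ */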
+static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
+{
+ u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 new_val = (cmdcfg & mask) | set;
+
+ bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
+ udelay(2);
+
+ if (new_val != cmdcfg || force)
+ bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
+ udelay(2);
+}
+
+static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+{
+ u32 tmp;
+
+ tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
+ bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ tmp = (addr[4] << 8) | addr[5];
+ bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+}
+
+static void bgmac_set_rx_mode(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ if (net_dev->flags & IFF_PROMISC)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+}
+
+#if 0 /* We don't use these registers yet */
+static void bgmac_chip_stats_update(struct bgmac *bgmac)
+{
+ int i;
+
+ if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
+ for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+ bgmac->mib_tx_regs[i] =
+ bgmac_read(bgmac,
+ BGMAC_TX_GOOD_OCTETS + (i * 4));
+ for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+ bgmac->mib_rx_regs[i] =
+ bgmac_read(bgmac,
+ BGMAC_RX_GOOD_OCTETS + (i * 4));
+ }
+
+ /* TODO: what else? how to handle BCM4706? Specs are needed */
+}
+#endif
+
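+/* Setting BGMAC_DC_MROR appears to make the MIB counters clear-on-read, so a
+ * single pass over the TX and RX counter ranges is enough to zero them.
+ */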
+static void bgmac_clear_mib(struct bgmac *bgmac)
+{
+ int i;
+
+ if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ return;
+
+ bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
+ for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
+ bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
+ for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
+ bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
+static void bgmac_speed(struct bgmac *bgmac, int speed)
+{
+ u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 set = 0;
+
+ if (speed & BGMAC_SPEED_10)
+ set |= BGMAC_CMDCFG_ES_10;
+ if (speed & BGMAC_SPEED_100)
+ set |= BGMAC_CMDCFG_ES_100;
+ if (speed & BGMAC_SPEED_1000)
+ set |= BGMAC_CMDCFG_ES_1000;
+ if (!bgmac->full_duplex)
+ set |= BGMAC_CMDCFG_HD;
+ bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+}
+
+static void bgmac_miiconfig(struct bgmac *bgmac)
+{
+ u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+ BGMAC_DS_MM_SHIFT;
+ if (imode == 0 || imode == 1) {
+ if (bgmac->autoneg)
+ bgmac_speed(bgmac, BGMAC_SPEED_100);
+ else
+ bgmac_speed(bgmac, bgmac->speed);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
+static void bgmac_chip_reset(struct bgmac *bgmac)
+{
+ struct bcma_device *core = bgmac->core;
+ struct bcma_bus *bus = core->bus;
+ struct bcma_chipinfo *ci = &bus->chipinfo;
+ u32 flags = 0;
+ u32 iost;
+ int i;
+
+ if (bcma_core_is_enabled(core)) {
+ if (!bgmac->stats_grabbed) {
+ /* bgmac_chip_stats_update(bgmac); */
+ bgmac->stats_grabbed = true;
+ }
+
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
+ bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
+
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ udelay(1);
+
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
+ bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
+
+ /* TODO: Clear software multicast filter list */
+ }
+
+ iost = bcma_aread32(core, BCMA_IOST);
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
+ iost &= ~BGMAC_BCMA_IOST_ATTACHED;
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED) {
+ flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
+ if (!bgmac->has_robosw)
+ flags |= BGMAC_BCMA_IOCTL_SW_RESET;
+ }
+
+ bcma_core_enable(core, flags);
+
+ if (core->id.rev > 2) {
+ bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
+ bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
+ 1000);
+ }
+
+ if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
+ ci->id == BCMA_CHIP_ID_BCM53572) {
+ struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ u8 et_swtype = 0;
+ u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
+ BGMAC_CHIPCTL_1_IF_TYPE_RMII;
+ char buf[2];
+
+ if (nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (kstrtou8(buf, 0, &et_swtype))
+ bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
+ buf);
+ et_swtype &= 0x0f;
+ et_swtype <<= 4;
+ sw_type = et_swtype;
+ } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
+ sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
+ } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
+ sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
+ BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
+ }
+ bcma_chipco_chipctl_maskset(cc, 1,
+ ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_1_SW_TYPE_MASK),
+ sw_type);
+ }
+
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ bcma_awrite32(core, BCMA_IOCTL,
+ bcma_aread32(core, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
+
+ /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
+ * The specs don't mention using BGMAC_CMDCFG_SR, but in this routine
+ * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit has
+ * to be kept set until the MAC is taken out of reset.
+ */
+ bgmac_cmdcfg_maskset(bgmac,
+ ~(BGMAC_CMDCFG_TE |
+ BGMAC_CMDCFG_RE |
+ BGMAC_CMDCFG_RPI |
+ BGMAC_CMDCFG_TAI |
+ BGMAC_CMDCFG_HD |
+ BGMAC_CMDCFG_ML |
+ BGMAC_CMDCFG_CFE |
+ BGMAC_CMDCFG_RL |
+ BGMAC_CMDCFG_RED |
+ BGMAC_CMDCFG_PE |
+ BGMAC_CMDCFG_TPI |
+ BGMAC_CMDCFG_PAD_EN |
+ BGMAC_CMDCFG_PF),
+ BGMAC_CMDCFG_PROM |
+ BGMAC_CMDCFG_NLC |
+ BGMAC_CMDCFG_CFE |
+ BGMAC_CMDCFG_SR,
+ false);
+
+ bgmac_clear_mib(bgmac);
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
+ BCMA_GMAC_CMN_PC_MTE);
+ else
+ bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
+ bgmac_miiconfig(bgmac);
+ bgmac_phy_init(bgmac);
+
+ bgmac->int_status = 0;
+}
+
+static void bgmac_chip_intrs_on(struct bgmac *bgmac)
+{
+ bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
+}
+
+static void bgmac_chip_intrs_off(struct bgmac *bgmac)
+{
+ bgmac_write(bgmac, BGMAC_INT_MASK, 0);
+ bgmac_read(bgmac, BGMAC_INT_MASK);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
+static void bgmac_enable(struct bgmac *bgmac)
+{
+ struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ u32 cmdcfg;
+ u32 mode;
+ u32 rxq_ctl;
+ u32 fl_ctl;
+ u16 bp_clk;
+ u8 mdp;
+
+ cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
+ BGMAC_CMDCFG_SR, true);
+ udelay(2);
+ cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
+ bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+
+ mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
+ BGMAC_DS_MM_SHIFT;
+ if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
+ bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+ if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
+ bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
+ BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
+
+ switch (ci->id) {
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM47162:
+ fl_ctl = 0x03cb04cb;
+ if (ci->id == BCMA_CHIP_ID_BCM5357 ||
+ ci->id == BCMA_CHIP_ID_BCM4749 ||
+ ci->id == BCMA_CHIP_ID_BCM53572)
+ fl_ctl = 0x2300e1;
+ bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
+ bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ break;
+ }
+
+ rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
+ rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
+ bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
+ mdp = (bp_clk * 128 / 1000) - 3;
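+ /* Example: a 160 MHz backplane clock gives mdp = (160 * 128 / 1000) - 3 = 17 */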
+ rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
+ bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
+static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
+{
+ struct bgmac_dma_ring *ring;
+ int i;
+
+ /* 1 interrupt per received frame */
+ bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
+
+ /* Enable 802.3x tx flow control (honor received PAUSE frames) */
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+
+ bgmac_set_rx_mode(bgmac->net_dev);
+
+ bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
+
+ if (bgmac->loopback)
+ bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ else
+ bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+
+ bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+
+ if (!bgmac->autoneg) {
+ bgmac_speed(bgmac, bgmac->speed);
+ bgmac_phy_force(bgmac);
+ } else if (bgmac->speed) { /* if there is anything to adv */
+ bgmac_phy_advertise(bgmac);
+ }
+
+ if (full_init) {
+ bgmac_dma_init(bgmac);
+ if (1) /* FIXME: is there any case we don't want IRQs? */
+ bgmac_chip_intrs_on(bgmac);
+ } else {
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ ring = &bgmac->rx_ring[i];
+ bgmac_dma_rx_enable(bgmac, ring);
+ }
+ }
+
+ bgmac_enable(bgmac);
+}
+
+static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
+{
+ struct bgmac *bgmac = netdev_priv(dev_id);
+
+ u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
+ int_status &= bgmac->int_mask;
+
+ if (!int_status)
+ return IRQ_NONE;
+
+ /* Ack */
+ bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
+
+ /* Disable new interrupts until handling existing ones */
+ bgmac_chip_intrs_off(bgmac);
+
+ bgmac->int_status = int_status;
+
+ napi_schedule(&bgmac->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int bgmac_poll(struct napi_struct *napi, int weight)
+{
+ struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
+ struct bgmac_dma_ring *ring;
+ int handled = 0;
+
+ if (bgmac->int_status & BGMAC_IS_TX0) {
+ ring = &bgmac->tx_ring[0];
+ bgmac_dma_tx_free(bgmac, ring);
+ bgmac->int_status &= ~BGMAC_IS_TX0;
+ }
+
+ if (bgmac->int_status & BGMAC_IS_RX) {
+ ring = &bgmac->rx_ring[0];
+ handled += bgmac_dma_rx_read(bgmac, ring, weight);
+ bgmac->int_status &= ~BGMAC_IS_RX;
+ }
+
+ if (bgmac->int_status) {
+ bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
+ bgmac->int_status = 0;
+ }
+
+ if (handled < weight)
+ napi_complete(napi);
+
+ bgmac_chip_intrs_on(bgmac);
+
+ return handled;
+}
+
+/**************************************************
+ * net_device_ops
+ **************************************************/
+
+static int bgmac_open(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int err = 0;
+
+ bgmac_chip_reset(bgmac);
+ /* The specs mention reclaiming the rings here, but we do that in DMA init */
+ bgmac_chip_init(bgmac, true);
+
+ err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, net_dev);
+ if (err < 0) {
+ bgmac_err(bgmac, "IRQ request error: %d!\n", err);
+ goto err_out;
+ }
+ napi_enable(&bgmac->napi);
+
+ netif_carrier_on(net_dev);
+
+err_out:
+ return err;
+}
+
+static int bgmac_stop(struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ netif_carrier_off(net_dev);
+
+ napi_disable(&bgmac->napi);
+ bgmac_chip_intrs_off(bgmac);
+ free_irq(bgmac->core->irq, net_dev);
+
+ bgmac_chip_reset(bgmac);
+
+ return 0;
+}
+
+static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
+ struct net_device *net_dev)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct bgmac_dma_ring *ring;
+
+ /* No QOS support yet */
+ ring = &bgmac->tx_ring[0];
+ return bgmac_dma_tx_add(bgmac, ring, skb);
+}
+
+static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(net_dev, addr);
+ if (ret < 0)
+ return ret;
+ bgmac_write_mac_address(bgmac, (u8 *)addr);
+ eth_commit_mac_addr_change(net_dev, addr);
+ return 0;
+}
+
+static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = bgmac->phyaddr;
+ /* fallthru */
+ case SIOCGMIIREG:
+ if (!netif_running(net_dev))
+ return -EAGAIN;
+ data->val_out = bgmac_phy_read(bgmac, data->phy_id,
+ data->reg_num & 0x1f);
+ return 0;
+ case SIOCSMIIREG:
+ if (!netif_running(net_dev))
+ return -EAGAIN;
+ bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
+ data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops bgmac_netdev_ops = {
+ .ndo_open = bgmac_open,
+ .ndo_stop = bgmac_stop,
+ .ndo_start_xmit = bgmac_start_xmit,
+ .ndo_set_rx_mode = bgmac_set_rx_mode,
+ .ndo_set_mac_address = bgmac_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = bgmac_ioctl,
+};
+
+/**************************************************
+ * ethtool_ops
+ **************************************************/
+
+static int bgmac_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ cmd->supported = SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg;
+
+ if (bgmac->autoneg) {
+ WARN_ON(cmd->advertising);
+ if (bgmac->full_duplex) {
+ if (bgmac->speed & BGMAC_SPEED_10)
+ cmd->advertising |= ADVERTISED_10baseT_Full;
+ if (bgmac->speed & BGMAC_SPEED_100)
+ cmd->advertising |= ADVERTISED_100baseT_Full;
+ if (bgmac->speed & BGMAC_SPEED_1000)
+ cmd->advertising |= ADVERTISED_1000baseT_Full;
+ } else {
+ if (bgmac->speed & BGMAC_SPEED_10)
+ cmd->advertising |= ADVERTISED_10baseT_Half;
+ if (bgmac->speed & BGMAC_SPEED_100)
+ cmd->advertising |= ADVERTISED_100baseT_Half;
+ if (bgmac->speed & BGMAC_SPEED_1000)
+ cmd->advertising |= ADVERTISED_1000baseT_Half;
+ }
+ } else {
+ switch (bgmac->speed) {
+ case BGMAC_SPEED_10:
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+ break;
+ case BGMAC_SPEED_100:
+ ethtool_cmd_speed_set(cmd, SPEED_100);
+ break;
+ case BGMAC_SPEED_1000:
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
+ break;
+ }
+ }
+
+ cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+
+ cmd->autoneg = bgmac->autoneg;
+
+ return 0;
+}
+
+#if 0
+static int bgmac_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bgmac *bgmac = netdev_priv(net_dev);
+
+ return -1;
+}
+#endif
+
+static void bgmac_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
+}
+
+static const struct ethtool_ops bgmac_ethtool_ops = {
+ .get_settings = bgmac_get_settings,
+ .get_drvinfo = bgmac_get_drvinfo,
+};
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+ struct net_device *net_dev;
+ struct bgmac *bgmac;
+ struct ssb_sprom *sprom = &core->bus->sprom;
+ u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+ int err;
+
+ /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
+ if (core->core_unit > 1) {
+ pr_err("Unsupported core_unit %d\n", core->core_unit);
+ return -ENOTSUPP;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
+ eth_random_addr(mac);
+ dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+ }
+
+ /* Allocation and references */
+ net_dev = alloc_etherdev(sizeof(*bgmac));
+ if (!net_dev)
+ return -ENOMEM;
+ net_dev->netdev_ops = &bgmac_netdev_ops;
+ net_dev->irq = core->irq;
+ SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+ bgmac = netdev_priv(net_dev);
+ bgmac->net_dev = net_dev;
+ bgmac->core = core;
+ bcma_set_drvdata(core, bgmac);
+
+ /* Defaults */
+ bgmac->autoneg = true;
+ bgmac->full_duplex = true;
+ bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
+ memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
+
+ /* On BCM4706 we need common core to access PHY */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ !core->bus->drv_gmac_cmn.core) {
+ bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
+ err = -ENODEV;
+ goto err_netdev_free;
+ }
+ bgmac->cmn = core->bus->drv_gmac_cmn.core;
+
+ bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
+ sprom->et0phyaddr;
+ bgmac->phyaddr &= BGMAC_PHY_MASK;
+ if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+ bgmac_err(bgmac, "No PHY found\n");
+ err = -ENODEV;
+ goto err_netdev_free;
+ }
+ bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+ bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ bgmac_err(bgmac, "PCI setup not implemented\n");
+ err = -ENOTSUPP;
+ goto err_netdev_free;
+ }
+
+ bgmac_chip_reset(bgmac);
+
+ err = bgmac_dma_alloc(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+ goto err_netdev_free;
+ }
+
+ bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
+ if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+ bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
+
+ /* TODO: reset the external phy. Specs are needed */
+ bgmac_phy_reset(bgmac);
+
+ bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
+ BGMAC_BFL_ENETROBO);
+ if (bgmac->has_robosw)
+ bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
+
+ if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+
+ err = register_netdev(bgmac->net_dev);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register net device\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
+ netif_carrier_off(net_dev);
+
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
+ return 0;
+
+err_dma_free:
+ bgmac_dma_free(bgmac);
+
+err_netdev_free:
+ bcma_set_drvdata(core, NULL);
+ free_netdev(net_dev);
+
+ return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+ struct bgmac *bgmac = bcma_get_drvdata(core);
+
+ netif_napi_del(&bgmac->napi);
+ unregister_netdev(bgmac->net_dev);
+ bgmac_dma_free(bgmac);
+ bcma_set_drvdata(core, NULL);
+ free_netdev(bgmac->net_dev);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bgmac_bcma_tbl,
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+ int err;
+
+ err = bcma_driver_register(&bgmac_bcma_driver);
+ if (err)
+ return err;
+ pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+ return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+ bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
new file mode 100644
index 00000000000..4ede614c81f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -0,0 +1,453 @@
+#ifndef _BGMAC_H
+#define _BGMAC_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define bgmac_err(bgmac, fmt, ...) \
+ dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_warn(bgmac, fmt, ...) \
+ dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_info(bgmac, fmt, ...) \
+ dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+#define bgmac_dbg(bgmac, fmt, ...) \
+ dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
+
+#include <linux/bcma/bcma.h>
+#include <linux/netdevice.h>
+
+#define BGMAC_DEV_CTL 0x000
+#define BGMAC_DC_TSM 0x00000002
+#define BGMAC_DC_CFCO 0x00000004
+#define BGMAC_DC_RLSS 0x00000008
+#define BGMAC_DC_MROR 0x00000010
+#define BGMAC_DC_FCM_MASK 0x00000060
+#define BGMAC_DC_FCM_SHIFT 5
+#define BGMAC_DC_NAE 0x00000080
+#define BGMAC_DC_TF 0x00000100
+#define BGMAC_DC_RDS_MASK 0x00030000
+#define BGMAC_DC_RDS_SHIFT 16
+#define BGMAC_DC_TDS_MASK 0x000c0000
+#define BGMAC_DC_TDS_SHIFT 18
+#define BGMAC_DEV_STATUS 0x004 /* Configuration of the interface */
+#define BGMAC_DS_RBF 0x00000001
+#define BGMAC_DS_RDF 0x00000002
+#define BGMAC_DS_RIF 0x00000004
+#define BGMAC_DS_TBF 0x00000008
+#define BGMAC_DS_TDF 0x00000010
+#define BGMAC_DS_TIF 0x00000020
+#define BGMAC_DS_PO 0x00000040
+#define BGMAC_DS_MM_MASK 0x00000300 /* Mode of the interface */
+#define BGMAC_DS_MM_SHIFT 8
+#define BGMAC_BIST_STATUS 0x00c
+#define BGMAC_INT_STATUS 0x020 /* Interrupt status */
+#define BGMAC_IS_MRO 0x00000001
+#define BGMAC_IS_MTO 0x00000002
+#define BGMAC_IS_TFD 0x00000004
+#define BGMAC_IS_LS 0x00000008
+#define BGMAC_IS_MDIO 0x00000010
+#define BGMAC_IS_MR 0x00000020
+#define BGMAC_IS_MT 0x00000040
+#define BGMAC_IS_TO 0x00000080
+#define BGMAC_IS_DESC_ERR 0x00000400 /* Descriptor error */
+#define BGMAC_IS_DATA_ERR 0x00000800 /* Data error */
+#define BGMAC_IS_DESC_PROT_ERR 0x00001000 /* Descriptor protocol error */
+#define BGMAC_IS_RX_DESC_UNDERF 0x00002000 /* Receive descriptor underflow */
+#define BGMAC_IS_RX_F_OVERF 0x00004000 /* Receive FIFO overflow */
+#define BGMAC_IS_TX_F_UNDERF 0x00008000 /* Transmit FIFO underflow */
+#define BGMAC_IS_RX 0x00010000 /* Interrupt for RX queue 0 */
+#define BGMAC_IS_TX0 0x01000000 /* Interrupt for TX queue 0 */
+#define BGMAC_IS_TX1 0x02000000 /* Interrupt for TX queue 1 */
+#define BGMAC_IS_TX2 0x04000000 /* Interrupt for TX queue 2 */
+#define BGMAC_IS_TX3 0x08000000 /* Interrupt for TX queue 3 */
+#define BGMAC_IS_TX_MASK 0x0f000000
+#define BGMAC_IS_INTMASK 0x0f01fcff
+#define BGMAC_IS_ERRMASK 0x0000fc00
+#define BGMAC_INT_MASK 0x024 /* Interrupt mask */
+#define BGMAC_GP_TIMER 0x028
+#define BGMAC_INT_RECV_LAZY 0x100
+#define BGMAC_IRL_TO_MASK 0x00ffffff
+#define BGMAC_IRL_FC_MASK 0xff000000
+#define BGMAC_IRL_FC_SHIFT 24 /* Shift for the number of received frames that triggers an interrupt */
+#define BGMAC_FLOW_CTL_THRESH 0x104 /* Flow control thresholds */
+#define BGMAC_WRRTHRESH 0x108
+#define BGMAC_GMAC_IDLE_CNT_THRESH 0x10c
+#define BGMAC_PHY_ACCESS 0x180 /* PHY access address */
+#define BGMAC_PA_DATA_MASK 0x0000ffff
+#define BGMAC_PA_ADDR_MASK 0x001f0000
+#define BGMAC_PA_ADDR_SHIFT 16
+#define BGMAC_PA_REG_MASK 0x1f000000
+#define BGMAC_PA_REG_SHIFT 24
+#define BGMAC_PA_WRITE 0x20000000
+#define BGMAC_PA_START 0x40000000
+#define BGMAC_PHY_CNTL 0x188 /* PHY control address */
+#define BGMAC_PC_EPA_MASK 0x0000001f
+#define BGMAC_PC_MCT_MASK 0x007f0000
+#define BGMAC_PC_MCT_SHIFT 16
+#define BGMAC_PC_MTE 0x00800000
+#define BGMAC_TXQ_CTL 0x18c
+#define BGMAC_TXQ_CTL_DBT_MASK 0x00000fff
+#define BGMAC_TXQ_CTL_DBT_SHIFT 0
+#define BGMAC_RXQ_CTL 0x190
+#define BGMAC_RXQ_CTL_DBT_MASK 0x00000fff
+#define BGMAC_RXQ_CTL_DBT_SHIFT 0
+#define BGMAC_RXQ_CTL_PTE 0x00001000
+#define BGMAC_RXQ_CTL_MDP_MASK 0x3f000000
+#define BGMAC_RXQ_CTL_MDP_SHIFT 24
+#define BGMAC_GPIO_SELECT 0x194
+#define BGMAC_GPIO_OUTPUT_EN 0x198
+/* For 0x1e0 see BCMA_CLKCTLST */
+#define BGMAC_HW_WAR 0x1e4
+#define BGMAC_PWR_CTL 0x1e8
+#define BGMAC_DMA_BASE0 0x200 /* Tx and Rx controller */
+#define BGMAC_DMA_BASE1 0x240 /* Tx controller only */
+#define BGMAC_DMA_BASE2 0x280 /* Tx controller only */
+#define BGMAC_DMA_BASE3 0x2C0 /* Tx controller only */
+#define BGMAC_TX_GOOD_OCTETS 0x300
+#define BGMAC_TX_GOOD_OCTETS_HIGH 0x304
+#define BGMAC_TX_GOOD_PKTS 0x308
+#define BGMAC_TX_OCTETS 0x30c
+#define BGMAC_TX_OCTETS_HIGH 0x310
+#define BGMAC_TX_PKTS 0x314
+#define BGMAC_TX_BROADCAST_PKTS 0x318
+#define BGMAC_TX_MULTICAST_PKTS 0x31c
+#define BGMAC_TX_LEN_64 0x320
+#define BGMAC_TX_LEN_65_TO_127 0x324
+#define BGMAC_TX_LEN_128_TO_255 0x328
+#define BGMAC_TX_LEN_256_TO_511 0x32c
+#define BGMAC_TX_LEN_512_TO_1023 0x330
+#define BGMAC_TX_LEN_1024_TO_1522 0x334
+#define BGMAC_TX_LEN_1523_TO_2047 0x338
+#define BGMAC_TX_LEN_2048_TO_4095 0x33c
+#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_8192_TO_MAX 0x344
+#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
+#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
+#define BGMAC_TX_FRAGMENT_PKTS 0x350
+#define BGMAC_TX_UNDERRUNS 0x354 /* Error */
+#define BGMAC_TX_TOTAL_COLS 0x358
+#define BGMAC_TX_SINGLE_COLS 0x35c
+#define BGMAC_TX_MULTIPLE_COLS 0x360
+#define BGMAC_TX_EXCESSIVE_COLS 0x364 /* Error */
+#define BGMAC_TX_LATE_COLS 0x368 /* Error */
+#define BGMAC_TX_DEFERED 0x36c
+#define BGMAC_TX_CARRIER_LOST 0x370
+#define BGMAC_TX_PAUSE_PKTS 0x374
+#define BGMAC_TX_UNI_PKTS 0x378
+#define BGMAC_TX_Q0_PKTS 0x37c
+#define BGMAC_TX_Q0_OCTETS 0x380
+#define BGMAC_TX_Q0_OCTETS_HIGH 0x384
+#define BGMAC_TX_Q1_PKTS 0x388
+#define BGMAC_TX_Q1_OCTETS 0x38c
+#define BGMAC_TX_Q1_OCTETS_HIGH 0x390
+#define BGMAC_TX_Q2_PKTS 0x394
+#define BGMAC_TX_Q2_OCTETS 0x398
+#define BGMAC_TX_Q2_OCTETS_HIGH 0x39c
+#define BGMAC_TX_Q3_PKTS 0x3a0
+#define BGMAC_TX_Q3_OCTETS 0x3a4
+#define BGMAC_TX_Q3_OCTETS_HIGH 0x3a8
+#define BGMAC_RX_GOOD_OCTETS 0x3b0
+#define BGMAC_RX_GOOD_OCTETS_HIGH 0x3b4
+#define BGMAC_RX_GOOD_PKTS 0x3b8
+#define BGMAC_RX_OCTETS 0x3bc
+#define BGMAC_RX_OCTETS_HIGH 0x3c0
+#define BGMAC_RX_PKTS 0x3c4
+#define BGMAC_RX_BROADCAST_PKTS 0x3c8
+#define BGMAC_RX_MULTICAST_PKTS 0x3cc
+#define BGMAC_RX_LEN_64 0x3d0
+#define BGMAC_RX_LEN_65_TO_127 0x3d4
+#define BGMAC_RX_LEN_128_TO_255 0x3d8
+#define BGMAC_RX_LEN_256_TO_511 0x3dc
+#define BGMAC_RX_LEN_512_TO_1023 0x3e0
+#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
+#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
+#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
+#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
+#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
+#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
+#define BGMAC_RX_FRAGMENT_PKTS 0x400
+#define BGMAC_RX_MISSED_PKTS 0x404 /* Error */
+#define BGMAC_RX_CRC_ALIGN_ERRS 0x408 /* Error */
+#define BGMAC_RX_UNDERSIZE 0x40c /* Error */
+#define BGMAC_RX_CRC_ERRS 0x410 /* Error */
+#define BGMAC_RX_ALIGN_ERRS 0x414 /* Error */
+#define BGMAC_RX_SYMBOL_ERRS 0x418 /* Error */
+#define BGMAC_RX_PAUSE_PKTS 0x41c
+#define BGMAC_RX_NONPAUSE_PKTS 0x420
+#define BGMAC_RX_SACHANGES 0x424
+#define BGMAC_RX_UNI_PKTS 0x428
+#define BGMAC_UNIMAC_VERSION 0x800
+#define BGMAC_HDBKP_CTL 0x804
+#define BGMAC_CMDCFG 0x808 /* Configuration */
+#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
+#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
+#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
+#define BGMAC_CMDCFG_ES_10 0x00000000
+#define BGMAC_CMDCFG_ES_100 0x00000004
+#define BGMAC_CMDCFG_ES_1000 0x00000008
+#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
+#define BGMAC_CMDCFG_PAD_EN 0x00000020
+#define BGMAC_CMDCFG_CF 0x00000040
+#define BGMAC_CMDCFG_PF 0x00000080
+#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
+#define BGMAC_CMDCFG_TAI 0x00000200
+#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
+#define BGMAC_CMDCFG_HD_SHIFT 10
+#define BGMAC_CMDCFG_SR 0x00000800 /* Set to reset mode */
+#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
+#define BGMAC_CMDCFG_AE 0x00400000
+#define BGMAC_CMDCFG_CFE 0x00800000
+#define BGMAC_CMDCFG_NLC 0x01000000
+#define BGMAC_CMDCFG_RL 0x02000000
+#define BGMAC_CMDCFG_RED 0x04000000
+#define BGMAC_CMDCFG_PE 0x08000000
+#define BGMAC_CMDCFG_TPI 0x10000000
+#define BGMAC_CMDCFG_AT 0x20000000
+#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
+#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
+#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
+#define BGMAC_PAUSEQUANTA 0x818
+#define BGMAC_MAC_MODE 0x844
+#define BGMAC_OUTERTAG 0x848
+#define BGMAC_INNERTAG 0x84c
+#define BGMAC_TXIPG 0x85c
+#define BGMAC_PAUSE_CTL 0xb30
+#define BGMAC_TX_FLUSH 0xb34
+#define BGMAC_RX_STATUS 0xb38
+#define BGMAC_TX_STATUS 0xb3c
+
+#define BGMAC_PHY_CTL 0x00
+#define BGMAC_PHY_CTL_SPEED_MSB 0x0040
+#define BGMAC_PHY_CTL_DUPLEX 0x0100 /* duplex mode */
+#define BGMAC_PHY_CTL_RESTART 0x0200 /* restart autonegotiation */
+#define BGMAC_PHY_CTL_ANENAB 0x1000 /* enable autonegotiation */
+#define BGMAC_PHY_CTL_SPEED 0x2000
+#define BGMAC_PHY_CTL_LOOP 0x4000 /* loopback */
+#define BGMAC_PHY_CTL_RESET 0x8000 /* reset */
+/* Helpers */
+#define BGMAC_PHY_CTL_SPEED_10 0
+#define BGMAC_PHY_CTL_SPEED_100 BGMAC_PHY_CTL_SPEED
+#define BGMAC_PHY_CTL_SPEED_1000 BGMAC_PHY_CTL_SPEED_MSB
+#define BGMAC_PHY_ADV 0x04
+#define BGMAC_PHY_ADV_10HALF 0x0020 /* advertise 10MBits/s half duplex */
+#define BGMAC_PHY_ADV_10FULL 0x0040 /* advertise 10MBits/s full duplex */
+#define BGMAC_PHY_ADV_100HALF 0x0080 /* advertise 100MBits/s half duplex */
+#define BGMAC_PHY_ADV_100FULL 0x0100 /* advertise 100MBits/s full duplex */
+#define BGMAC_PHY_ADV2 0x09
+#define BGMAC_PHY_ADV2_1000HALF 0x0100 /* advertise 1000MBits/s half duplex */
+#define BGMAC_PHY_ADV2_1000FULL 0x0200 /* advertise 1000MBits/s full duplex */
+
+/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
+#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
+#define BGMAC_BCMA_IOCTL_SW_RESET 0x00000008 /* PHY Reset */
+
+/* BCMA GMAC core specific IO status (BCMA_IOST) flags */
+#define BGMAC_BCMA_IOST_ATTACHED 0x00000800
+
+#define BGMAC_NUM_MIB_TX_REGS \
+ (((BGMAC_TX_Q3_OCTETS_HIGH - BGMAC_TX_GOOD_OCTETS) / 4) + 1)
+#define BGMAC_NUM_MIB_RX_REGS \
+ (((BGMAC_RX_UNI_PKTS - BGMAC_RX_GOOD_OCTETS) / 4) + 1)
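+/* With the register map above this works out to 43 TX and 31 RX MIB counters,
+ * each one a 32-bit register.
+ */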
+
+#define BGMAC_DMA_TX_CTL 0x00
+#define BGMAC_DMA_TX_ENABLE 0x00000001
+#define BGMAC_DMA_TX_SUSPEND 0x00000002
+#define BGMAC_DMA_TX_LOOPBACK 0x00000004
+#define BGMAC_DMA_TX_FLUSH 0x00000010
+#define BGMAC_DMA_TX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_TX_ADDREXT_MASK 0x00030000
+#define BGMAC_DMA_TX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_TX_INDEX 0x04
+#define BGMAC_DMA_TX_RINGLO 0x08
+#define BGMAC_DMA_TX_RINGHI 0x0C
+#define BGMAC_DMA_TX_STATUS 0x10
+#define BGMAC_DMA_TX_STATDPTR 0x00001FFF
+#define BGMAC_DMA_TX_STAT 0xF0000000
+#define BGMAC_DMA_TX_STAT_DISABLED 0x00000000
+#define BGMAC_DMA_TX_STAT_ACTIVE 0x10000000
+#define BGMAC_DMA_TX_STAT_IDLEWAIT 0x20000000
+#define BGMAC_DMA_TX_STAT_STOPPED 0x30000000
+#define BGMAC_DMA_TX_STAT_SUSP 0x40000000
+#define BGMAC_DMA_TX_ERROR 0x14
+#define BGMAC_DMA_TX_ERRDPTR 0x0001FFFF
+#define BGMAC_DMA_TX_ERR 0xF0000000
+#define BGMAC_DMA_TX_ERR_NOERR 0x00000000
+#define BGMAC_DMA_TX_ERR_PROT 0x10000000
+#define BGMAC_DMA_TX_ERR_UNDERRUN 0x20000000
+#define BGMAC_DMA_TX_ERR_TRANSFER 0x30000000
+#define BGMAC_DMA_TX_ERR_DESCREAD 0x40000000
+#define BGMAC_DMA_TX_ERR_CORE 0x50000000
+#define BGMAC_DMA_RX_CTL 0x20
+#define BGMAC_DMA_RX_ENABLE 0x00000001
+#define BGMAC_DMA_RX_FRAME_OFFSET_MASK 0x000000FE
+#define BGMAC_DMA_RX_FRAME_OFFSET_SHIFT 1
+#define BGMAC_DMA_RX_DIRECT_FIFO 0x00000100
+#define BGMAC_DMA_RX_OVERFLOW_CONT 0x00000400
+#define BGMAC_DMA_RX_PARITY_DISABLE 0x00000800
+#define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000
+#define BGMAC_DMA_RX_ADDREXT_SHIFT 16
+#define BGMAC_DMA_RX_INDEX 0x24
+#define BGMAC_DMA_RX_RINGLO 0x28
+#define BGMAC_DMA_RX_RINGHI 0x2C
+#define BGMAC_DMA_RX_STATUS 0x30
+#define BGMAC_DMA_RX_STATDPTR 0x00001FFF
+#define BGMAC_DMA_RX_STAT 0xF0000000
+#define BGMAC_DMA_RX_STAT_DISABLED 0x00000000
+#define BGMAC_DMA_RX_STAT_ACTIVE 0x10000000
+#define BGMAC_DMA_RX_STAT_IDLEWAIT 0x20000000
+#define BGMAC_DMA_RX_STAT_STOPPED 0x30000000
+#define BGMAC_DMA_RX_STAT_SUSP 0x40000000
+#define BGMAC_DMA_RX_ERROR 0x34
+#define BGMAC_DMA_RX_ERRDPTR 0x0001FFFF
+#define BGMAC_DMA_RX_ERR 0xF0000000
+#define BGMAC_DMA_RX_ERR_NOERR 0x00000000
+#define BGMAC_DMA_RX_ERR_PROT 0x10000000
+#define BGMAC_DMA_RX_ERR_UNDERRUN 0x20000000
+#define BGMAC_DMA_RX_ERR_TRANSFER 0x30000000
+#define BGMAC_DMA_RX_ERR_DESCREAD 0x40000000
+#define BGMAC_DMA_RX_ERR_CORE 0x50000000
+
+#define BGMAC_DESC_CTL0_EOT 0x10000000 /* End of ring */
+#define BGMAC_DESC_CTL0_IOC 0x20000000 /* IRQ on complete */
+#define BGMAC_DESC_CTL0_SOF 0x40000000 /* Start of frame */
+#define BGMAC_DESC_CTL0_EOF 0x80000000 /* End of frame */
+#define BGMAC_DESC_CTL1_LEN 0x00001FFF
+
+#define BGMAC_PHY_NOREGS 0x1E
+#define BGMAC_PHY_MASK 0x1F
+
+#define BGMAC_MAX_TX_RINGS 4
+#define BGMAC_MAX_RX_RINGS 1
+
+#define BGMAC_TX_RING_SLOTS 128
+#define BGMAC_RX_RING_SLOTS (512 - 1) /* Why -1? Well, Broadcom does that... */
+
+#define BGMAC_RX_HEADER_LEN 28 /* Last 24 bytes are unused. Well... */
+#define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
+#define BGMAC_RX_MAX_FRAME_SIZE 1536 /* Copied from b44/tg3 */
+#define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
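+/* Resulting receive buffer layout: 28 bytes of hardware header, a 2 byte gap
+ * and up to 1536 bytes of frame data, i.e. 1566 bytes per buffer.
+ */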
+
+#define BGMAC_BFL_ENETROBO 0x0010 /* has ephy roboswitch spi */
+#define BGMAC_BFL_ENETADM 0x0080 /* has ADMtek switch */
+#define BGMAC_BFL_ENETVLAN 0x0100 /* can do vlan */
+
+#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
+#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
+#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
+#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYMII 0x00000040
+#define BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII 0x00000080
+#define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0
+#define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000
+
+#define BGMAC_SPEED_10 0x0001
+#define BGMAC_SPEED_100 0x0002
+#define BGMAC_SPEED_1000 0x0004
+
+#define BGMAC_WEIGHT 64
+
+#define ETHER_MAX_LEN 1518
+
+struct bgmac_slot_info {
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+};
+
+struct bgmac_dma_desc {
+ __le32 ctl0;
+ __le32 ctl1;
+ __le32 addr_low;
+ __le32 addr_high;
+} __packed;
+
+enum bgmac_dma_ring_type {
+ BGMAC_DMA_RING_TX,
+ BGMAC_DMA_RING_RX,
+};
+
+/**
+ * bgmac_dma_ring - contains info about DMA ring (either TX or RX one)
+ * @start: index of the first slot containing data
+ * @end: index of a slot that can *not* be read (yet)
+ *
+ * Be really aware of the specific @end meaning. It's an index of a slot *after*
+ * the one containing data that can be read. If @start equals @end the ring is
+ * empty.
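+ *
+ * For example, with @start == 5 and @end == 8, slots 5, 6 and 7 hold queued
+ * buffers; the ring is empty again once @start catches up with @end.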
+ */
+struct bgmac_dma_ring {
+ u16 num_slots;
+ u16 start;
+ u16 end;
+
+ u16 mmio_base;
+ struct bgmac_dma_desc *cpu_base;
+ dma_addr_t dma_base;
+
+ struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
+};
+
+struct bgmac_rx_header {
+ __le16 len;
+ __le16 flags;
+ __le16 pad[12];
+};
+
+struct bgmac {
+ struct bcma_device *core;
+ struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
+ struct net_device *net_dev;
+ struct napi_struct napi;
+
+ /* DMA */
+ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
+ struct bgmac_dma_ring rx_ring[BGMAC_MAX_RX_RINGS];
+
+ /* Stats */
+ bool stats_grabbed;
+ u32 mib_tx_regs[BGMAC_NUM_MIB_TX_REGS];
+ u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
+
+ /* Int */
+ u32 int_mask;
+ u32 int_status;
+
+ /* Speed-related */
+ int speed;
+ bool autoneg;
+ bool full_duplex;
+
+ u8 phyaddr;
+ bool has_robosw;
+
+ bool loopback;
+};
+
+static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_read32(bgmac->core, offset);
+}
+
+static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bcma_write32(bgmac->core, offset, value);
+}
+
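+/* Read-modify-write helpers, e.g. bgmac_set(bgmac, BGMAC_DEV_CTL,
+ * BGMAC_DC_MROR) ORs a bit in, while bgmac_mask(bgmac, BGMAC_CMDCFG,
+ * ~BGMAC_CMDCFG_SR) clears one.
+ */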
+static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set)
+{
+ bgmac_write(bgmac, offset, (bgmac_read(bgmac, offset) & mask) | set);
+}
+
+static inline void bgmac_mask(struct bgmac *bgmac, u16 offset, u32 mask)
+{
+ bgmac_maskset(bgmac, offset, mask, 0);
+}
+
+static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
+{
+ bgmac_maskset(bgmac, offset, ~0, set);
+}
+
+#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a1adfaf87f4..2f0ba8f2fd6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8543,7 +8543,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
memcpy(dev->dev_addr, bp->mac_addr, 6);
- memcpy(dev->perm_addr, bp->mac_addr, 6);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
index 48fbdd48f88..116762daae0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/Makefile
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -4,4 +4,5 @@
obj-$(CONFIG_BNX2X) += bnx2x.o
-bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e8d4db10c8f..e4605a96508 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,9 +13,12 @@
#ifndef BNX2X_H
#define BNX2X_H
+
+#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
+#include <linux/pci_regs.h>
/* compilation time flags */
@@ -23,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.00-0"
-#define DRV_MODULE_RELDATE "2012/09/27"
+#define DRV_MODULE_VERSION "1.78.02-0"
+#define DRV_MODULE_RELDATE "2013/01/14"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -48,6 +51,13 @@
#include "bnx2x_sp.h"
#include "bnx2x_dcb.h"
#include "bnx2x_stats.h"
+#include "bnx2x_vfpf.h"
+
+enum bnx2x_int_mode {
+ BNX2X_INT_MODE_MSIX,
+ BNX2X_INT_MODE_INTX,
+ BNX2X_INT_MODE_MSI
+};
/* error/debug prints */
@@ -112,29 +122,29 @@ do { \
dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int);
#ifdef BNX2X_STOP_ON_ERROR
-void bnx2x_int_disable(struct bnx2x *bp);
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_int_disable(bp); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, true); \
} while (0)
#else
#define bnx2x_panic() \
do { \
bp->panic = 1; \
BNX2X_ERR("driver assert\n"); \
- bnx2x_panic_dump(bp); \
+ bnx2x_panic_dump(bp, false); \
} while (0)
#endif
#define bnx2x_mc_addr(ha) ((ha)->addr)
#define bnx2x_uc_addr(ha) ((ha)->addr)
-#define U64_LO(x) (u32)(((u64)(x)) & 0xffffffff)
-#define U64_HI(x) (u32)(((u64)(x)) >> 32)
+#define U64_LO(x) ((u32)(((u64)(x)) & 0xffffffff))
+#define U64_HI(x) ((u32)(((u64)(x)) >> 32))
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
@@ -334,6 +344,9 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
+#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
+ SGE_PAGES), 0xffff)
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
@@ -789,48 +802,63 @@ struct bnx2x_common {
#define CHIP_NUM_57711E 0x1650
#define CHIP_NUM_57712 0x1662
#define CHIP_NUM_57712_MF 0x1663
+#define CHIP_NUM_57712_VF 0x166f
#define CHIP_NUM_57713 0x1651
#define CHIP_NUM_57713E 0x1652
#define CHIP_NUM_57800 0x168a
#define CHIP_NUM_57800_MF 0x16a5
+#define CHIP_NUM_57800_VF 0x16a9
#define CHIP_NUM_57810 0x168e
#define CHIP_NUM_57810_MF 0x16ae
+#define CHIP_NUM_57810_VF 0x16af
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57811_VF 0x163f
+#define CHIP_NUM_57840_OBSOLETE 0x168d
#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
#define CHIP_NUM_57840_4_10 0x16a1
#define CHIP_NUM_57840_2_20 0x16a2
#define CHIP_NUM_57840_MF 0x16a4
+#define CHIP_NUM_57840_VF 0x16ad
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_57712(bp) (CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_VF)
#define CHIP_IS_57712_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57712_MF)
#define CHIP_IS_57800(bp) (CHIP_NUM(bp) == CHIP_NUM_57800)
#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57800_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_VF)
#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57810_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_VF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
+#define CHIP_IS_57811_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_VF)
#define CHIP_IS_57840(bp) \
((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
(CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
+#define CHIP_IS_57840_VF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_VF)
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
- CHIP_IS_57712_MF(bp))
+ CHIP_IS_57712_MF(bp) || \
+ CHIP_IS_57712_VF(bp))
#define CHIP_IS_E3(bp) (CHIP_IS_57800(bp) || \
CHIP_IS_57800_MF(bp) || \
+ CHIP_IS_57800_VF(bp) || \
CHIP_IS_57810(bp) || \
CHIP_IS_57810_MF(bp) || \
+ CHIP_IS_57810_VF(bp) || \
CHIP_IS_57811(bp) || \
CHIP_IS_57811_MF(bp) || \
+ CHIP_IS_57811_VF(bp) || \
CHIP_IS_57840(bp) || \
- CHIP_IS_57840_MF(bp))
+ CHIP_IS_57840_MF(bp) || \
+ CHIP_IS_57840_VF(bp))
#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
#define USES_WARPCORE(bp) (CHIP_IS_E3(bp))
#define IS_E1H_OFFSET (!CHIP_IS_E1(bp))
@@ -954,6 +982,11 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
+#define BNX2X_VF_CID_WND 0
+#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
+#define BNX2X_CLIENTS_PER_VF 1
+#define BNX2X_FIRST_VF_CID 256
+#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
#define BNX2X_VF_ID_INVALID 0xFF
/*
@@ -1104,6 +1137,7 @@ struct hw_context {
/* forward */
struct bnx2x_ilt;
+struct bnx2x_vfdb;
enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
@@ -1165,19 +1199,22 @@ struct bnx2x_fw_stats_req {
};
struct bnx2x_fw_stats_data {
- struct stats_counter storm_counters;
- struct per_port_stats port;
- struct per_pf_stats pf;
+ struct stats_counter storm_counters;
+ struct per_port_stats port;
+ struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[1];
};
/* Public slow path states */
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
- BNX2X_SP_RTNL_AFEX_F_UPDATE,
BNX2X_SP_RTNL_FAN_FAILURE,
+ BNX2X_SP_RTNL_AFEX_F_UPDATE,
+ BNX2X_SP_RTNL_ENABLE_SRIOV,
+ BNX2X_SP_RTNL_VFPF_MCAST,
+ BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
};
@@ -1231,6 +1268,21 @@ struct bnx2x {
(vn) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2 : 1))
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
+#ifdef CONFIG_BNX2X_SRIOV
+ /* vf pf channel mailbox contains request and response buffers */
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ dma_addr_t vf2pf_mbox_mapping;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* bulletin board for messages from pf to vf */
+ union pf_vf_bulletin *pf2vf_bulletin;
+ dma_addr_t pf2vf_bulletin_mapping;
+
+ struct pf_vf_bulletin_content old_bulletin;
+#endif /* CONFIG_BNX2X_SRIOV */
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1295,8 +1347,6 @@ struct bnx2x {
__le16 *eq_cons_sb;
atomic_t eq_spq_left; /* COMMON_XXX ramrods credit */
-
-
/* Counter for marking that there is a STAT_QUERY ramrod pending */
u16 stats_pending;
/* Counter for completed statistics ramrods */
@@ -1318,8 +1368,6 @@ struct bnx2x {
#define DISABLE_MSI_FLAG (1 << 7)
#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
-
-#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
@@ -1330,6 +1378,17 @@ struct bnx2x {
#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
+#define IS_VF_FLAG (1 << 22)
+
+#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
+
+#ifdef CONFIG_BNX2X_SRIOV
+#define IS_VF(bp) ((bp)->flags & IS_VF_FLAG)
+#define IS_PF(bp) (!((bp)->flags & IS_VF_FLAG))
+#else
+#define IS_VF(bp) false
+#define IS_PF(bp) true
+#endif
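When CONFIG_BNX2X_SRIOV is off, IS_VF() and IS_PF() fold to constant false/true, so VF-only branches compile away entirely. A hedged sketch of the intended call pattern, mirroring the PF/VF split bnx2x_nic_load() uses later in this patch (the wrapper name is illustrative):

	/* illustrative: direct path on a PF, VF-PF channel on a VF */
	static int example_set_mac(struct bnx2x *bp)
	{
		if (IS_PF(bp))
			return bnx2x_set_eth_mac(bp, true);	/* program the CAM directly */
		return bnx2x_vfpf_set_mac(bp);			/* ask the PF to do it */
	}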
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1349,6 +1408,7 @@ struct bnx2x {
int mrrs;
struct delayed_work sp_task;
+ atomic_t interrupt_occurred;
struct delayed_work sp_rtnl_task;
struct delayed_work period_task;
@@ -1432,6 +1492,7 @@ struct bnx2x {
u8 igu_sb_cnt;
u8 min_msix_vec_cnt;
+ u32 igu_base_addr;
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1580,6 +1641,9 @@ struct bnx2x {
char fw_ver[32];
const struct firmware *firmware;
+ struct bnx2x_vfdb *vfdb;
+#define IS_SRIOV(bp) ((bp)->vfdb)
+
/* DCB support on/off */
u16 dcb_state;
#define BNX2X_DCB_STATE_OFF 0
@@ -1599,6 +1663,10 @@ struct bnx2x {
int dcb_version;
/* CAM credit pools */
+
+ /* used only in sriov */
+ struct bnx2x_credit_pool_obj vlans_pool;
+
struct bnx2x_credit_pool_obj macs_pool;
/* RX_MODE object */
@@ -1636,6 +1704,9 @@ struct bnx2x {
/* priority to cos mapping */
u8 prio_to_cos[8];
+
+ int fp_array_size;
+ u32 dump_preset_idx;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1813,12 +1884,16 @@ int bnx2x_del_all_macs(struct bnx2x *bp,
/* Init Function API */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+ u8 vf_valid, int fw_sb_id, int igu_sb_id);
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
void bnx2x_read_mf_cfg(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1830,6 +1905,18 @@ u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
bool with_comp, u8 comp_type);
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+ u8 src_type, u8 dst_type);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl);
+
+/* FLR related routines */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count);
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt);
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev);
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
@@ -1854,6 +1941,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
return val;
}
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
+ bool is_pf);
+
#define BNX2X_ILT_ZALLOC(x, y, size) \
do { \
x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
@@ -1990,10 +2080,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
-
#define STROM_ASSERT_ARRAY_SIZE 50
-
/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
(BP_VN(bp) << BNX2X_SWCID_SHIFT) | \
@@ -2024,7 +2112,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
/* Memory of fairness algorithm . 2 cycles */
#define FAIR_MEM 2
-
#define ATTN_NIG_FOR_FUNC (1L << 8)
#define ATTN_SW_TIMER_4_FUNC (1L << 9)
#define GPIO_2_FUNC (1L << 10)
@@ -2067,6 +2154,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+ AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT | \
AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0 (AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
@@ -2128,7 +2216,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MULTI_MASK 0x7f
-
#define DEF_USB_FUNC_OFF offsetof(struct cstorm_def_status_block_u, func)
#define DEF_CSB_FUNC_OFF offsetof(struct cstorm_def_status_block_c, func)
#define DEF_XSB_FUNC_OFF offsetof(struct xstorm_def_status_block, func)
@@ -2156,18 +2243,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_ETH_DEF_CONS])
-#define SET_FLAG(value, mask, flag) \
- do {\
- (value) &= ~(mask);\
- (value) |= ((flag) << (mask##_SHIFT));\
- } while (0)
-
-#define GET_FLAG(value, mask) \
- (((value) & (mask)) >> (mask##_SHIFT))
-
-#define GET_FIELD(value, fname) \
- (((value) & (fname##_MASK)) >> (fname##_SHIFT))
-
#define CAM_IS_INVALID(x) \
(GET_FLAG(x.flags, \
MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
@@ -2178,7 +2253,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
-
#ifndef PXP2_REG_PXP2_INT_STS
#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
#endif
@@ -2190,9 +2264,16 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+#define VF_ACQUIRE_MC_FILTERS 10
+
+#define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
+ (!((me_reg) & ME_REG_VF_ERR)))
+int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code);
/* Congestion management fairness mode */
-#define CMNG_FNS_NONE 0
-#define CMNG_FNS_MINMAX 1
+#define CMNG_FNS_NONE 0
+#define CMNG_FNS_MINMAX 1
#define HC_SEG_ACCESS_DEF 0 /*Driver decision 0-3*/
#define HC_SEG_ACCESS_ATTN 4
@@ -2208,7 +2289,6 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
-
#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
@@ -2229,6 +2309,18 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
(BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
+#define SET_FLAG(value, mask, flag) \
+ do {\
+ (value) &= ~(mask);\
+ (value) |= ((flag) << (mask##_SHIFT));\
+ } while (0)
+
+#define GET_FLAG(value, mask) \
+ (((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+ (((value) & (fname##_MASK)) >> (fname##_SHIFT))
+
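SET_FLAG()/GET_FLAG() rely on token pasting, so whatever mask is passed in must come with a matching <mask>_SHIFT define (and GET_FIELD() likewise expects <fname>_MASK/<fname>_SHIFT). A small worked example with hypothetical defines, for illustration only:

	/* hypothetical 3-bit field at bits 6:4 */
	#define EXAMPLE_FIELD		(0x7 << 4)
	#define EXAMPLE_FIELD_SHIFT	4

	static void example_flags(void)
	{
		u16 val = 0;

		SET_FLAG(val, EXAMPLE_FIELD, 5);		/* val is now 0x50 */
		WARN_ON(GET_FLAG(val, EXAMPLE_FIELD) != 5);	/* reads back 5 */
	}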
enum {
SWITCH_UPDATE,
AFEX_UPDATE,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a5edac8df67..ecac04a3687 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
+#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -28,8 +29,6 @@
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
-
-
/**
* bnx2x_move_fp - move content of the fastpath structure.
*
@@ -87,6 +86,34 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
}
/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string.
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw name
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
+{
+ if (IS_PF(bp)) {
+ u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+ phy_fw_ver[0] = '\0';
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
+ strlcpy(buf, bp->fw_ver, buf_len);
+ snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+ "bc %d.%d.%d%s%s",
+ (bp->common.bc_ver & 0xff0000) >> 16,
+ (bp->common.bc_ver & 0xff00) >> 8,
+ (bp->common.bc_ver & 0xff),
+ ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ } else {
+ bnx2x_vf_fill_fw_str(bp, buf, buf_len);
+ }
+}
+
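On a PF the resulting string is "<storm fw> bc <maj>.<min>.<rev>[ phy <ver>]" - e.g. a bc_ver of 0x070a33 decodes to "bc 7.10.51" - while a VF delegates to bnx2x_vf_fill_fw_str(), since it cannot read the bootcode version. A minimal usage sketch (buffer size illustrative), e.g. when filling ethtool drvinfo:

	char fw_str[64];

	bnx2x_fill_fw_str(bp, fw_str, sizeof(fw_str));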
+/**
* bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
*
* @bp: driver handle
@@ -210,7 +237,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
- &pkts_compl, &bytes_compl);
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
@@ -316,14 +343,14 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
-/* Set Toeplitz hash value in the skb using the value from the
+/* Get Toeplitz hash value in the skb using the value from the
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
const struct eth_fast_path_rx_cqe *cqe,
bool *l4_rxhash)
{
- /* Set Toeplitz hash from CQE */
+ /* Get Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
(cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
enum eth_rss_hash_type htype;
@@ -390,8 +417,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
- tpa_info->full_page =
- SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
tpa_info->gro_size = gro_size;
}
@@ -412,31 +438,34 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
*/
#define TPA_TSTAMP_OPT_LEN 12
/**
- * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ * bnx2x_set_gro_params - compute GRO values
*
- * @bp: driver handle
+ * @skb: packet skb
* @parsing_flags: parsing flags from the START CQE
* @len_on_bd: total length of the first packet for the
* aggregation.
+ * @pkt_len: length of all segments
*
* Approximate value of the MSS for this aggregation calculated using
* the first packet of it.
+ * Compute number of aggregated segments, and gso_type.
*/
-static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
- u16 len_on_bd)
+static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
+ u16 len_on_bd, unsigned int pkt_len)
{
- /*
- * TPA arrgregation won't have either IP options or TCP options
+ /* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
*/
u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6)
+ PRS_FLAG_OVERETH_IPV6) {
hdrs_len += sizeof(struct ipv6hdr);
- else /* IPv4 */
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
hdrs_len += sizeof(struct iphdr);
-
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
/* Check if there was a TCP timestamp; if there was, it will
* always be 12 bytes long: nop nop kind length echo val.

@@ -446,7 +475,13 @@ static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
hdrs_len += TPA_TSTAMP_OPT_LEN;
- return len_on_bd - hdrs_len;
+ skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
+ skb_shinfo(skb)->gso_size);
}
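Worked numbers for the common case (IPv4, no TCP timestamps, an illustrative len_on_bd of 1514 and pkt_len of 14654 bytes):

	/* hdrs_len = 14 (ETH_HLEN) + 20 (tcphdr) + 20 (iphdr) = 54
	 * gso_size = len_on_bd - hdrs_len = 1514 - 54        = 1460
	 * count    = DIV_ROUND_UP(14654 - 54, 1460)          = 10 segments
	 */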
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -463,7 +498,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
}
mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
BNX2X_ERR("Can't map sge\n");
@@ -500,20 +535,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
/* This is needed in order to enable forwarding support */
- if (frag_size) {
- skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
- tpa_info->parsing_flags, len_on_bd);
-
- skb_shinfo(skb)->gso_type =
- (GET_FLAG(tpa_info->parsing_flags,
- PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
- PRS_FLAG_OVERETH_IPV6) ?
- SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
- }
-
+ if (frag_size)
+ bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
+ le16_to_cpu(cqe->pkt_len));
#ifdef BNX2X_STOP_ON_ERROR
- if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+ if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
pages, cqe_idx);
BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
@@ -531,8 +558,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (fp->mode == TPA_MODE_GRO)
frag_len = min_t(u32, frag_size, (u32)full_page);
else /* LRO */
- frag_len = min_t(u32, frag_size,
- (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+ frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
@@ -548,7 +574,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Unmap the page as we are going to pass it to the stack */
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -566,7 +592,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
skb->data_len += frag_len;
- skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
+ skb->truesize += SGE_PAGES;
skb->len += frag_len;
frag_size -= frag_len;
@@ -591,6 +617,54 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
+#ifdef CONFIG_INET
+static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+}
+
+static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+}
+#endif
+
+static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_set_network_header(skb, 0);
+ switch (be16_to_cpu(skb->protocol)) {
+ case ETH_P_IP:
+ bnx2x_gro_ip_csum(bp, skb);
+ break;
+ case ETH_P_IPV6:
+ bnx2x_gro_ipv6_csum(bp, skb);
+ break;
+ default:
+ BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ be16_to_cpu(skb->protocol));
+ }
+ tcp_gro_complete(skb);
+ }
+#endif
+ napi_gro_receive(&fp->napi, skb);
+}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_agg_info *tpa_info,
@@ -645,7 +719,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
- napi_gro_receive(&fp->napi, skb);
+ bnx2x_gro_receive(bp, fp, skb);
} else {
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate new pages - dropping packet!\n");
@@ -1087,7 +1161,7 @@ void __bnx2x_link_report(struct bnx2x *bp)
struct bnx2x_link_report_data cur_data;
/* reread mf_cfg */
- if (!CHIP_IS_E1(bp))
+ if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
/* Read the current link report info */
@@ -1429,10 +1503,14 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
if (nvecs == offset)
return;
- free_irq(bp->msix_table[offset].vector, bp->dev);
- DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
- bp->msix_table[offset].vector);
- offset++;
+
+ /* VFs don't have a default SB */
+ if (IS_PF(bp)) {
+ free_irq(bp->msix_table[offset].vector, bp->dev);
+ DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+ bp->msix_table[offset].vector);
+ offset++;
+ }
if (CNIC_SUPPORT(bp)) {
if (nvecs == offset)
@@ -1453,21 +1531,30 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
void bnx2x_free_irq(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG &&
- !(bp->flags & USING_SINGLE_MSIX_FLAG))
- bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
- CNIC_SUPPORT(bp) + 1);
- else
+ !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
+ int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
+
+ /* vfs don't have a default status block */
+ if (IS_PF(bp))
+ nvecs++;
+
+ bnx2x_free_msix_irqs(bp, nvecs);
+ } else {
free_irq(bp->dev->irq, bp->dev);
+ }
}
int bnx2x_enable_msix(struct bnx2x *bp)
{
- int msix_vec = 0, i, rc, req_cnt;
+ int msix_vec = 0, i, rc;
- bp->msix_table[msix_vec].entry = msix_vec;
- BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
- bp->msix_table[0].entry);
- msix_vec++;
+ /* VFs don't have a default status block */
+ if (IS_PF(bp)) {
+ bp->msix_table[msix_vec].entry = msix_vec;
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
+ bp->msix_table[0].entry);
+ msix_vec++;
+ }
/* Cnic requires an msix vector for itself */
if (CNIC_SUPPORT(bp)) {
@@ -1485,9 +1572,10 @@ int bnx2x_enable_msix(struct bnx2x *bp)
msix_vec++;
}
- req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp) + 1;
+ DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
+ msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+ rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
/*
* reconfigure number of tx/rx queues according to available
@@ -1495,7 +1583,7 @@ int bnx2x_enable_msix(struct bnx2x *bp)
*/
if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
/* how less vectors we will have? */
- int diff = req_cnt - rc;
+ int diff = msix_vec - rc;
BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
@@ -1549,12 +1637,15 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
int i, rc, offset = 0;
- rc = request_irq(bp->msix_table[offset++].vector,
- bnx2x_msix_sp_int, 0,
- bp->dev->name, bp->dev);
- if (rc) {
- BNX2X_ERR("request sp irq failed\n");
- return -EBUSY;
+ /* no default status block for vf */
+ if (IS_PF(bp)) {
+ rc = request_irq(bp->msix_table[offset++].vector,
+ bnx2x_msix_sp_int, 0,
+ bp->dev->name, bp->dev);
+ if (rc) {
+ BNX2X_ERR("request sp irq failed\n");
+ return -EBUSY;
+ }
}
if (CNIC_SUPPORT(bp))
@@ -1578,12 +1669,20 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_ETH_QUEUES(bp);
- offset = 1 + CNIC_SUPPORT(bp);
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
- bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
-
+ if (IS_PF(bp)) {
+ offset = 1 + CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ } else {
+ offset = CNIC_SUPPORT(bp);
+ netdev_info(bp->dev,
+ "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
+ }
return 0;
}
@@ -1628,7 +1727,6 @@ static int bnx2x_setup_irqs(struct bnx2x *bp)
if (rc)
return rc;
} else {
- bnx2x_ack_int(bp);
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
@@ -1726,7 +1824,6 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
}
-
void bnx2x_set_num_queues(struct bnx2x *bp)
{
/* RSS queues */
@@ -1991,27 +2088,212 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
} while (0)
#endif /*BNX2X_STOP_ON_ERROR*/
-bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
+static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
+{
+ BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ return;
+}
+
+static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
- /* build FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ int num_groups, vf_headroom = 0;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
+
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
+
+ /* Total number of FW statistics requests =
+ * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
+ * and fcoe l2 queue) stats + num of queues (which includes another 1
+ * for fcoe l2 queue if applicable)
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
+
+ /* vf stats appear in the request list, but their data is allocated by
+ * the VFs themselves. We don't include them in the bp->fw_stats_num as
+ * it is used to determine where to place the vf stats queries in the
+ * request struct
+ */
+ if (IS_SRIOV(bp))
+ vf_headroom = bnx2x_vf_headroom(bp);
- /* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ /* Request is built from stats_query_header and an array of
+ * stats_query_cmd_group each of which contains
+ * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+ * configured in the stats_query_header.
+ */
+ num_groups =
+ (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
+ 1 : 0));
+
+ DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+ bp->fw_stats_num, vf_headroom, num_groups);
+ bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+ num_groups * sizeof(struct stats_query_cmd_group);
+
+ /* Data for statistics requests + stats_counter
+ * stats_counter holds per-STORM counters that are incremented
+ * when STORM has finished with the current request.
+ * memory for FCoE offloaded statistics is counted anyway,
+ * even if it will not be sent.
+ * VF stats are not accounted for here as the data of VF stats is stored
+ * in memory allocated by the VF, not here.
+ */
+ bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+ sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
+ sizeof(struct per_queue_stats) * num_queue_stats +
+ sizeof(struct stats_counter);
+
+ BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+ /* Set shortcuts */
+ bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+ bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+ bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+ ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+ bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+ bp->fw_stats_req_sz;
+
+ DP(BNX2X_MSG_SP, "statistics request base address set to %x %x",
+ U64_HI(bp->fw_stats_req_mapping),
+ U64_LO(bp->fw_stats_req_mapping));
+ DP(BNX2X_MSG_SP, "statistics data base address set to %x %x",
+ U64_HI(bp->fw_stats_data_mapping),
+ U64_LO(bp->fw_stats_data_mapping));
+ return 0;
- DP(NETIF_MSG_IFUP, "loaded fw %x, my fw %x\n", loaded_fw, my_fw);
+alloc_mem_err:
+ bnx2x_free_fw_stats_mem(bp);
+ BNX2X_ERR("Can't allocate FW stats memory\n");
+ return -ENOMEM;
+}
+
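The open-coded group count above is just a ceiling division, i.e. equivalent to:

	num_groups = DIV_ROUND_UP(bp->fw_stats_num + vf_headroom,
				  STATS_QUERY_CMD_COUNT);

For illustration, assuming STATS_QUERY_CMD_COUNT is 16 and taking 8 ethernet queues, FCoE enabled and a vf_headroom of 64: num_queue_stats = 9, fw_stats_num = 2 + 1 + 9 = 12, and num_groups = DIV_ROUND_UP(12 + 64, 16) = 5.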
+/* send load request to mcp and analyze response */
+static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
+{
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
+ /* load request */
+ (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
+ DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
+
+ /* if mcp fails to respond we must abort */
+ if (!(*load_code)) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ return -EBUSY;
+ }
+
+ /* If mcp refused (e.g. other port is in diagnostic mode) we
+ * must abort
+ */
+ if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("MCP refused load request, aborting\n");
+ return -EBUSY;
+ }
+ return 0;
+}
- if (loaded_fw != my_fw) {
- if (is_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. aborting\n",
+/* check whether another PF has already loaded FW to chip. In
+ * virtualized environments a pf from another VM may have already
+ * initialized the device including loading FW
+ */
+int bnx2x_nic_load_analyze_req(struct bnx2x *bp, u32 load_code)
+{
+ /* is another pf loaded on this engine? */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build my FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. aborting\n",
loaded_fw, my_fw);
- return false;
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
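The packed version dword keeps the major version in the low byte and the engineering version in the high byte, and the load is aborted unless XSEM_REG_PRAM reads back the same value:

	/* hypothetical FW 7.8.2.0:
	 *   my_fw = 0x07 + (0x08 << 8) + (0x02 << 16) + (0x00 << 24)
	 *         = 0x00020807
	 */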
+/* returns the "mcp load_code" according to global load_count array */
+static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
+{
+ int path = BP_PATH(bp);
+
+ DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ load_count[path][0]++;
+ load_count[path][1 + port]++;
+ DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
+ path, load_count[path][0], load_count[path][1],
+ load_count[path][2]);
+ if (load_count[path][0] == 1)
+ return FW_MSG_CODE_DRV_LOAD_COMMON;
+ else if (load_count[path][1 + port] == 1)
+ return FW_MSG_CODE_DRV_LOAD_PORT;
+ else
+ return FW_MSG_CODE_DRV_LOAD_FUNCTION;
+}
+
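Without an MCP the driver falls back to its own per-path load counters: slot 0 counts every function on the path and slots 1/2 count per port. A worked sequence:

	/* example on one path (counts are [all, port0, port1]):
	 *   1st load, port 0: [1, 1, 0] -> FW_MSG_CODE_DRV_LOAD_COMMON
	 *   2nd load, port 1: [2, 1, 1] -> FW_MSG_CODE_DRV_LOAD_PORT
	 *   3rd load, port 0: [3, 2, 1] -> FW_MSG_CODE_DRV_LOAD_FUNCTION
	 */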
+/* mark PMF if applicable */
+static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
+{
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+ bp->port.pmf = 1;
+ /* We need the barrier to ensure the ordering between the
+ * writing to bp->port.pmf here and reading it from the
+ * bnx2x_periodic_task().
+ */
+ smp_mb();
+ } else {
+ bp->port.pmf = 0;
+ }
+
+ DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+}
+
+static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
+{
+ if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+ (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+ (bp->common.shmem2_base)) {
+ if (SHMEM2_HAS(bp, dcc_support))
+ SHMEM2_WR(bp, dcc_support,
+ (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+ SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+ if (SHMEM2_HAS(bp, afex_driver_support))
+ SHMEM2_WR(bp, afex_driver_support,
+ SHMEM_AFEX_SUPPORTED_VERSION_ONE);
}
- return true;
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
}
/**
@@ -2026,49 +2308,15 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
int cos;
struct napi_struct orig_napi = fp->napi;
struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init) {
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
- } else {
- /* Keep Queue statistics */
- struct bnx2x_eth_q_stats *tmp_eth_q_stats;
- struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
-
- tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
- GFP_KERNEL);
- if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
-
- tmp_eth_q_stats_old =
- kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
- GFP_KERNEL);
- if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
-
- memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
- memset(fp, 0, sizeof(*fp));
-
- if (tmp_eth_q_stats) {
- memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
- kfree(tmp_eth_q_stats);
- }
-
- if (tmp_eth_q_stats_old) {
- memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
- sizeof(struct bnx2x_eth_q_stats_old));
- kfree(tmp_eth_q_stats_old);
- }
-
- }
+ if (fp->tpa_info)
+ memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
+ sizeof(struct bnx2x_agg_info));
+ memset(fp, 0, sizeof(*fp));
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
@@ -2114,10 +2362,12 @@ int bnx2x_load_cnic(struct bnx2x *bp)
mutex_init(&bp->cnic_mutex);
- rc = bnx2x_alloc_mem_cnic(bp);
- if (rc) {
- BNX2X_ERR("Unable to allocate bp memory for cnic\n");
- LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem_cnic(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory for cnic\n");
+ LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
+ }
}
rc = bnx2x_alloc_fp_mem_cnic(bp);
@@ -2144,14 +2394,17 @@ int bnx2x_load_cnic(struct bnx2x *bp)
bnx2x_nic_init_cnic(bp);
- /* Enable Timer scan */
- REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+ if (IS_PF(bp)) {
+ /* Enable Timer scan */
+ REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
- for_each_cnic_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ /* setup cnic queues */
+ for_each_cnic_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error_cnic2);
+ }
}
}
@@ -2192,13 +2445,11 @@ load_error_cnic0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-
/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
int port = BP_PORT(bp);
- u32 load_code;
- int i, rc;
+ int i, rc = 0, load_code = 0;
DP(NETIF_MSG_IFUP, "Starting NIC load\n");
DP(NETIF_MSG_IFUP,
@@ -2213,15 +2464,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
- /* Set the initial link reported state to link down */
- bnx2x_acquire_phy_lock(bp);
memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&bp->last_reported_link.link_report_flags);
- bnx2x_release_phy_lock(bp);
- /* must be called before memory allocation and HW init */
- bnx2x_ilt_set_info(bp);
+ if (IS_PF(bp))
+ /* must be called before memory allocation and HW init */
+ bnx2x_ilt_set_info(bp);
/*
* Zero fastpath structures preserving invariants like napi, which are
@@ -2240,8 +2489,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
- if (bnx2x_alloc_mem(bp))
- return -ENOMEM;
+ if (IS_PF(bp)) {
+ rc = bnx2x_alloc_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate bp memory\n");
+ return rc;
+ }
+ }
+
+ /* Allocate memory for FW statistics */
+ if (bnx2x_alloc_fw_stats_mem(bp))
+ LOAD_ERROR_EXIT(bp, load_error0);
+
+ /* needs to be done after alloc mem, since it's self-adjusting to the
+ * amount of memory available for RSS queues
+ */
+ rc = bnx2x_alloc_fp_mem(bp);
+ if (rc) {
+ BNX2X_ERR("Unable to allocate memory for fps\n");
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
+
+ /* request pf to initialize status blocks */
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_init(bp);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error0);
+ }
/* As long as bnx2x_alloc_mem() may possibly update
* bp->num_queues, bnx2x_set_real_num_queues() should always
@@ -2264,98 +2538,48 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
DP(NETIF_MSG_IFUP, "napi added\n");
bnx2x_napi_enable(bp);
- /* set pf load just before approaching the MCP */
- bnx2x_set_pf_load(bp);
-
- /* Send LOAD_REQUEST command to MCP
- * Returns the type of LOAD command:
- * if it is the first port to be initialized
- * common blocks should be initialized, otherwise - not
- */
- if (!BP_NOMCP(bp)) {
- /* init fw_seq */
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
-
- /* Get current FW pulse sequence */
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
-
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
- DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
- BNX2X_ERR("Driver load refused\n");
- rc = -EBUSY; /* other port in diagnostic mode */
- LOAD_ERROR_EXIT(bp, load_error1);
- }
- if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
- load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* abort nic load if version mismatch */
- if (!bnx2x_test_firmware_version(bp, true)) {
- rc = -EBUSY;
+ if (IS_PF(bp)) {
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
+ /* if mcp exists send load request and analyze response */
+ if (!BP_NOMCP(bp)) {
+ /* attempt to load pf */
+ rc = bnx2x_nic_load_request(bp, &load_code);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error1);
+
+ /* what did mcp say? */
+ rc = bnx2x_nic_load_analyze_req(bp, load_code);
+ if (rc) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
+ } else {
+ load_code = bnx2x_nic_load_no_mcp(bp, port);
}
- } else {
- int path = BP_PATH(bp);
-
- DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- load_count[path][0]++;
- load_count[path][1 + port]++;
- DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
- path, load_count[path][0], load_count[path][1],
- load_count[path][2]);
- if (load_count[path][0] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
- else if (load_count[path][1 + port] == 1)
- load_code = FW_MSG_CODE_DRV_LOAD_PORT;
- else
- load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
- }
-
- if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
- bp->port.pmf = 1;
- /*
- * We need the barrier to ensure the ordering between the
- * writing to bp->port.pmf here and reading it from the
- * bnx2x_periodic_task().
- */
- smp_mb();
- } else
- bp->port.pmf = 0;
-
- DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
+ /* mark pmf if applicable */
+ bnx2x_nic_load_pmf(bp, load_code);
- /* Init Function state controlling object */
- bnx2x__init_func_obj(bp);
+ /* Init Function state controlling object */
+ bnx2x__init_func_obj(bp);
- /* Initialize HW */
- rc = bnx2x_init_hw(bp, load_code);
- if (rc) {
- BNX2X_ERR("HW init failed, aborting\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error2);
+ /* Initialize HW */
+ rc = bnx2x_init_hw(bp, load_code);
+ if (rc) {
+ BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
}
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
- BNX2X_ERR("IRQs setup failed\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ BNX2X_ERR("setup irqs failed\n");
+ if (IS_PF(bp))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
@@ -2363,78 +2587,89 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_nic_init(bp, load_code);
/* Init per-function objects */
- bnx2x_init_bp_objs(bp);
-
- if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
- (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
- (bp->common.shmem2_base)) {
- if (SHMEM2_HAS(bp, dcc_support))
- SHMEM2_WR(bp, dcc_support,
- (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
- SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
- if (SHMEM2_HAS(bp, afex_driver_support))
- SHMEM2_WR(bp, afex_driver_support,
- SHMEM_AFEX_SUPPORTED_VERSION_ONE);
- }
+ if (IS_PF(bp)) {
+ bnx2x_init_bp_objs(bp);
+ bnx2x_iov_nic_init(bp);
+
+ /* Set AFEX default VLAN tag to an invalid value */
+ bp->afex_def_vlan_tag = -1;
+ bnx2x_nic_load_afex_dcc(bp, load_code);
+ bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+ rc = bnx2x_func_start(bp);
+ if (rc) {
+ BNX2X_ERR("Function start failed!\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- /* Set AFEX default VLAN tag to an invalid value */
- bp->afex_def_vlan_tag = -1;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
- rc = bnx2x_func_start(bp);
- if (rc) {
- BNX2X_ERR("Function start failed!\n");
- bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ /* Send LOAD_DONE command to MCP */
+ if (!BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp,
+ DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
- /* Send LOAD_DONE command to MCP */
- if (!BP_NOMCP(bp)) {
- load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
- if (!load_code) {
- BNX2X_ERR("MCP response failure, aborting\n");
- rc = -EBUSY;
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- }
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+ /* setup rss */
+ rc = bnx2x_init_rss_pf(bp);
if (rc) {
- BNX2X_ERR("Queue setup failed\n");
+ BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- }
- rc = bnx2x_init_rss_pf(bp);
- if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
+ } else { /* vf */
+ for_each_eth_queue(bp, i) {
+ rc = bnx2x_vfpf_setup_q(bp, i);
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
+ }
}
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
- rc = bnx2x_set_eth_mac(bp, true);
+ if (IS_PF(bp))
+ rc = bnx2x_set_eth_mac(bp, true);
+ else /* vf */
+ rc = bnx2x_vfpf_set_mac(bp);
if (rc) {
BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error3);
}
- if (bp->pending_max) {
+ if (IS_PF(bp) && bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
bp->pending_max = 0;
}
- if (bp->port.pmf)
- bnx2x_initial_phy_init(bp, load_mode);
+ if (bp->port.pmf) {
+ rc = bnx2x_initial_phy_init(bp, load_mode);
+ if (rc)
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
/* Start fast path */
@@ -2476,8 +2711,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (CNIC_ENABLED(bp))
bnx2x_load_cnic(bp);
- /* mark driver is loaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ /* mark driver is loaded in shmem2 */
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
@@ -2486,7 +2721,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
/* Wait for all pending SP commands to complete */
- if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+ if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
BNX2X_ERR("Timeout waiting for SP elements to complete\n");
bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
return -EBUSY;
@@ -2502,10 +2737,12 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
#ifndef BNX2X_STOP_ON_ERROR
load_error3:
- bnx2x_int_disable_sync(bp, 1);
+ if (IS_PF(bp)) {
+ bnx2x_int_disable_sync(bp, 1);
- /* Clean queueable objects */
- bnx2x_squeeze_objects(bp);
+ /* Clean queueable objects */
+ bnx2x_squeeze_objects(bp);
+ }
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
@@ -2515,7 +2752,7 @@ load_error3:
/* Release IRQs */
bnx2x_free_irq(bp);
load_error2:
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
}
@@ -2523,15 +2760,35 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+
/* clear pf_load status, as it was already set */
- bnx2x_clear_pf_load(bp);
+ if (IS_PF(bp))
+ bnx2x_clear_pf_load(bp);
load_error0:
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_fw_stats_mem(bp);
bnx2x_free_mem(bp);
return rc;
#endif /* ! BNX2X_STOP_ON_ERROR */
}
+static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+{
+ u8 rc = 0, cos, i;
+
+ /* Wait until tx fastpath tasks complete */
+ for_each_tx_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+
+ for_each_cos_in_tx_queue(fp, cos)
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
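bnx2x_drain_tx_queues() is called from the unload path below, after statistics are stopped and before the chip is cleaned up (or, on a VF, before the close message is sent to the PF). Condensed from the call site later in this patch:

	/* unload path, simplified */
	bnx2x_drain_tx_queues(bp);	/* wait for tx consumers to catch up */
	if (IS_VF(bp))
		bnx2x_vfpf_close_vf(bp);
	else if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode, keep_link);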
/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
{
@@ -2541,15 +2798,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
/* mark driver is unloaded in shmem2 */
- if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
u32 val;
val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
}
- if ((bp->state == BNX2X_STATE_CLOSED) ||
- (bp->state == BNX2X_STATE_ERROR)) {
+ if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ (bp->state == BNX2X_STATE_CLOSED ||
+ bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
* during parity error recovery and is either waiting for a
* leader to complete or for other functions to unload and
@@ -2567,8 +2825,16 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
return -EINVAL;
}
- /*
- * It's important to set the bp->state to the value different from
+ /* Nothing to do during unload if previous bnx2x_nic_load()
+ * has not completed successfully - all resources are released.
+ *
+ * We can get here only after an unsuccessful ndo_* callback, during which
+ * dev->IFF_UP flag is still on.
+ */
+ if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
+ return 0;
+
+ /* It's important to set the bp->state to the value different from
* BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
* may restart the Tx from the NAPI context (see bnx2x_tx_int()).
*/
@@ -2586,16 +2852,24 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
del_timer_sync(&bp->timer);
- /* Set ALWAYS_ALIVE bit in shmem */
- bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
-
- bnx2x_drv_pulse(bp);
+ if (IS_PF(bp)) {
+ /* Set ALWAYS_ALIVE bit in shmem */
+ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ bnx2x_drv_pulse(bp);
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
+ }
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- bnx2x_save_statistics(bp);
+ /* wait till consumers catch up with producers in all queues */
+ bnx2x_drain_tx_queues(bp);
- /* Cleanup the chip if needed */
- if (unload_mode != UNLOAD_RECOVERY)
+ /* if VF, indicate to the PF that this function is going down (PF will
+ * delete sp elements and clear initializations)
+ */
+ if (IS_VF(bp))
+ bnx2x_vfpf_close_vf(bp);
+ else if (unload_mode != UNLOAD_RECOVERY)
+ /* if this is a normal/close unload, we need to clean up the chip */
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
else {
/* Send the UNLOAD_REQUEST to the MCP */
@@ -2628,7 +2902,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
* At this stage no more interrupts will arrive so we may safely clean
* the queueable objects here in case they failed to get cleaned so far.
*/
- bnx2x_squeeze_objects(bp);
+ if (IS_PF(bp))
+ bnx2x_squeeze_objects(bp);
/* There should be no more pending SP commands at this stage */
bp->sp_state = 0;
@@ -2642,19 +2917,22 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
for_each_rx_queue(bp, i)
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
- if (CNIC_LOADED(bp)) {
+ bnx2x_free_fp_mem(bp);
+ if (CNIC_LOADED(bp))
bnx2x_free_fp_mem_cnic(bp);
- bnx2x_free_mem_cnic(bp);
- }
- bnx2x_free_mem(bp);
+ if (IS_PF(bp)) {
+ bnx2x_free_mem(bp);
+ if (CNIC_LOADED(bp))
+ bnx2x_free_mem_cnic(bp);
+ }
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
/* Check if there are pending parity attentions. If there are - set
* RECOVERY_IN_PROGRESS.
*/
- if (bnx2x_chk_parity_attn(bp, &global, false)) {
+ if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
bnx2x_set_reset_in_progress(bp);
/* Set RESET_IS_GLOBAL if needed */
@@ -2666,7 +2944,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (IS_PF(bp) &&
+ !bnx2x_clear_pf_load(bp) &&
+ bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
@@ -2750,7 +3030,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
-
if (bnx2x_has_rx_work(fp)) {
work_done += bnx2x_rx_int(fp, budget - work_done);
@@ -2849,17 +3128,21 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
return bd_prod;
}
-static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
+#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
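bswab16()/bswab32() are plain byte swaps with the sparse endianness annotations forced, so the result can be assigned straight into __le16/__le32 descriptor fields without warnings; for example, bswab16(0x1234) is simply swab16()'s 0x3412 re-typed as __le16.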
+static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
+ __sum16 tsum = (__force __sum16) csum;
+
if (fix > 0)
- csum = (u16) ~csum_fold(csum_sub(csum,
- csum_partial(t_header - fix, fix, 0)));
+ tsum = ~csum_fold(csum_sub((__force __wsum) csum,
+ csum_partial(t_header - fix, fix, 0)));
else if (fix < 0)
- csum = (u16) ~csum_fold(csum_add(csum,
- csum_partial(t_header, -fix, 0)));
+ tsum = ~csum_fold(csum_add((__force __wsum) csum,
+ csum_partial(t_header, -fix, 0)));
- return swab16(csum);
+ return bswab16(csum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
@@ -2993,23 +3276,24 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
- pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+ pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
pbd->tcp_flags = pbd_tcp_flags(skb);
if (xmit_type & XMIT_GSO_V4) {
- pbd->ip_id = swab16(ip_hdr(skb)->id);
+ pbd->ip_id = bswab16(ip_hdr(skb)->id);
pbd->tcp_pseudo_csum =
- swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
} else
pbd->tcp_pseudo_csum =
- swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0));
+ bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
- pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+ pbd->global_data |=
+ cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
}
/**
@@ -3023,12 +3307,12 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* 57712 related
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
- ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3036,12 +3320,11 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
- } else
- /* We support checksum offload for TCP and UDP only.
- * No need to pass the UDP header length - it's a constant.
- */
- return skb_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ }
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
@@ -3076,8 +3359,9 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
/* for now NS flag is not used in Linux */
pbd->global_data =
- (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
- ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+ cpu_to_le16(hlen |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen_w = (skb_transport_header(skb) -
skb_network_header(skb)) >> 1;
@@ -3094,7 +3378,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
hlen = hlen*2;
if (xmit_type & XMIT_CSUM_TCP) {
- pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+ pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
} else {
s8 fix = SKB_CS_OFF(skb); /* signed! */
@@ -3174,17 +3458,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
- netif_tx_stop_queue(txq);
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
DP(NETIF_MSG_TX_QUEUED,
- "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
- ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+ ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
+ skb->len);
eth = (struct ethhdr *)skb->data;
@@ -3265,8 +3550,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le16(vlan_tx_tag_get(skb));
tx_start_bd->bd_flags.as_bitfield |=
(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
- } else
- tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ } else {
+ /* when transmitting in a vf, start bd must hold the ethertype
+ * for fw to enforce it
+ */
+#ifndef BNX2X_STOP_ON_ERROR
+ if (IS_VF(bp)) {
+#endif
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+#ifndef BNX2X_STOP_ON_ERROR
+ } else {
+ /* used by FW for packet accounting */
+ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
+#endif
+ }
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3282,9 +3581,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
- if (IS_MF_SI(bp)) {
- /*
- * fill in the MAC addresses in the PBD - for local
+
+ if (IS_MF_SI(bp) || IS_VF(bp)) {
+ /* fill in the MAC addresses in the PBD - for local
* switching
*/
bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
@@ -3565,7 +3864,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3761,6 +4059,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
} else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
+ DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
+
/* Common */
sb = &bnx2x_fp(bp, index, status_blk);
@@ -3907,7 +4207,10 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
- kfree(bp->fp->tpa_info);
+ int i;
+
+ for (i = 0; i < bp->fp_array_size; i++)
+ kfree(bp->fp[i].tpa_info);
kfree(bp->fp);
kfree(bp->sp_objs);
kfree(bp->fp_stats);
@@ -3927,18 +4230,22 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
/*
* The biggest MSI-X table we might need is as a maximum number of fast
- * path IGU SBs plus default SB (for PF).
+ * path IGU SBs plus default SB (for PF only).
*/
- msix_table_size = bp->igu_sb_cnt + 1;
+ msix_table_size = bp->igu_sb_cnt;
+ if (IS_PF(bp))
+ msix_table_size++;
+ BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
/* fp array: RSS plus CNIC related L2 queues */
fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
- BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+ bp->fp_array_size = fp_array_size;
+ BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
- fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
+ fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
- for (i = 0; i < fp_array_size; i++) {
+ for (i = 0; i < bp->fp_array_size; i++) {
fp[i].tpa_info =
kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
sizeof(struct bnx2x_agg_info), GFP_KERNEL);
@@ -3949,13 +4256,13 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
bp->fp = fp;
/* allocate sp objs */
- bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
GFP_KERNEL);
if (!bp->sp_objs)
goto alloc_err;
/* allocate fp_stats */
- bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
GFP_KERNEL);
if (!bp->fp_stats)
goto alloc_err;
@@ -4034,7 +4341,7 @@ int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
/*
- * The selected actived PHY is always after swapping (in case PHY
+ * The selected activated PHY is always after swapping (in case PHY
* swapping is enabled). So when swapping is enabled, we need to reverse
* the configuration
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0991534f61d..aee7671ff4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
#include "bnx2x.h"
+#include "bnx2x_sriov.h"
/* This is used as a replacement for an MCP if it's not present */
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -196,6 +197,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
/* Disable transactions from chip to host */
void bnx2x_pf_disable(struct bnx2x *bp);
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);
/**
* bnx2x__link_status_update - handles link status change.
@@ -401,7 +403,7 @@ void bnx2x_set_rx_mode(struct net_device *dev);
* If bp->state is OPEN, should be called with
* netif_addr_lock_bh().
*/
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
/**
* bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
@@ -413,11 +415,11 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
* @tx_accept_flags: tx accept configuration (tx switch)
* @ramrod_flags: ramrod configuration
*/
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags);
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags);
/* Parity errors related */
void bnx2x_set_pf_load(struct bnx2x *bp);
@@ -477,8 +479,6 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
/* Error handling */
-void bnx2x_panic_dump(struct bnx2x *bp);
-
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
/* validate currect fw is loaded */
@@ -496,9 +496,44 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp,
+ u16 bd_prod, u16 rx_comp_prod,
+ u16 rx_sge_prod)
+{
+ struct ustorm_eth_rx_producers rx_prods = {0};
+ u32 i;
+
+ /* Update producers */
+ rx_prods.bd_prod = bd_prod;
+ rx_prods.cqe_prod = rx_comp_prod;
+ rx_prods.sge_prod = rx_sge_prod;
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ * This is only applicable for weak-ordered memory model archs such
+ * as IA-64. The following barrier is also mandatory since the FW
+ * assumes BDs must have buffers.
+ */
+ wmb();
+
+ for (i = 0; i < sizeof(rx_prods)/4; i++)
+ REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
+ ((u32 *)&rx_prods)[i]);
+
+ mmiowb(); /* keep prod updates ordered */
+
+ DP(NETIF_MSG_RX_STATUS,
+ "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+ fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
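The ordering requirement spelled out in the comment above can be shown in isolation. A minimal userspace sketch, assuming a C11 release store stands in for the wmb()/REG_WR()/mmiowb() sequence; ring and publish_bd are illustrative names, not driver symbols:

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint64_t bd[256];          /* buffer descriptors the consumer reads */
	_Atomic uint16_t bd_prod;  /* producer index that publishes them */
};

static void publish_bd(struct ring *r, uint16_t idx, uint64_t desc)
{
	r->bd[idx % 256] = desc;   /* 1. fill the descriptor */
	/* 2. release ordering: the descriptor write above must become
	 *    visible before the new producer index, which is the same
	 *    property wmb() provides before the REG_WR() loop above.
	 */
	atomic_store_explicit(&r->bd_prod, idx + 1, memory_order_release);
}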
/* reload helper */
int bnx2x_reload_if_running(struct net_device *dev);
@@ -507,9 +542,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
-
/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
@@ -612,38 +644,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}
-static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 bd_prod,
- u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
-{
- struct ustorm_eth_rx_producers rx_prods = {0};
- u32 i;
-
- /* Update producers */
- rx_prods.bd_prod = bd_prod;
- rx_prods.cqe_prod = rx_comp_prod;
- rx_prods.sge_prod = rx_sge_prod;
-
- /*
- * Make sure that the BD and SGE data is updated before updating the
- * producers since FW might read the BD/SGE right after the producer
- * is updated.
- * This is only applicable for weak-ordered memory model archs such
- * as IA-64. The following barrier is also mandatory since FW will
- * assumes BDs must have buffers.
- */
- wmb();
-
- for (i = 0; i < sizeof(rx_prods)/4; i++)
- REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
-
- mmiowb(); /* keep prod updates ordered */
-
- DP(NETIF_MSG_RX_STATUS,
- "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
- fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
u8 segment, u16 index, u8 op,
u8 update, u32 igu_addr)
@@ -819,7 +819,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
return;
dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+ SGE_PAGES, DMA_FROM_DEVICE);
__free_pages(page, PAGES_PER_SGE_SHIFT);
sw_buf->page = NULL;
@@ -863,7 +863,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
-void bnx2x_set_int_mode(struct bnx2x *bp);
+int bnx2x_set_int_mode(struct bnx2x *bp);
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
@@ -973,7 +973,6 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
return bnx2x_func_state_change(bp, &func_params);
}
-
/**
* bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
*
@@ -982,8 +981,8 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
* @fw_lo: pointer to lower part
* @mac: pointer to MAC address
*/
-static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
- u8 *mac)
+static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+ __le16 *fw_lo, u8 *mac)
{
((u8 *)fw_hi)[0] = mac[1];
((u8 *)fw_hi)[1] = mac[0];
@@ -1108,6 +1107,9 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
bnx2x_get_path_func_num(bp));
+ bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+ bnx2x_get_path_func_num(bp));
+
/* RSS configuration object */
bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
@@ -1125,15 +1127,7 @@ static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
return fp->cl_id;
}
-static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
-
- if (!CHIP_IS_E1x(bp))
- return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
- else
- return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
-}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static inline void bnx2x_init_txdata(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata, u32 cid,
@@ -1228,7 +1222,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
#endif
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 0;
@@ -1263,7 +1257,7 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
}
netif_addr_unlock_bh(bp->dev);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
smp_mb();
@@ -1393,4 +1387,13 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
return false;
}
+/**
+ * bnx2x_fill_fw_str - Fill buffer with FW version string
+ *
+ * @bp: driver handle
+ * @buf: character buffer to fill with the fw version string
+ * @buf_len: length of the above buffer
+ *
+ */
+void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
#endif /* BNX2X_CMN_H */
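The bnx2x_fill_fw_str() prototype above is documented only by its kernel-doc; a minimal usage sketch, assuming the caller already holds a valid bp (ETHTOOL_FWVERS_LEN is merely a plausible buffer size, not one mandated here):

	char fw_ver[ETHTOOL_FWVERS_LEN];

	bnx2x_fill_fw_str(bp, fw_ver, sizeof(fw_ver));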
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 10bc093d2ca..568205436a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -416,6 +416,7 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
if (bp->dcbx_port_params.pfc.enabled &&
(!(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR) || mfw_configured))
/*
@@ -558,6 +559,7 @@ static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
int mfw_configured = SHMEM2_HAS(bp, drv_flags) &&
GET_FLAGS(SHMEM2_RD(bp, drv_flags),
1 << DRV_FLAGS_DCB_MFW_CONFIGURED);
+
bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
if (!bp->dcbx_port_params.ets.enabled ||
@@ -1904,11 +1906,13 @@ static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
struct bnx2x *bp = netdev_priv(netdev);
DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
+ /* Fail to set state to "enabled" if dcbx is disabled in nvram */
if (state && ((bp->dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ||
(bp->dcbx_enabled == BNX2X_DCBX_ENABLED_INVALID))) {
DP(BNX2X_MSG_DCB, "Can not set dcbx to enabled while it is disabled in nvm\n");
return 1;
}
+
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
}
@@ -2052,7 +2056,6 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
-
if (setting) {
bp->dcbx_config_params.admin_pfc_bitmap |= (1 << prio);
bp->dcbx_config_params.admin_pfc_tx_enable = 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 06c7a043594..d153f44cf8f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2012 Broadcom Corporation
+ * Copyright 2009-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b926f58e983..bff5e33eaa1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -22,120 +22,37 @@
#ifndef BNX2X_DUMP_H
#define BNX2X_DUMP_H
+/* WaitP Definitions */
+#define DRV_DUMP_XSTORM_WAITP_ADDRESS 0x2b8a80
+#define DRV_DUMP_TSTORM_WAITP_ADDRESS 0x1b8a80
+#define DRV_DUMP_USTORM_WAITP_ADDRESS 0x338a80
+#define DRV_DUMP_CSTORM_WAITP_ADDRESS 0x238a80
-/*definitions */
-#define XSTORM_WAITP_ADDR 0x2b8a80
-#define TSTORM_WAITP_ADDR 0x1b8a80
-#define USTORM_WAITP_ADDR 0x338a80
-#define CSTORM_WAITP_ADDR 0x238a80
-#define TSTORM_CAM_MODE 0x1B1440
+/* Possible Chips */
+#define DUMP_CHIP_E1 1
+#define DUMP_CHIP_E1H 2
+#define DUMP_CHIP_E2 4
+#define DUMP_CHIP_E3A0 8
+#define DUMP_CHIP_E3B0 16
+#define DUMP_PATH_0 512
+#define DUMP_PATH_1 1024
+#define NUM_PRESETS 13
+#define NUM_CHIPS 5
-#define MAX_TIMER_PENDING 200
-#define TIMER_SCAN_DONT_CARE 0xFF
-#define RI_E1 0x1
-#define RI_E1H 0x2
-#define RI_E2 0x4
-#define RI_E3 0x8
-#define RI_E3B0 0x10
-#define RI_ONLINE 0x100
-#define RI_OFFLINE 0x0
-#define RI_PATH0_DUMP 0x200
-#define RI_PATH1_DUMP 0x400
-
-#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
-#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
-#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
-#define RI_E1E2_ONLINE (RI_E1 | RI_E2 | RI_ONLINE)
-#define RI_E1HE2_ONLINE (RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E1E1HE2_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-#define RI_E3_ONLINE (RI_E3 | RI_ONLINE)
-#define RI_E1E3_ONLINE (RI_E1 | RI_E3 | RI_ONLINE)
-#define RI_E1HE3_ONLINE (RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE3_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
-#define RI_E2E3_ONLINE (RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E2E3_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1HE2E3_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E1E1HE2E3_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
-#define RI_E3B0_ONLINE (RI_E3B0 | RI_ONLINE)
-#define RI_E1E3B0_ONLINE (RI_E1 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3B0_ONLINE (RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3B0_ONLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3B0_ONLINE (RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3B0_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
-#define RI_E3E3B0_ONLINE (RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E3E3B0_ONLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE3E3B0_ONLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE3E3B0_ONLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E2E3E3B0_ONLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E2E3E3B0_ONLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1HE2E3E3B0_ONLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1E1HE2E3E3B0_ONLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
-#define RI_E1_OFFLINE (RI_E1 | RI_OFFLINE)
-#define RI_E1H_OFFLINE (RI_E1H | RI_OFFLINE)
-#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H | RI_OFFLINE)
-#define RI_E2_OFFLINE (RI_E2 | RI_OFFLINE)
-#define RI_E1E2_OFFLINE (RI_E1 | RI_E2 | RI_OFFLINE)
-#define RI_E1HE2_OFFLINE (RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E1E1HE2_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
-#define RI_E3_OFFLINE (RI_E3 | RI_OFFLINE)
-#define RI_E1E3_OFFLINE (RI_E1 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE3_OFFLINE (RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE3_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
-#define RI_E2E3_OFFLINE (RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E2E3_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1HE2E3_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E1E1HE2E3_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
-#define RI_E3B0_OFFLINE (RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3B0_OFFLINE (RI_E1 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3B0_OFFLINE (RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3B0_OFFLINE (RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
-#define RI_E3E3B0_OFFLINE (RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E3E3B0_OFFLINE (RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE3E3B0_OFFLINE (RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE3E3B0_OFFLINE (RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E2E3E3B0_OFFLINE (RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E2E3E3B0_OFFLINE (RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1HE2E3E3B0_OFFLINE (RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_E1E1HE2E3E3B0_OFFLINE \
- (RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
-#define RI_ALL_ONLINE RI_E1E1HE2E3E3B0_ONLINE
-#define RI_ALL_OFFLINE RI_E1E1HE2E3E3B0_OFFLINE
-
-#define DBG_DMP_TRACE_BUFFER_SIZE 0x800
-#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
- ((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
-
-struct dump_sign {
- u32 time_stamp;
- u32 diag_ver;
- u32 grc_dump_ver;
-};
-
-struct dump_hdr {
- u32 hdr_size; /* in dwords, excluding this field */
- struct dump_sign dump_sign;
- u32 xstorm_waitp;
- u32 tstorm_waitp;
- u32 ustorm_waitp;
- u32 cstorm_waitp;
- u16 info;
- u8 idle_chk;
- u8 reserved;
+struct dump_header {
+ u32 header_size; /* Size in DWORDs excluding this field */
+ u32 version;
+ u32 preset;
+ u32 dump_meta_data; /* OR of CHIP and PATH. */
};
+#define BNX2X_DUMP_VERSION 0x50acff01
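The new dump_header replaces the old dump_hdr/dump_sign pair. A short sketch of how its fields would plausibly be filled, using only the macros defined above (the function and its parameters are illustrative, not driver symbols):

static void fill_dump_header(struct dump_header *hdr, bool is_e2, bool path1,
			     u32 preset)
{
	/* "Size in DWORDs excluding this field": 3 for the current layout */
	hdr->header_size = (sizeof(*hdr) / sizeof(u32)) - 1;
	hdr->version = BNX2X_DUMP_VERSION;
	hdr->preset = preset;                     /* 1 .. NUM_PRESETS */
	hdr->dump_meta_data = (is_e2 ? DUMP_CHIP_E2 : DUMP_CHIP_E1) |
			      (path1 ? DUMP_PATH_1 : DUMP_PATH_0);
}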
struct reg_addr {
u32 addr;
u32 size;
- u16 info;
+ u32 chips;
+ u32 presets;
};
struct wreg_addr {
@@ -143,1005 +60,2168 @@ struct wreg_addr {
u32 size;
u32 read_regs_count;
const u32 *read_regs;
- u16 info;
+ u32 chips;
+ u32 presets;
+};
+
+#define PAGE_MODE_VALUES_E2 2
+#define PAGE_READ_REGS_E2 1
+#define PAGE_WRITE_REGS_E2 1
+static const u32 page_vals_e2[] = {0, 128};
+static const u32 page_write_regs_e2[] = {328476};
+static const struct reg_addr page_read_regs_e2[] = {
+ {0x58000, 4608, DUMP_CHIP_E2, 0x30}
+};
+
+#define PAGE_MODE_VALUES_E3 2
+#define PAGE_READ_REGS_E3 1
+#define PAGE_WRITE_REGS_E3 1
+static const u32 page_vals_e3[] = {0, 128};
+static const u32 page_write_regs_e3[] = {328476};
+static const struct reg_addr page_read_regs_e3[] = {
+ {0x58000, 4608, DUMP_CHIP_E3A0 | DUMP_CHIP_E3B0, 0x30}
};
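In the reworked table below, each reg_addrs[] entry carries a chip bitmask built from the DUMP_CHIP_* values (so 0x1f means the entry applies to every chip) and a preset bitmask, replacing the old RI_* info word. A hedged sketch of the per-entry filter the dump code presumably applies (reg_applies and its parameters are illustrative names only):

static bool reg_applies(const struct reg_addr *reg, u32 chip_bit, u32 preset_bit)
{
	/* chip_bit is a single DUMP_CHIP_* value; preset_bit is 1 << (preset - 1) */
	return (reg->chips & chip_bit) && (reg->presets & preset_bit);
}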
static const struct reg_addr reg_addrs[] = {
- { 0x2000, 341, RI_ALL_ONLINE },
- { 0x2800, 103, RI_ALL_ONLINE },
- { 0x3000, 287, RI_ALL_ONLINE },
- { 0x3800, 331, RI_ALL_ONLINE },
- { 0x8800, 6, RI_ALL_ONLINE },
- { 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x9000, 147, RI_E2E3E3B0_ONLINE },
- { 0x924c, 1, RI_E2_ONLINE },
- { 0x9250, 16, RI_E2E3E3B0_ONLINE },
- { 0x9400, 33, RI_E2E3E3B0_ONLINE },
- { 0x9484, 5, RI_E3E3B0_ONLINE },
- { 0xa000, 27, RI_ALL_ONLINE },
- { 0xa06c, 1, RI_E1E1H_ONLINE },
- { 0xa070, 71, RI_ALL_ONLINE },
- { 0xa18c, 4, RI_E1E1H_ONLINE },
- { 0xa19c, 62, RI_ALL_ONLINE },
- { 0xa294, 2, RI_E1E1H_ONLINE },
- { 0xa29c, 2, RI_ALL_ONLINE },
- { 0xa2a4, 2, RI_E1E1HE2_ONLINE },
- { 0xa2ac, 52, RI_ALL_ONLINE },
- { 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3b8, 2, RI_E3E3B0_ONLINE },
- { 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa400, 40, RI_ALL_ONLINE },
- { 0xa4a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa4a4, 2, RI_ALL_ONLINE },
- { 0xa4ac, 2, RI_E1E1H_ONLINE },
- { 0xa4b4, 1, RI_E1E1HE2_ONLINE },
- { 0xa4b8, 2, RI_E1E1H_ONLINE },
- { 0xa4c0, 3, RI_ALL_ONLINE },
- { 0xa4cc, 5, RI_E1E1H_ONLINE },
- { 0xa4e0, 3, RI_ALL_ONLINE },
- { 0xa4fc, 2, RI_ALL_ONLINE },
- { 0xa504, 1, RI_E1E1H_ONLINE },
- { 0xa508, 3, RI_ALL_ONLINE },
- { 0xa518, 1, RI_ALL_ONLINE },
- { 0xa520, 1, RI_ALL_ONLINE },
- { 0xa528, 1, RI_ALL_ONLINE },
- { 0xa530, 1, RI_ALL_ONLINE },
- { 0xa538, 1, RI_ALL_ONLINE },
- { 0xa540, 1, RI_ALL_ONLINE },
- { 0xa548, 1, RI_E1E1H_ONLINE },
- { 0xa550, 1, RI_E1E1H_ONLINE },
- { 0xa558, 1, RI_E1E1H_ONLINE },
- { 0xa560, 1, RI_E1E1H_ONLINE },
- { 0xa568, 1, RI_E1E1H_ONLINE },
- { 0xa570, 1, RI_ALL_ONLINE },
- { 0xa580, 1, RI_ALL_ONLINE },
- { 0xa590, 1, RI_ALL_ONLINE },
- { 0xa5a0, 1, RI_E1E1HE2_ONLINE },
- { 0xa5c0, 1, RI_ALL_ONLINE },
- { 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0xa5f8, 1, RI_E1HE2_ONLINE },
- { 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
- { 0xa620, 6, RI_E2E3E3B0_ONLINE },
- { 0xa638, 20, RI_E2_ONLINE },
- { 0xa688, 42, RI_E2E3E3B0_ONLINE },
- { 0xa730, 1, RI_E2_ONLINE },
- { 0xa734, 2, RI_E2E3E3B0_ONLINE },
- { 0xa73c, 4, RI_E2_ONLINE },
- { 0xa74c, 5, RI_E2E3E3B0_ONLINE },
- { 0xa760, 5, RI_E2_ONLINE },
- { 0xa774, 7, RI_E2E3E3B0_ONLINE },
- { 0xa790, 15, RI_E2_ONLINE },
- { 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
- { 0xa7e0, 6, RI_E3E3B0_ONLINE },
- { 0xa800, 18, RI_E2_ONLINE },
- { 0xa848, 33, RI_E2E3E3B0_ONLINE },
- { 0xa8cc, 2, RI_E3E3B0_ONLINE },
- { 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
- { 0xa8e4, 1, RI_E3E3B0_ONLINE },
- { 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
- { 0xa8f8, 30, RI_E3E3B0_ONLINE },
- { 0xa974, 73, RI_E3E3B0_ONLINE },
- { 0xac30, 1, RI_E3E3B0_ONLINE },
- { 0xac40, 1, RI_E3E3B0_ONLINE },
- { 0xac50, 1, RI_E3E3B0_ONLINE },
- { 0xac60, 1, RI_E3B0_ONLINE },
- { 0x10000, 9, RI_ALL_ONLINE },
- { 0x10024, 1, RI_E1E1HE2_ONLINE },
- { 0x10028, 5, RI_ALL_ONLINE },
- { 0x1003c, 6, RI_E1E1HE2_ONLINE },
- { 0x10054, 20, RI_ALL_ONLINE },
- { 0x100a4, 4, RI_E1E1HE2_ONLINE },
- { 0x100b4, 11, RI_ALL_ONLINE },
- { 0x100e0, 4, RI_E1E1HE2_ONLINE },
- { 0x100f0, 8, RI_ALL_ONLINE },
- { 0x10110, 6, RI_E1E1HE2_ONLINE },
- { 0x10128, 110, RI_ALL_ONLINE },
- { 0x102e0, 4, RI_E1E1HE2_ONLINE },
- { 0x102f0, 18, RI_ALL_ONLINE },
- { 0x10338, 20, RI_E1E1HE2_ONLINE },
- { 0x10388, 10, RI_ALL_ONLINE },
- { 0x10400, 6, RI_E1E1HE2_ONLINE },
- { 0x10418, 6, RI_ALL_ONLINE },
- { 0x10430, 10, RI_E1E1HE2_ONLINE },
- { 0x10458, 22, RI_ALL_ONLINE },
- { 0x104b0, 12, RI_E1E1HE2_ONLINE },
- { 0x104e0, 1, RI_ALL_ONLINE },
- { 0x104e8, 2, RI_ALL_ONLINE },
- { 0x104f4, 2, RI_ALL_ONLINE },
- { 0x10500, 146, RI_ALL_ONLINE },
- { 0x10750, 2, RI_E1E1HE2_ONLINE },
- { 0x10760, 2, RI_E1E1HE2_ONLINE },
- { 0x10770, 2, RI_E1E1HE2_ONLINE },
- { 0x10780, 2, RI_E1E1HE2_ONLINE },
- { 0x10790, 2, RI_ALL_ONLINE },
- { 0x107a0, 2, RI_E1E1HE2_ONLINE },
- { 0x107b0, 2, RI_E1E1HE2_ONLINE },
- { 0x107c0, 2, RI_E1E1HE2_ONLINE },
- { 0x107d0, 2, RI_E1E1HE2_ONLINE },
- { 0x107e0, 2, RI_ALL_ONLINE },
- { 0x10880, 2, RI_ALL_ONLINE },
- { 0x10900, 2, RI_ALL_ONLINE },
- { 0x16000, 1, RI_E1HE2_ONLINE },
- { 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
- { 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x16090, 4, RI_E1HE2E3_ONLINE },
- { 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0x160dc, 2, RI_E1HE2_ONLINE },
- { 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
- { 0x1610c, 2, RI_E1HE2_ONLINE },
- { 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
- { 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x18010, 35, RI_E2E3E3B0_ONLINE },
- { 0x180a4, 2, RI_E2E3E3B0_ONLINE },
- { 0x180c0, 9, RI_E2E3E3B0_ONLINE },
- { 0x180e4, 1, RI_E2E3_ONLINE },
- { 0x180e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x180f0, 1, RI_E2E3_ONLINE },
- { 0x180f4, 79, RI_E2E3E3B0_ONLINE },
- { 0x18230, 1, RI_E2E3_ONLINE },
- { 0x18234, 2, RI_E2E3E3B0_ONLINE },
- { 0x1823c, 1, RI_E2E3_ONLINE },
- { 0x18240, 13, RI_E2E3E3B0_ONLINE },
- { 0x18274, 1, RI_E2_ONLINE },
- { 0x18278, 81, RI_E2E3E3B0_ONLINE },
- { 0x18440, 63, RI_E2E3E3B0_ONLINE },
- { 0x18570, 42, RI_E3E3B0_ONLINE },
- { 0x18618, 25, RI_E3B0_ONLINE },
- { 0x18680, 44, RI_E3B0_ONLINE },
- { 0x18748, 12, RI_E3B0_ONLINE },
- { 0x18788, 1, RI_E3B0_ONLINE },
- { 0x1879c, 6, RI_E3B0_ONLINE },
- { 0x187c4, 51, RI_E3B0_ONLINE },
- { 0x18a00, 48, RI_E3B0_ONLINE },
- { 0x20000, 24, RI_ALL_ONLINE },
- { 0x20060, 8, RI_ALL_ONLINE },
- { 0x20080, 94, RI_ALL_ONLINE },
- { 0x201f8, 1, RI_E1E1H_ONLINE },
- { 0x201fc, 1, RI_ALL_ONLINE },
- { 0x20200, 1, RI_E1E1H_ONLINE },
- { 0x20204, 1, RI_ALL_ONLINE },
- { 0x20208, 1, RI_E1E1H_ONLINE },
- { 0x2020c, 39, RI_ALL_ONLINE },
- { 0x202c8, 1, RI_E2E3E3B0_ONLINE },
- { 0x202d8, 4, RI_E2E3E3B0_ONLINE },
- { 0x202f0, 1, RI_E3B0_ONLINE },
- { 0x20400, 2, RI_ALL_ONLINE },
- { 0x2040c, 8, RI_ALL_ONLINE },
- { 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0x20480, 1, RI_ALL_ONLINE },
- { 0x20500, 1, RI_ALL_ONLINE },
- { 0x20600, 1, RI_ALL_ONLINE },
- { 0x28000, 1, RI_ALL_ONLINE },
- { 0x28004, 8191, RI_ALL_OFFLINE },
- { 0x30000, 1, RI_ALL_ONLINE },
- { 0x30004, 16383, RI_ALL_OFFLINE },
- { 0x40000, 98, RI_ALL_ONLINE },
- { 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
- { 0x401c8, 1, RI_E1H_ONLINE },
- { 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x401d4, 2, RI_E2E3E3B0_ONLINE },
- { 0x40200, 4, RI_ALL_ONLINE },
- { 0x40220, 6, RI_E2E3E3B0_ONLINE },
- { 0x40238, 8, RI_E2E3_ONLINE },
- { 0x40258, 4, RI_E2E3E3B0_ONLINE },
- { 0x40268, 2, RI_E3E3B0_ONLINE },
- { 0x40270, 17, RI_E3B0_ONLINE },
- { 0x40400, 43, RI_ALL_ONLINE },
- { 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
- { 0x404e0, 1, RI_E2E3E3B0_ONLINE },
- { 0x40500, 2, RI_ALL_ONLINE },
- { 0x40510, 2, RI_ALL_ONLINE },
- { 0x40520, 2, RI_ALL_ONLINE },
- { 0x40530, 2, RI_ALL_ONLINE },
- { 0x40540, 2, RI_ALL_ONLINE },
- { 0x40550, 10, RI_E2E3E3B0_ONLINE },
- { 0x40610, 2, RI_E2E3E3B0_ONLINE },
- { 0x42000, 164, RI_ALL_ONLINE },
- { 0x422c0, 4, RI_E2E3E3B0_ONLINE },
- { 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x422e8, 1, RI_E2E3E3B0_ONLINE },
- { 0x42400, 49, RI_ALL_ONLINE },
- { 0x424c8, 38, RI_ALL_ONLINE },
- { 0x42568, 2, RI_ALL_ONLINE },
- { 0x42640, 5, RI_E2E3E3B0_ONLINE },
- { 0x42800, 1, RI_ALL_ONLINE },
- { 0x50000, 1, RI_ALL_ONLINE },
- { 0x50004, 19, RI_ALL_ONLINE },
- { 0x50050, 8, RI_ALL_ONLINE },
- { 0x50070, 88, RI_ALL_ONLINE },
- { 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x50200, 2, RI_ALL_ONLINE },
- { 0x5020c, 7, RI_ALL_ONLINE },
- { 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x50240, 1, RI_ALL_ONLINE },
- { 0x50280, 1, RI_ALL_ONLINE },
- { 0x50300, 1, RI_E2E3E3B0_ONLINE },
- { 0x5030c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50318, 1, RI_E2E3E3B0_ONLINE },
- { 0x5031c, 1, RI_E2E3E3B0_ONLINE },
- { 0x50320, 2, RI_E2E3E3B0_ONLINE },
- { 0x50330, 1, RI_E3B0_ONLINE },
- { 0x52000, 1, RI_ALL_ONLINE },
- { 0x54000, 1, RI_ALL_ONLINE },
- { 0x54004, 3327, RI_ALL_OFFLINE },
- { 0x58000, 1, RI_ALL_ONLINE },
- { 0x58004, 8191, RI_E1E1H_OFFLINE },
- { 0x60000, 26, RI_ALL_ONLINE },
- { 0x60068, 8, RI_E1E1H_ONLINE },
- { 0x60088, 12, RI_ALL_ONLINE },
- { 0x600b8, 9, RI_E1E1H_ONLINE },
- { 0x600dc, 1, RI_ALL_ONLINE },
- { 0x600e0, 5, RI_E1E1H_ONLINE },
- { 0x600f4, 1, RI_E1E1HE2_ONLINE },
- { 0x600f8, 1, RI_E1E1H_ONLINE },
- { 0x600fc, 8, RI_ALL_ONLINE },
- { 0x6013c, 24, RI_E1H_ONLINE },
- { 0x6019c, 2, RI_E2E3E3B0_ONLINE },
- { 0x601ac, 18, RI_E2E3E3B0_ONLINE },
- { 0x60200, 1, RI_ALL_ONLINE },
- { 0x60204, 2, RI_ALL_OFFLINE },
- { 0x60210, 13, RI_E2E3E3B0_ONLINE },
- { 0x60244, 16, RI_E3B0_ONLINE },
- { 0x61000, 1, RI_ALL_ONLINE },
- { 0x61004, 511, RI_ALL_OFFLINE },
- { 0x61800, 512, RI_E3E3B0_OFFLINE },
- { 0x70000, 8, RI_ALL_ONLINE },
- { 0x70020, 8184, RI_ALL_OFFLINE },
- { 0x78000, 8192, RI_E3E3B0_OFFLINE },
- { 0x85000, 3, RI_ALL_OFFLINE },
- { 0x8501c, 7, RI_ALL_OFFLINE },
- { 0x85048, 1, RI_ALL_OFFLINE },
- { 0x85200, 32, RI_ALL_OFFLINE },
- { 0xb0000, 16384, RI_E1H_OFFLINE },
- { 0xc1000, 7, RI_ALL_ONLINE },
- { 0xc103c, 2, RI_E2E3E3B0_ONLINE },
- { 0xc1800, 2, RI_ALL_ONLINE },
- { 0xc2000, 164, RI_ALL_ONLINE },
- { 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
- { 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
- { 0xc2400, 49, RI_ALL_ONLINE },
- { 0xc24c8, 38, RI_ALL_ONLINE },
- { 0xc2568, 2, RI_ALL_ONLINE },
- { 0xc2600, 1, RI_ALL_ONLINE },
- { 0xc4000, 165, RI_ALL_ONLINE },
- { 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
- { 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
- { 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xc4400, 51, RI_ALL_ONLINE },
- { 0xc44d0, 38, RI_ALL_ONLINE },
- { 0xc4570, 2, RI_ALL_ONLINE },
- { 0xc4578, 5, RI_E2E3E3B0_ONLINE },
- { 0xc4600, 1, RI_ALL_ONLINE },
- { 0xd0000, 19, RI_ALL_ONLINE },
- { 0xd004c, 8, RI_ALL_ONLINE },
- { 0xd006c, 91, RI_ALL_ONLINE },
- { 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
- { 0xd0200, 2, RI_ALL_ONLINE },
- { 0xd020c, 7, RI_ALL_ONLINE },
- { 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xd0280, 1, RI_ALL_ONLINE },
- { 0xd0300, 1, RI_ALL_ONLINE },
- { 0xd0400, 1, RI_ALL_ONLINE },
- { 0xd0818, 1, RI_E3B0_ONLINE },
- { 0xd4000, 1, RI_ALL_ONLINE },
- { 0xd4004, 2559, RI_ALL_OFFLINE },
- { 0xd8000, 1, RI_ALL_ONLINE },
- { 0xd8004, 8191, RI_ALL_OFFLINE },
- { 0xe0000, 21, RI_ALL_ONLINE },
- { 0xe0054, 8, RI_ALL_ONLINE },
- { 0xe0074, 49, RI_ALL_ONLINE },
- { 0xe0138, 1, RI_E1E1H_ONLINE },
- { 0xe013c, 35, RI_ALL_ONLINE },
- { 0xe01f4, 1, RI_E2_ONLINE },
- { 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
- { 0xe0200, 2, RI_ALL_ONLINE },
- { 0xe020c, 8, RI_ALL_ONLINE },
- { 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
- { 0xe0280, 1, RI_ALL_ONLINE },
- { 0xe0300, 1, RI_ALL_ONLINE },
- { 0xe0400, 1, RI_E3B0_ONLINE },
- { 0xe1000, 1, RI_ALL_ONLINE },
- { 0xe2000, 1, RI_ALL_ONLINE },
- { 0xe2004, 2047, RI_ALL_OFFLINE },
- { 0xf0000, 1, RI_ALL_ONLINE },
- { 0xf0004, 16383, RI_ALL_OFFLINE },
- { 0x101000, 12, RI_ALL_ONLINE },
- { 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x101054, 3, RI_E2E3E3B0_ONLINE },
- { 0x101100, 1, RI_ALL_ONLINE },
- { 0x101800, 8, RI_ALL_ONLINE },
- { 0x102000, 18, RI_ALL_ONLINE },
- { 0x102068, 6, RI_E2E3E3B0_ONLINE },
- { 0x102080, 17, RI_ALL_ONLINE },
- { 0x1020c8, 8, RI_E1H_ONLINE },
- { 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
- { 0x102400, 1, RI_ALL_ONLINE },
- { 0x103000, 26, RI_ALL_ONLINE },
- { 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
- { 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
- { 0x1030b4, 1, RI_E2_ONLINE },
- { 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
- { 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
- { 0x103400, 1, RI_E2E3E3B0_ONLINE },
- { 0x103404, 135, RI_E2E3E3B0_OFFLINE },
- { 0x103800, 8, RI_ALL_ONLINE },
- { 0x104000, 63, RI_ALL_ONLINE },
- { 0x10411c, 16, RI_E2E3E3B0_ONLINE },
- { 0x104200, 17, RI_ALL_ONLINE },
- { 0x104400, 64, RI_ALL_ONLINE },
- { 0x104500, 192, RI_ALL_OFFLINE },
- { 0x104800, 64, RI_ALL_ONLINE },
- { 0x104900, 192, RI_ALL_OFFLINE },
- { 0x105000, 256, RI_ALL_ONLINE },
- { 0x105400, 768, RI_ALL_OFFLINE },
- { 0x107000, 7, RI_E2E3E3B0_ONLINE },
- { 0x10701c, 1, RI_E3E3B0_ONLINE },
- { 0x108000, 33, RI_E1E1H_ONLINE },
- { 0x1080ac, 5, RI_E1H_ONLINE },
- { 0x108100, 5, RI_E1E1H_ONLINE },
- { 0x108120, 5, RI_E1E1H_ONLINE },
- { 0x108200, 74, RI_E1E1H_ONLINE },
- { 0x108400, 74, RI_E1E1H_ONLINE },
- { 0x108800, 152, RI_E1E1H_ONLINE },
- { 0x110000, 111, RI_E2E3E3B0_ONLINE },
- { 0x1101dc, 1, RI_E3E3B0_ONLINE },
- { 0x110200, 4, RI_E2E3E3B0_ONLINE },
- { 0x120000, 2, RI_ALL_ONLINE },
- { 0x120008, 4, RI_ALL_ONLINE },
- { 0x120018, 3, RI_ALL_ONLINE },
- { 0x120024, 4, RI_ALL_ONLINE },
- { 0x120034, 3, RI_ALL_ONLINE },
- { 0x120040, 4, RI_ALL_ONLINE },
- { 0x120050, 3, RI_ALL_ONLINE },
- { 0x12005c, 4, RI_ALL_ONLINE },
- { 0x12006c, 3, RI_ALL_ONLINE },
- { 0x120078, 4, RI_ALL_ONLINE },
- { 0x120088, 3, RI_ALL_ONLINE },
- { 0x120094, 4, RI_ALL_ONLINE },
- { 0x1200a4, 3, RI_ALL_ONLINE },
- { 0x1200b0, 4, RI_ALL_ONLINE },
- { 0x1200c0, 3, RI_ALL_ONLINE },
- { 0x1200cc, 4, RI_ALL_ONLINE },
- { 0x1200dc, 3, RI_ALL_ONLINE },
- { 0x1200e8, 4, RI_ALL_ONLINE },
- { 0x1200f8, 3, RI_ALL_ONLINE },
- { 0x120104, 4, RI_ALL_ONLINE },
- { 0x120114, 1, RI_ALL_ONLINE },
- { 0x120118, 22, RI_ALL_ONLINE },
- { 0x120170, 2, RI_E1E1H_ONLINE },
- { 0x120178, 243, RI_ALL_ONLINE },
- { 0x120544, 4, RI_E1E1H_ONLINE },
- { 0x120554, 6, RI_ALL_ONLINE },
- { 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
- { 0x1205f4, 1, RI_E1HE2_ONLINE },
- { 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
- { 0x120618, 1, RI_E2E3E3B0_ONLINE },
- { 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
- { 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
- { 0x120698, 3, RI_E2E3E3B0_ONLINE },
- { 0x1206a4, 1, RI_E2_ONLINE },
- { 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
- { 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
- { 0x1207dc, 1, RI_E2_ONLINE },
- { 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
- { 0x12080c, 65, RI_ALL_ONLINE },
- { 0x120910, 7, RI_E2E3E3B0_ONLINE },
- { 0x120930, 9, RI_E2E3E3B0_ONLINE },
- { 0x12095c, 37, RI_E3E3B0_ONLINE },
- { 0x120a00, 2, RI_E1E1HE2_ONLINE },
- { 0x120b00, 1, RI_E3E3B0_ONLINE },
- { 0x122000, 2, RI_ALL_ONLINE },
- { 0x122008, 2046, RI_E1_OFFLINE },
- { 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
- { 0x130000, 35, RI_E2E3E3B0_ONLINE },
- { 0x130100, 29, RI_E2E3E3B0_ONLINE },
- { 0x130180, 1, RI_E2E3E3B0_ONLINE },
- { 0x130200, 1, RI_E2E3E3B0_ONLINE },
- { 0x130280, 1, RI_E2E3E3B0_ONLINE },
- { 0x130300, 5, RI_E2E3E3B0_ONLINE },
- { 0x130380, 1, RI_E2E3E3B0_ONLINE },
- { 0x130400, 1, RI_E2E3E3B0_ONLINE },
- { 0x130480, 5, RI_E2E3E3B0_ONLINE },
- { 0x130800, 72, RI_E2E3E3B0_ONLINE },
- { 0x131000, 136, RI_E2E3E3B0_ONLINE },
- { 0x132000, 148, RI_E2E3E3B0_ONLINE },
- { 0x134000, 544, RI_E2E3E3B0_ONLINE },
- { 0x140000, 1, RI_ALL_ONLINE },
- { 0x140004, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140028, 8, RI_ALL_ONLINE },
- { 0x140048, 10, RI_E1E1HE2E3_ONLINE },
- { 0x140070, 1, RI_ALL_ONLINE },
- { 0x140074, 10, RI_E1E1HE2E3_ONLINE },
- { 0x14009c, 1, RI_ALL_ONLINE },
- { 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
- { 0x1400b4, 7, RI_ALL_ONLINE },
- { 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
- { 0x1400f8, 2, RI_ALL_ONLINE },
- { 0x140100, 5, RI_E1E1H_ONLINE },
- { 0x140114, 5, RI_E1E1HE2E3_ONLINE },
- { 0x140128, 7, RI_ALL_ONLINE },
- { 0x140144, 9, RI_E1E1HE2E3_ONLINE },
- { 0x140168, 8, RI_ALL_ONLINE },
- { 0x140188, 3, RI_E1E1HE2E3_ONLINE },
- { 0x140194, 13, RI_ALL_ONLINE },
- { 0x140200, 6, RI_E1E1HE2E3_ONLINE },
- { 0x140260, 4, RI_E2E3_ONLINE },
- { 0x140280, 4, RI_E2E3_ONLINE },
- { 0x1402e0, 2, RI_E2E3_ONLINE },
- { 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
- { 0x1402f0, 9, RI_E2E3_ONLINE },
- { 0x140314, 44, RI_E3B0_ONLINE },
- { 0x144000, 4, RI_E1E1H_ONLINE },
- { 0x148000, 4, RI_E1E1H_ONLINE },
- { 0x14c000, 4, RI_E1E1H_ONLINE },
- { 0x150000, 4, RI_E1E1H_ONLINE },
- { 0x154000, 4, RI_E1E1H_ONLINE },
- { 0x158000, 4, RI_E1E1H_ONLINE },
- { 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x15c008, 5, RI_E1H_ONLINE },
- { 0x15c020, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c040, 1, RI_E2E3_ONLINE },
- { 0x15c044, 2, RI_E2E3E3B0_ONLINE },
- { 0x15c04c, 8, RI_E2E3_ONLINE },
- { 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c090, 13, RI_E2E3E3B0_ONLINE },
- { 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
- { 0x15c128, 2, RI_E2E3_ONLINE },
- { 0x15c130, 8, RI_E2E3E3B0_ONLINE },
- { 0x15c150, 2, RI_E3E3B0_ONLINE },
- { 0x15c158, 2, RI_E3_ONLINE },
- { 0x15c160, 149, RI_E3B0_ONLINE },
- { 0x161000, 7, RI_ALL_ONLINE },
- { 0x16103c, 2, RI_E2E3E3B0_ONLINE },
- { 0x161800, 2, RI_ALL_ONLINE },
- { 0x162000, 54, RI_E3E3B0_ONLINE },
- { 0x162200, 60, RI_E3E3B0_ONLINE },
- { 0x162400, 54, RI_E3E3B0_ONLINE },
- { 0x162600, 60, RI_E3E3B0_ONLINE },
- { 0x162800, 54, RI_E3E3B0_ONLINE },
- { 0x162a00, 60, RI_E3E3B0_ONLINE },
- { 0x162c00, 54, RI_E3E3B0_ONLINE },
- { 0x162e00, 60, RI_E3E3B0_ONLINE },
- { 0x164000, 60, RI_ALL_ONLINE },
- { 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x164118, 15, RI_E2E3E3B0_ONLINE },
- { 0x164200, 1, RI_ALL_ONLINE },
- { 0x164208, 1, RI_ALL_ONLINE },
- { 0x164210, 1, RI_ALL_ONLINE },
- { 0x164218, 1, RI_ALL_ONLINE },
- { 0x164220, 1, RI_ALL_ONLINE },
- { 0x164228, 1, RI_ALL_ONLINE },
- { 0x164230, 1, RI_ALL_ONLINE },
- { 0x164238, 1, RI_ALL_ONLINE },
- { 0x164240, 1, RI_ALL_ONLINE },
- { 0x164248, 1, RI_ALL_ONLINE },
- { 0x164250, 1, RI_ALL_ONLINE },
- { 0x164258, 1, RI_ALL_ONLINE },
- { 0x164260, 1, RI_ALL_ONLINE },
- { 0x164270, 2, RI_ALL_ONLINE },
- { 0x164280, 2, RI_ALL_ONLINE },
- { 0x164800, 2, RI_ALL_ONLINE },
- { 0x165000, 2, RI_ALL_ONLINE },
- { 0x166000, 164, RI_ALL_ONLINE },
- { 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x166400, 49, RI_ALL_ONLINE },
- { 0x1664c8, 38, RI_ALL_ONLINE },
- { 0x166568, 2, RI_ALL_ONLINE },
- { 0x166570, 5, RI_E2E3E3B0_ONLINE },
- { 0x166800, 1, RI_ALL_ONLINE },
- { 0x168000, 137, RI_ALL_ONLINE },
- { 0x168224, 2, RI_E1E1H_ONLINE },
- { 0x16822c, 29, RI_ALL_ONLINE },
- { 0x1682a0, 12, RI_E1E1H_ONLINE },
- { 0x1682d0, 12, RI_ALL_ONLINE },
- { 0x168300, 2, RI_E1E1H_ONLINE },
- { 0x168308, 68, RI_ALL_ONLINE },
- { 0x168418, 2, RI_E1E1H_ONLINE },
- { 0x168420, 6, RI_ALL_ONLINE },
- { 0x168800, 19, RI_ALL_ONLINE },
- { 0x168900, 1, RI_ALL_ONLINE },
- { 0x168a00, 128, RI_ALL_ONLINE },
- { 0x16a000, 1, RI_ALL_ONLINE },
- { 0x16a004, 1535, RI_ALL_OFFLINE },
- { 0x16c000, 1, RI_ALL_ONLINE },
- { 0x16c004, 1535, RI_ALL_OFFLINE },
- { 0x16e000, 16, RI_E1H_ONLINE },
- { 0x16e040, 8, RI_E2E3E3B0_ONLINE },
- { 0x16e100, 1, RI_E1H_ONLINE },
- { 0x16e200, 2, RI_E1H_ONLINE },
- { 0x16e400, 161, RI_E1H_ONLINE },
- { 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e68c, 12, RI_E1H_ONLINE },
- { 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
- { 0x16e6cc, 4, RI_E1H_ONLINE },
- { 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
- { 0x16e6e8, 5, RI_E2E3_ONLINE },
- { 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
- { 0x16e768, 17, RI_E2E3E3B0_ONLINE },
- { 0x16e7ac, 12, RI_E3B0_ONLINE },
- { 0x170000, 24, RI_ALL_ONLINE },
- { 0x170060, 4, RI_E1E1H_ONLINE },
- { 0x170070, 65, RI_ALL_ONLINE },
- { 0x170194, 11, RI_E2E3E3B0_ONLINE },
- { 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
- { 0x1701e8, 1, RI_E3E3B0_ONLINE },
- { 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
- { 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
- { 0x170200, 4, RI_ALL_ONLINE },
- { 0x170214, 1, RI_ALL_ONLINE },
- { 0x170218, 77, RI_E2E3E3B0_ONLINE },
- { 0x170400, 64, RI_E2E3E3B0_ONLINE },
- { 0x178000, 1, RI_ALL_ONLINE },
- { 0x180000, 61, RI_ALL_ONLINE },
- { 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x180200, 58, RI_ALL_ONLINE },
- { 0x180340, 4, RI_ALL_ONLINE },
- { 0x180380, 1, RI_E2E3E3B0_ONLINE },
- { 0x180388, 1, RI_E2E3E3B0_ONLINE },
- { 0x180390, 1, RI_E2E3E3B0_ONLINE },
- { 0x180398, 1, RI_E2E3E3B0_ONLINE },
- { 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
- { 0x1803b4, 2, RI_E3E3B0_ONLINE },
- { 0x180404, 255, RI_E1E1H_OFFLINE },
- { 0x181000, 4, RI_ALL_ONLINE },
- { 0x181010, 1020, RI_ALL_OFFLINE },
- { 0x182000, 4, RI_E3E3B0_ONLINE },
- { 0x1a0000, 1, RI_ALL_ONLINE },
- { 0x1a0004, 5631, RI_ALL_OFFLINE },
- { 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1a8000, 1, RI_ALL_ONLINE },
- { 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x1b0000, 1, RI_ALL_ONLINE },
- { 0x1b0004, 15, RI_E1H_OFFLINE },
- { 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0044, 239, RI_E1H_OFFLINE },
- { 0x1b0400, 1, RI_ALL_ONLINE },
- { 0x1b0404, 255, RI_E1H_OFFLINE },
- { 0x1b0800, 1, RI_ALL_ONLINE },
- { 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b0c00, 1, RI_ALL_ONLINE },
- { 0x1b1000, 1, RI_ALL_ONLINE },
- { 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1400, 1, RI_ALL_ONLINE },
- { 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b1800, 128, RI_ALL_OFFLINE },
- { 0x1b1c00, 128, RI_ALL_OFFLINE },
- { 0x1b2000, 1, RI_ALL_ONLINE },
- { 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x1b8000, 1, RI_ALL_ONLINE },
- { 0x1b8040, 1, RI_ALL_ONLINE },
- { 0x1b8080, 1, RI_ALL_ONLINE },
- { 0x1b80c0, 1, RI_ALL_ONLINE },
- { 0x1b8100, 1, RI_ALL_ONLINE },
- { 0x1b8140, 1, RI_ALL_ONLINE },
- { 0x1b8180, 1, RI_ALL_ONLINE },
- { 0x1b81c0, 1, RI_ALL_ONLINE },
- { 0x1b8200, 1, RI_ALL_ONLINE },
- { 0x1b8240, 1, RI_ALL_ONLINE },
- { 0x1b8280, 1, RI_ALL_ONLINE },
- { 0x1b82c0, 1, RI_ALL_ONLINE },
- { 0x1b8300, 1, RI_ALL_ONLINE },
- { 0x1b8340, 1, RI_ALL_ONLINE },
- { 0x1b8380, 1, RI_ALL_ONLINE },
- { 0x1b83c0, 1, RI_ALL_ONLINE },
- { 0x1b8400, 1, RI_ALL_ONLINE },
- { 0x1b8440, 1, RI_ALL_ONLINE },
- { 0x1b8480, 1, RI_ALL_ONLINE },
- { 0x1b84c0, 1, RI_ALL_ONLINE },
- { 0x1b8500, 1, RI_ALL_ONLINE },
- { 0x1b8540, 1, RI_ALL_ONLINE },
- { 0x1b8580, 1, RI_ALL_ONLINE },
- { 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x1b8800, 1, RI_ALL_ONLINE },
- { 0x1b8840, 1, RI_ALL_ONLINE },
- { 0x1b8880, 1, RI_ALL_ONLINE },
- { 0x1b88c0, 1, RI_ALL_ONLINE },
- { 0x1b8900, 1, RI_ALL_ONLINE },
- { 0x1b8940, 1, RI_ALL_ONLINE },
- { 0x1b8980, 1, RI_ALL_ONLINE },
- { 0x1b89c0, 1, RI_ALL_ONLINE },
- { 0x1b8a00, 1, RI_ALL_ONLINE },
- { 0x1b8a40, 1, RI_ALL_ONLINE },
- { 0x1b8a80, 1, RI_ALL_ONLINE },
- { 0x1b8ac0, 1, RI_ALL_ONLINE },
- { 0x1b8b00, 1, RI_ALL_ONLINE },
- { 0x1b8b40, 1, RI_ALL_ONLINE },
- { 0x1b8b80, 1, RI_ALL_ONLINE },
- { 0x1b8bc0, 1, RI_ALL_ONLINE },
- { 0x1b8c00, 1, RI_ALL_ONLINE },
- { 0x1b8c40, 1, RI_ALL_ONLINE },
- { 0x1b8c80, 1, RI_ALL_ONLINE },
- { 0x1b8cc0, 1, RI_ALL_ONLINE },
- { 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8d00, 1, RI_ALL_ONLINE },
- { 0x1b8d40, 1, RI_ALL_ONLINE },
- { 0x1b8d80, 1, RI_ALL_ONLINE },
- { 0x1b8dc0, 1, RI_ALL_ONLINE },
- { 0x1b8e00, 1, RI_ALL_ONLINE },
- { 0x1b8e40, 1, RI_ALL_ONLINE },
- { 0x1b8e80, 1, RI_ALL_ONLINE },
- { 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x1b905c, 1, RI_E3E3B0_ONLINE },
- { 0x1b9064, 1, RI_E3B0_ONLINE },
- { 0x1b9080, 10, RI_E3B0_ONLINE },
- { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE },
- { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE },
- { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE },
- { 0x1c0000, 2, RI_ALL_ONLINE },
- { 0x200000, 65, RI_ALL_ONLINE },
- { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x200200, 58, RI_ALL_ONLINE },
- { 0x200340, 4, RI_ALL_ONLINE },
- { 0x200380, 1, RI_E2E3E3B0_ONLINE },
- { 0x200388, 1, RI_E2E3E3B0_ONLINE },
- { 0x200390, 1, RI_E2E3E3B0_ONLINE },
- { 0x200398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x200404, 255, RI_E1E1H_OFFLINE },
- { 0x202000, 4, RI_ALL_ONLINE },
- { 0x202010, 2044, RI_ALL_OFFLINE },
- { 0x204000, 4, RI_E3E3B0_ONLINE },
- { 0x220000, 1, RI_ALL_ONLINE },
- { 0x220004, 5631, RI_ALL_OFFLINE },
- { 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x228000, 1, RI_ALL_ONLINE },
- { 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x230000, 1, RI_ALL_ONLINE },
- { 0x230004, 15, RI_E1H_OFFLINE },
- { 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230044, 239, RI_E1H_OFFLINE },
- { 0x230400, 1, RI_ALL_ONLINE },
- { 0x230404, 255, RI_E1H_OFFLINE },
- { 0x230800, 1, RI_ALL_ONLINE },
- { 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x230c00, 1, RI_ALL_ONLINE },
- { 0x231000, 1, RI_ALL_ONLINE },
- { 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231400, 1, RI_ALL_ONLINE },
- { 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x231800, 128, RI_ALL_OFFLINE },
- { 0x231c00, 128, RI_ALL_OFFLINE },
- { 0x232000, 1, RI_ALL_ONLINE },
- { 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x238000, 1, RI_ALL_ONLINE },
- { 0x238040, 1, RI_ALL_ONLINE },
- { 0x238080, 1, RI_ALL_ONLINE },
- { 0x2380c0, 1, RI_ALL_ONLINE },
- { 0x238100, 1, RI_ALL_ONLINE },
- { 0x238140, 1, RI_ALL_ONLINE },
- { 0x238180, 1, RI_ALL_ONLINE },
- { 0x2381c0, 1, RI_ALL_ONLINE },
- { 0x238200, 1, RI_ALL_ONLINE },
- { 0x238240, 1, RI_ALL_ONLINE },
- { 0x238280, 1, RI_ALL_ONLINE },
- { 0x2382c0, 1, RI_ALL_ONLINE },
- { 0x238300, 1, RI_ALL_ONLINE },
- { 0x238340, 1, RI_ALL_ONLINE },
- { 0x238380, 1, RI_ALL_ONLINE },
- { 0x2383c0, 1, RI_ALL_ONLINE },
- { 0x238400, 1, RI_ALL_ONLINE },
- { 0x238440, 1, RI_ALL_ONLINE },
- { 0x238480, 1, RI_ALL_ONLINE },
- { 0x2384c0, 1, RI_ALL_ONLINE },
- { 0x238500, 1, RI_ALL_ONLINE },
- { 0x238540, 1, RI_ALL_ONLINE },
- { 0x238580, 1, RI_ALL_ONLINE },
- { 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x238800, 1, RI_ALL_ONLINE },
- { 0x238840, 1, RI_ALL_ONLINE },
- { 0x238880, 1, RI_ALL_ONLINE },
- { 0x2388c0, 1, RI_ALL_ONLINE },
- { 0x238900, 1, RI_ALL_ONLINE },
- { 0x238940, 1, RI_ALL_ONLINE },
- { 0x238980, 1, RI_ALL_ONLINE },
- { 0x2389c0, 1, RI_ALL_ONLINE },
- { 0x238a00, 1, RI_ALL_ONLINE },
- { 0x238a40, 1, RI_ALL_ONLINE },
- { 0x238a80, 1, RI_ALL_ONLINE },
- { 0x238ac0, 1, RI_ALL_ONLINE },
- { 0x238b00, 1, RI_ALL_ONLINE },
- { 0x238b40, 1, RI_ALL_ONLINE },
- { 0x238b80, 1, RI_ALL_ONLINE },
- { 0x238bc0, 1, RI_ALL_ONLINE },
- { 0x238c00, 1, RI_ALL_ONLINE },
- { 0x238c40, 1, RI_ALL_ONLINE },
- { 0x238c80, 1, RI_ALL_ONLINE },
- { 0x238cc0, 1, RI_ALL_ONLINE },
- { 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x238d00, 1, RI_ALL_ONLINE },
- { 0x238d40, 1, RI_ALL_ONLINE },
- { 0x238d80, 1, RI_ALL_ONLINE },
- { 0x238dc0, 1, RI_ALL_ONLINE },
- { 0x238e00, 1, RI_ALL_ONLINE },
- { 0x238e40, 1, RI_ALL_ONLINE },
- { 0x238e80, 1, RI_ALL_ONLINE },
- { 0x238e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x238fe8, 2, RI_E3E3B0_ONLINE },
- { 0x239000, 1, RI_E2E3E3B0_ONLINE },
- { 0x239040, 3, RI_E2E3E3B0_ONLINE },
- { 0x23905c, 1, RI_E3E3B0_ONLINE },
- { 0x239064, 1, RI_E3B0_ONLINE },
- { 0x239080, 10, RI_E3B0_ONLINE },
- { 0x240000, 2, RI_ALL_ONLINE },
- { 0x280000, 65, RI_ALL_ONLINE },
- { 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x280200, 58, RI_ALL_ONLINE },
- { 0x280340, 4, RI_ALL_ONLINE },
- { 0x280380, 1, RI_E2E3E3B0_ONLINE },
- { 0x280388, 1, RI_E2E3E3B0_ONLINE },
- { 0x280390, 1, RI_E2E3E3B0_ONLINE },
- { 0x280398, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x280404, 255, RI_E1E1H_OFFLINE },
- { 0x282000, 4, RI_ALL_ONLINE },
- { 0x282010, 2044, RI_ALL_OFFLINE },
- { 0x284000, 4, RI_E3E3B0_ONLINE },
- { 0x2a0000, 1, RI_ALL_ONLINE },
- { 0x2a0004, 5631, RI_ALL_OFFLINE },
- { 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2a8000, 1, RI_ALL_ONLINE },
- { 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x2b0000, 1, RI_ALL_ONLINE },
- { 0x2b0004, 15, RI_E1H_OFFLINE },
- { 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0044, 239, RI_E1H_OFFLINE },
- { 0x2b0400, 1, RI_ALL_ONLINE },
- { 0x2b0404, 255, RI_E1H_OFFLINE },
- { 0x2b0800, 1, RI_ALL_ONLINE },
- { 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b0c00, 1, RI_ALL_ONLINE },
- { 0x2b1000, 1, RI_ALL_ONLINE },
- { 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1400, 1, RI_ALL_ONLINE },
- { 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b1800, 128, RI_ALL_OFFLINE },
- { 0x2b1c00, 128, RI_ALL_OFFLINE },
- { 0x2b2000, 1, RI_ALL_ONLINE },
- { 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x2b8000, 1, RI_ALL_ONLINE },
- { 0x2b8040, 1, RI_ALL_ONLINE },
- { 0x2b8080, 1, RI_ALL_ONLINE },
- { 0x2b80c0, 1, RI_ALL_ONLINE },
- { 0x2b8100, 1, RI_ALL_ONLINE },
- { 0x2b8140, 1, RI_ALL_ONLINE },
- { 0x2b8180, 1, RI_ALL_ONLINE },
- { 0x2b81c0, 1, RI_ALL_ONLINE },
- { 0x2b8200, 1, RI_ALL_ONLINE },
- { 0x2b8240, 1, RI_ALL_ONLINE },
- { 0x2b8280, 1, RI_ALL_ONLINE },
- { 0x2b82c0, 1, RI_ALL_ONLINE },
- { 0x2b8300, 1, RI_ALL_ONLINE },
- { 0x2b8340, 1, RI_ALL_ONLINE },
- { 0x2b8380, 1, RI_ALL_ONLINE },
- { 0x2b83c0, 1, RI_ALL_ONLINE },
- { 0x2b8400, 1, RI_ALL_ONLINE },
- { 0x2b8440, 1, RI_ALL_ONLINE },
- { 0x2b8480, 1, RI_ALL_ONLINE },
- { 0x2b84c0, 1, RI_ALL_ONLINE },
- { 0x2b8500, 1, RI_ALL_ONLINE },
- { 0x2b8540, 1, RI_ALL_ONLINE },
- { 0x2b8580, 1, RI_ALL_ONLINE },
- { 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b8800, 1, RI_ALL_ONLINE },
- { 0x2b8840, 1, RI_ALL_ONLINE },
- { 0x2b8880, 1, RI_ALL_ONLINE },
- { 0x2b88c0, 1, RI_ALL_ONLINE },
- { 0x2b8900, 1, RI_ALL_ONLINE },
- { 0x2b8940, 1, RI_ALL_ONLINE },
- { 0x2b8980, 1, RI_ALL_ONLINE },
- { 0x2b89c0, 1, RI_ALL_ONLINE },
- { 0x2b8a00, 1, RI_ALL_ONLINE },
- { 0x2b8a40, 1, RI_ALL_ONLINE },
- { 0x2b8a80, 1, RI_ALL_ONLINE },
- { 0x2b8ac0, 1, RI_ALL_ONLINE },
- { 0x2b8b00, 1, RI_ALL_ONLINE },
- { 0x2b8b40, 1, RI_ALL_ONLINE },
- { 0x2b8b80, 1, RI_ALL_ONLINE },
- { 0x2b8bc0, 1, RI_ALL_ONLINE },
- { 0x2b8c00, 1, RI_ALL_ONLINE },
- { 0x2b8c40, 1, RI_ALL_ONLINE },
- { 0x2b8c80, 1, RI_ALL_ONLINE },
- { 0x2b8cc0, 1, RI_ALL_ONLINE },
- { 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8d00, 1, RI_ALL_ONLINE },
- { 0x2b8d40, 1, RI_ALL_ONLINE },
- { 0x2b8d80, 1, RI_ALL_ONLINE },
- { 0x2b8dc0, 1, RI_ALL_ONLINE },
- { 0x2b8e00, 1, RI_ALL_ONLINE },
- { 0x2b8e40, 1, RI_ALL_ONLINE },
- { 0x2b8e80, 1, RI_ALL_ONLINE },
- { 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
- { 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
- { 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
- { 0x2b905c, 1, RI_E3E3B0_ONLINE },
- { 0x2b9064, 1, RI_E3B0_ONLINE },
- { 0x2b9080, 10, RI_E3B0_ONLINE },
- { 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
- { 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
- { 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
- { 0x2c0000, 2, RI_ALL_ONLINE },
- { 0x300000, 65, RI_ALL_ONLINE },
- { 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
- { 0x300200, 58, RI_ALL_ONLINE },
- { 0x300340, 4, RI_ALL_ONLINE },
- { 0x300380, 1, RI_E2E3E3B0_ONLINE },
- { 0x300388, 1, RI_E2E3E3B0_ONLINE },
- { 0x300390, 1, RI_E2E3E3B0_ONLINE },
- { 0x300398, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
- { 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
- { 0x300404, 255, RI_E1E1H_OFFLINE },
- { 0x302000, 4, RI_ALL_ONLINE },
- { 0x302010, 2044, RI_ALL_OFFLINE },
- { 0x304000, 4, RI_E3E3B0_ONLINE },
- { 0x320000, 1, RI_ALL_ONLINE },
- { 0x320004, 5631, RI_ALL_OFFLINE },
- { 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
- { 0x328000, 1, RI_ALL_ONLINE },
- { 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
- { 0x330000, 1, RI_ALL_ONLINE },
- { 0x330004, 15, RI_E1H_OFFLINE },
- { 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330044, 239, RI_E1H_OFFLINE },
- { 0x330400, 1, RI_ALL_ONLINE },
- { 0x330404, 255, RI_E1H_OFFLINE },
- { 0x330800, 1, RI_ALL_ONLINE },
- { 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x330c00, 1, RI_ALL_ONLINE },
- { 0x331000, 1, RI_ALL_ONLINE },
- { 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331400, 1, RI_ALL_ONLINE },
- { 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x331800, 128, RI_ALL_OFFLINE },
- { 0x331c00, 128, RI_ALL_OFFLINE },
- { 0x332000, 1, RI_ALL_ONLINE },
- { 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
- { 0x338000, 1, RI_ALL_ONLINE },
- { 0x338040, 1, RI_ALL_ONLINE },
- { 0x338080, 1, RI_ALL_ONLINE },
- { 0x3380c0, 1, RI_ALL_ONLINE },
- { 0x338100, 1, RI_ALL_ONLINE },
- { 0x338140, 1, RI_ALL_ONLINE },
- { 0x338180, 1, RI_ALL_ONLINE },
- { 0x3381c0, 1, RI_ALL_ONLINE },
- { 0x338200, 1, RI_ALL_ONLINE },
- { 0x338240, 1, RI_ALL_ONLINE },
- { 0x338280, 1, RI_ALL_ONLINE },
- { 0x3382c0, 1, RI_ALL_ONLINE },
- { 0x338300, 1, RI_ALL_ONLINE },
- { 0x338340, 1, RI_ALL_ONLINE },
- { 0x338380, 1, RI_ALL_ONLINE },
- { 0x3383c0, 1, RI_ALL_ONLINE },
- { 0x338400, 1, RI_ALL_ONLINE },
- { 0x338440, 1, RI_ALL_ONLINE },
- { 0x338480, 1, RI_ALL_ONLINE },
- { 0x3384c0, 1, RI_ALL_ONLINE },
- { 0x338500, 1, RI_ALL_ONLINE },
- { 0x338540, 1, RI_ALL_ONLINE },
- { 0x338580, 1, RI_ALL_ONLINE },
- { 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
- { 0x338800, 1, RI_ALL_ONLINE },
- { 0x338840, 1, RI_ALL_ONLINE },
- { 0x338880, 1, RI_ALL_ONLINE },
- { 0x3388c0, 1, RI_ALL_ONLINE },
- { 0x338900, 1, RI_ALL_ONLINE },
- { 0x338940, 1, RI_ALL_ONLINE },
- { 0x338980, 1, RI_ALL_ONLINE },
- { 0x3389c0, 1, RI_ALL_ONLINE },
- { 0x338a00, 1, RI_ALL_ONLINE },
- { 0x338a40, 1, RI_ALL_ONLINE },
- { 0x338a80, 1, RI_ALL_ONLINE },
- { 0x338ac0, 1, RI_ALL_ONLINE },
- { 0x338b00, 1, RI_ALL_ONLINE },
- { 0x338b40, 1, RI_ALL_ONLINE },
- { 0x338b80, 1, RI_ALL_ONLINE },
- { 0x338bc0, 1, RI_ALL_ONLINE },
- { 0x338c00, 1, RI_ALL_ONLINE },
- { 0x338c40, 1, RI_ALL_ONLINE },
- { 0x338c80, 1, RI_ALL_ONLINE },
- { 0x338cc0, 1, RI_ALL_ONLINE },
- { 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
- { 0x338d00, 1, RI_ALL_ONLINE },
- { 0x338d40, 1, RI_ALL_ONLINE },
- { 0x338d80, 1, RI_ALL_ONLINE },
- { 0x338dc0, 1, RI_ALL_ONLINE },
- { 0x338e00, 1, RI_ALL_ONLINE },
- { 0x338e40, 1, RI_ALL_ONLINE },
- { 0x338e80, 1, RI_ALL_ONLINE },
- { 0x338e84, 1, RI_E2E3E3B0_ONLINE },
- { 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
- { 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
- { 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
- { 0x338fe8, 2, RI_E3E3B0_ONLINE },
- { 0x339000, 1, RI_E2E3E3B0_ONLINE },
- { 0x339040, 3, RI_E2E3E3B0_ONLINE },
- { 0x33905c, 1, RI_E3E3B0_ONLINE },
- { 0x339064, 1, RI_E3B0_ONLINE },
- { 0x339080, 10, RI_E3B0_ONLINE },
- { 0x340000, 2, RI_ALL_ONLINE },
+ { 0x2000, 1, 0x1f, 0xfff},
+ { 0x2004, 1, 0x1f, 0x1fff},
+ { 0x2008, 25, 0x1f, 0xfff},
+ { 0x206c, 1, 0x1f, 0x1fff},
+ { 0x2070, 313, 0x1f, 0xfff},
+ { 0x2800, 103, 0x1f, 0xfff},
+ { 0x3000, 287, 0x1f, 0xfff},
+ { 0x3800, 331, 0x1f, 0xfff},
+ { 0x8800, 6, 0x1f, 0x924},
+ { 0x8818, 1, 0x1e, 0x924},
+ { 0x9000, 4, 0x1c, 0x924},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x902c, 1, 0x1c, 0x924},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9034, 13, 0x1c, 0x924},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x90a8, 98, 0x1c, 0x924},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9238, 3, 0x1c, 0x924},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9248, 1, 0x1c, 0x924},
+ { 0x924c, 1, 0x4, 0x924},
+ { 0x9250, 16, 0x1c, 0x924},
+ { 0x92a8, 2, 0x1c, 0x1fff},
+ { 0x92b4, 1, 0x1c, 0x1fff},
+ { 0x9400, 33, 0x1c, 0x924},
+ { 0x9484, 5, 0x18, 0x924},
+ { 0xa000, 27, 0x1f, 0x924},
+ { 0xa06c, 1, 0x3, 0x924},
+ { 0xa070, 2, 0x1f, 0x924},
+ { 0xa078, 1, 0x1f, 0x1fff},
+ { 0xa07c, 31, 0x1f, 0x924},
+ { 0xa0f8, 1, 0x1f, 0x1fff},
+ { 0xa0fc, 3, 0x1f, 0x924},
+ { 0xa108, 1, 0x1f, 0x1fff},
+ { 0xa10c, 3, 0x1f, 0x924},
+ { 0xa118, 1, 0x1f, 0x1fff},
+ { 0xa11c, 28, 0x1f, 0x924},
+ { 0xa18c, 4, 0x3, 0x924},
+ { 0xa19c, 3, 0x1f, 0x924},
+ { 0xa1a8, 1, 0x1f, 0x1fff},
+ { 0xa1ac, 3, 0x1f, 0x924},
+ { 0xa1b8, 1, 0x1f, 0x1fff},
+ { 0xa1bc, 54, 0x1f, 0x924},
+ { 0xa294, 2, 0x3, 0x924},
+ { 0xa29c, 2, 0x1f, 0x924},
+ { 0xa2a4, 2, 0x7, 0x924},
+ { 0xa2ac, 2, 0x1f, 0x924},
+ { 0xa2b4, 1, 0x1f, 0x1fff},
+ { 0xa2b8, 49, 0x1f, 0x924},
+ { 0xa38c, 2, 0x1f, 0x1fff},
+ { 0xa398, 1, 0x1f, 0x1fff},
+ { 0xa39c, 7, 0x1e, 0x924},
+ { 0xa3b8, 2, 0x18, 0x924},
+ { 0xa3c0, 1, 0x1e, 0x924},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa3c8, 1, 0x1e, 0x924},
+ { 0xa3d0, 1, 0x1e, 0x924},
+ { 0xa3d8, 1, 0x1e, 0x924},
+ { 0xa3e0, 1, 0x1e, 0x924},
+ { 0xa3e8, 1, 0x1e, 0x924},
+ { 0xa3f0, 1, 0x1e, 0x924},
+ { 0xa3f8, 1, 0x1e, 0x924},
+ { 0xa400, 1, 0x1f, 0x924},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa410, 7, 0x1f, 0x924},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa45c, 1, 0x1f, 0x924},
+ { 0xa460, 1, 0x1f, 0x1924},
+ { 0xa464, 15, 0x1f, 0x924},
+ { 0xa4a0, 1, 0x7, 0x924},
+ { 0xa4a4, 2, 0x1f, 0x924},
+ { 0xa4ac, 2, 0x3, 0x924},
+ { 0xa4b4, 1, 0x7, 0x924},
+ { 0xa4b8, 2, 0x3, 0x924},
+ { 0xa4c0, 3, 0x1f, 0x924},
+ { 0xa4cc, 5, 0x3, 0x924},
+ { 0xa4e0, 3, 0x1f, 0x924},
+ { 0xa4fc, 2, 0x1f, 0x924},
+ { 0xa504, 1, 0x3, 0x924},
+ { 0xa508, 3, 0x1f, 0x924},
+ { 0xa518, 1, 0x1f, 0x924},
+ { 0xa520, 1, 0x1f, 0x924},
+ { 0xa528, 1, 0x1f, 0x924},
+ { 0xa530, 1, 0x1f, 0x924},
+ { 0xa538, 1, 0x1f, 0x924},
+ { 0xa540, 1, 0x1f, 0x924},
+ { 0xa548, 1, 0x3, 0x924},
+ { 0xa550, 1, 0x3, 0x924},
+ { 0xa558, 1, 0x3, 0x924},
+ { 0xa560, 1, 0x3, 0x924},
+ { 0xa568, 1, 0x3, 0x924},
+ { 0xa570, 1, 0x1f, 0x924},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa5a0, 1, 0x7, 0x924},
+ { 0xa5c0, 1, 0x1f, 0x924},
+ { 0xa5e0, 1, 0x1e, 0x924},
+ { 0xa5e8, 1, 0x1e, 0x924},
+ { 0xa5f0, 1, 0x1e, 0x924},
+ { 0xa5f8, 1, 0x6, 0x924},
+ { 0xa5fc, 1, 0x1e, 0x924},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa614, 1, 0x1e, 0x924},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa61c, 1, 0x1e, 0x924},
+ { 0xa620, 6, 0x1c, 0x924},
+ { 0xa638, 20, 0x4, 0x924},
+ { 0xa688, 35, 0x1c, 0x924},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa718, 2, 0x1c, 0x924},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa724, 3, 0x1c, 0x924},
+ { 0xa730, 1, 0x4, 0x924},
+ { 0xa734, 2, 0x1c, 0x924},
+ { 0xa73c, 4, 0x4, 0x924},
+ { 0xa74c, 1, 0x1c, 0x924},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xa754, 3, 0x1c, 0x924},
+ { 0xa760, 5, 0x4, 0x924},
+ { 0xa774, 7, 0x1c, 0x924},
+ { 0xa790, 15, 0x4, 0x924},
+ { 0xa7cc, 4, 0x1c, 0x924},
+ { 0xa7e0, 6, 0x18, 0x924},
+ { 0xa800, 18, 0x4, 0x924},
+ { 0xa848, 33, 0x1c, 0x924},
+ { 0xa8cc, 2, 0x18, 0x924},
+ { 0xa8d4, 4, 0x1c, 0x924},
+ { 0xa8e4, 1, 0x18, 0x924},
+ { 0xa8e8, 1, 0x1c, 0x924},
+ { 0xa8f0, 1, 0x1c, 0x924},
+ { 0xa8f8, 30, 0x18, 0x924},
+ { 0xa974, 73, 0x18, 0x924},
+ { 0xac30, 1, 0x18, 0x924},
+ { 0xac40, 1, 0x18, 0x924},
+ { 0xac50, 1, 0x18, 0x924},
+ { 0xac60, 1, 0x10, 0x924},
+ { 0x10000, 9, 0x1f, 0x924},
+ { 0x10024, 1, 0x7, 0x924},
+ { 0x10028, 5, 0x1f, 0x924},
+ { 0x1003c, 6, 0x7, 0x924},
+ { 0x10054, 20, 0x1f, 0x924},
+ { 0x100a4, 4, 0x7, 0x924},
+ { 0x100b4, 11, 0x1f, 0x924},
+ { 0x100e0, 4, 0x7, 0x924},
+ { 0x100f0, 8, 0x1f, 0x924},
+ { 0x10110, 6, 0x7, 0x924},
+ { 0x10128, 110, 0x1f, 0x924},
+ { 0x102e0, 4, 0x7, 0x924},
+ { 0x102f0, 18, 0x1f, 0x924},
+ { 0x10338, 20, 0x7, 0x924},
+ { 0x10388, 10, 0x1f, 0x924},
+ { 0x103d0, 2, 0x3, 0x1fff},
+ { 0x103dc, 1, 0x3, 0x1fff},
+ { 0x10400, 6, 0x7, 0x924},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x1041c, 1, 0x1f, 0x924},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10424, 1, 0x1f, 0x924},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x1042c, 1, 0x1f, 0x924},
+ { 0x10430, 10, 0x7, 0x924},
+ { 0x10458, 2, 0x1f, 0x924},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10464, 4, 0x1f, 0x924},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x10478, 14, 0x1f, 0x924},
+ { 0x104b0, 12, 0x7, 0x924},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104e8, 1, 0x1f, 0x924},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f4, 1, 0x1f, 0x924},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10500, 2, 0x1f, 0x924},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x1050c, 9, 0x1f, 0x924},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10534, 1, 0x1f, 0x924},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x1053c, 3, 0x1f, 0x924},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x1054c, 3, 0x1f, 0x924},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x1055c, 123, 0x1f, 0x924},
+ { 0x10750, 2, 0x7, 0x924},
+ { 0x10760, 2, 0x7, 0x924},
+ { 0x10770, 2, 0x7, 0x924},
+ { 0x10780, 2, 0x7, 0x924},
+ { 0x10790, 2, 0x1f, 0x924},
+ { 0x107a0, 2, 0x7, 0x924},
+ { 0x107b0, 2, 0x7, 0x924},
+ { 0x107c0, 2, 0x7, 0x924},
+ { 0x107d0, 2, 0x7, 0x924},
+ { 0x107e0, 2, 0x1f, 0x924},
+ { 0x10880, 2, 0x1f, 0x924},
+ { 0x10900, 2, 0x1f, 0x924},
+ { 0x16000, 1, 0x6, 0x924},
+ { 0x16004, 25, 0x1e, 0x924},
+ { 0x16070, 8, 0x1e, 0x924},
+ { 0x16090, 4, 0xe, 0x924},
+ { 0x160a0, 6, 0x1e, 0x924},
+ { 0x160c0, 7, 0x1e, 0x924},
+ { 0x160dc, 2, 0x6, 0x924},
+ { 0x160e4, 6, 0x1e, 0x924},
+ { 0x160fc, 4, 0x1e, 0x1fff},
+ { 0x1610c, 2, 0x6, 0x924},
+ { 0x16114, 6, 0x1e, 0x924},
+ { 0x16140, 48, 0x1e, 0x1fff},
+ { 0x16204, 5, 0x1e, 0x924},
+ { 0x18000, 1, 0x1e, 0x924},
+ { 0x18008, 1, 0x1e, 0x924},
+ { 0x18010, 35, 0x1c, 0x924},
+ { 0x180a4, 2, 0x1c, 0x924},
+ { 0x180c0, 9, 0x1c, 0x924},
+ { 0x180e4, 1, 0xc, 0x924},
+ { 0x180e8, 2, 0x1c, 0x924},
+ { 0x180f0, 1, 0xc, 0x924},
+ { 0x180f4, 79, 0x1c, 0x924},
+ { 0x18230, 1, 0xc, 0x924},
+ { 0x18234, 2, 0x1c, 0x924},
+ { 0x1823c, 1, 0xc, 0x924},
+ { 0x18240, 13, 0x1c, 0x924},
+ { 0x18274, 1, 0x4, 0x924},
+ { 0x18278, 12, 0x1c, 0x924},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182ac, 3, 0x1c, 0x924},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x182bc, 19, 0x1c, 0x924},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x1830c, 3, 0x1c, 0x924},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x1831c, 7, 0x1c, 0x924},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x1833c, 3, 0x1c, 0x924},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x1834c, 28, 0x1c, 0x924},
+ { 0x183bc, 2, 0x1c, 0x1fff},
+ { 0x183c8, 3, 0x1c, 0x1fff},
+ { 0x183d8, 1, 0x1c, 0x1fff},
+ { 0x18440, 48, 0x1c, 0x1fff},
+ { 0x18500, 15, 0x1c, 0x924},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18574, 1, 0x18, 0x924},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1857c, 4, 0x18, 0x924},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18590, 1, 0x18, 0x924},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x18598, 32, 0x18, 0x924},
+ { 0x18618, 5, 0x10, 0x924},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x1863c, 16, 0x10, 0x924},
+ { 0x18680, 44, 0x10, 0x924},
+ { 0x18748, 12, 0x10, 0x924},
+ { 0x18788, 1, 0x10, 0x924},
+ { 0x1879c, 6, 0x10, 0x924},
+ { 0x187c4, 51, 0x10, 0x924},
+ { 0x18a00, 48, 0x10, 0x924},
+ { 0x20000, 24, 0x1f, 0x924},
+ { 0x20060, 8, 0x1f, 0x9e4},
+ { 0x20080, 94, 0x1f, 0x924},
+ { 0x201f8, 1, 0x3, 0x924},
+ { 0x201fc, 1, 0x1f, 0x924},
+ { 0x20200, 1, 0x3, 0x924},
+ { 0x20204, 1, 0x1f, 0x924},
+ { 0x20208, 1, 0x3, 0x924},
+ { 0x2020c, 4, 0x1f, 0x924},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x20248, 24, 0x1f, 0x924},
+ { 0x202b8, 2, 0x1f, 0x1fff},
+ { 0x202c4, 1, 0x1f, 0x1fff},
+ { 0x202c8, 1, 0x1c, 0x924},
+ { 0x202d8, 4, 0x1c, 0x924},
+ { 0x202f0, 1, 0x10, 0x924},
+ { 0x20400, 1, 0x1f, 0x924},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x20414, 2, 0x1f, 0x924},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x20424, 2, 0x1f, 0x924},
+ { 0x2042c, 18, 0x1e, 0x924},
+ { 0x20480, 1, 0x1f, 0x924},
+ { 0x20500, 1, 0x1f, 0x924},
+ { 0x20600, 1, 0x1f, 0x924},
+ { 0x28000, 1, 0x1f, 0x9e4},
+ { 0x28004, 255, 0x1f, 0x180},
+ { 0x28400, 1, 0x1f, 0x1c0},
+ { 0x28404, 255, 0x1f, 0x180},
+ { 0x28800, 1, 0x1f, 0x1c0},
+ { 0x28804, 255, 0x1f, 0x180},
+ { 0x28c00, 1, 0x1f, 0x1c0},
+ { 0x28c04, 255, 0x1f, 0x180},
+ { 0x29000, 1, 0x1f, 0x1c0},
+ { 0x29004, 255, 0x1f, 0x180},
+ { 0x29400, 1, 0x1f, 0x1c0},
+ { 0x29404, 255, 0x1f, 0x180},
+ { 0x29800, 1, 0x1f, 0x1c0},
+ { 0x29804, 255, 0x1f, 0x180},
+ { 0x29c00, 1, 0x1f, 0x1c0},
+ { 0x29c04, 255, 0x1f, 0x180},
+ { 0x2a000, 1, 0x1f, 0x1c0},
+ { 0x2a004, 255, 0x1f, 0x180},
+ { 0x2a400, 1, 0x1f, 0x1c0},
+ { 0x2a404, 255, 0x1f, 0x180},
+ { 0x2a800, 1, 0x1f, 0x1c0},
+ { 0x2a804, 255, 0x1f, 0x180},
+ { 0x2ac00, 1, 0x1f, 0x1c0},
+ { 0x2ac04, 255, 0x1f, 0x180},
+ { 0x2b000, 1, 0x1f, 0x1c0},
+ { 0x2b004, 255, 0x1f, 0x180},
+ { 0x2b400, 1, 0x1f, 0x1c0},
+ { 0x2b404, 255, 0x1f, 0x180},
+ { 0x2b800, 1, 0x1f, 0x1c0},
+ { 0x2b804, 255, 0x1f, 0x180},
+ { 0x2bc00, 1, 0x1f, 0x1c0},
+ { 0x2bc04, 255, 0x1f, 0x180},
+ { 0x2c000, 1, 0x1f, 0x1c0},
+ { 0x2c004, 255, 0x1f, 0x180},
+ { 0x2c400, 1, 0x1f, 0x1c0},
+ { 0x2c404, 255, 0x1f, 0x180},
+ { 0x2c800, 1, 0x1f, 0x1c0},
+ { 0x2c804, 255, 0x1f, 0x180},
+ { 0x2cc00, 1, 0x1f, 0x1c0},
+ { 0x2cc04, 255, 0x1f, 0x180},
+ { 0x2d000, 1, 0x1f, 0x1c0},
+ { 0x2d004, 255, 0x1f, 0x180},
+ { 0x2d400, 1, 0x1f, 0x1c0},
+ { 0x2d404, 255, 0x1f, 0x180},
+ { 0x2d800, 1, 0x1f, 0x1c0},
+ { 0x2d804, 255, 0x1f, 0x180},
+ { 0x2dc00, 1, 0x1f, 0x1c0},
+ { 0x2dc04, 255, 0x1f, 0x180},
+ { 0x2e000, 1, 0x1f, 0x1c0},
+ { 0x2e004, 255, 0x1f, 0x180},
+ { 0x2e400, 1, 0x1f, 0x1c0},
+ { 0x2e404, 255, 0x1f, 0x180},
+ { 0x2e800, 1, 0x1f, 0x1c0},
+ { 0x2e804, 255, 0x1f, 0x180},
+ { 0x2ec00, 1, 0x1f, 0x1c0},
+ { 0x2ec04, 255, 0x1f, 0x180},
+ { 0x2f000, 1, 0x1f, 0x1c0},
+ { 0x2f004, 255, 0x1f, 0x180},
+ { 0x2f400, 1, 0x1f, 0x1c0},
+ { 0x2f404, 255, 0x1f, 0x180},
+ { 0x2f800, 1, 0x1f, 0x1c0},
+ { 0x2f804, 255, 0x1f, 0x180},
+ { 0x2fc00, 1, 0x1f, 0x1c0},
+ { 0x2fc04, 255, 0x1f, 0x180},
+ { 0x30000, 1, 0x1f, 0x9e4},
+ { 0x30004, 255, 0x1f, 0x180},
+ { 0x30400, 1, 0x1f, 0x1c0},
+ { 0x30404, 255, 0x1f, 0x180},
+ { 0x30800, 1, 0x1f, 0x1c0},
+ { 0x30804, 255, 0x1f, 0x180},
+ { 0x30c00, 1, 0x1f, 0x1c0},
+ { 0x30c04, 255, 0x1f, 0x180},
+ { 0x31000, 1, 0x1f, 0x1c0},
+ { 0x31004, 255, 0x1f, 0x180},
+ { 0x31400, 1, 0x1f, 0x1c0},
+ { 0x31404, 255, 0x1f, 0x180},
+ { 0x31800, 1, 0x1f, 0x1c0},
+ { 0x31804, 255, 0x1f, 0x180},
+ { 0x31c00, 1, 0x1f, 0x1c0},
+ { 0x31c04, 255, 0x1f, 0x180},
+ { 0x32000, 1, 0x1f, 0x1c0},
+ { 0x32004, 255, 0x1f, 0x180},
+ { 0x32400, 1, 0x1f, 0x1c0},
+ { 0x32404, 255, 0x1f, 0x180},
+ { 0x32800, 1, 0x1f, 0x1c0},
+ { 0x32804, 255, 0x1f, 0x180},
+ { 0x32c00, 1, 0x1f, 0x1c0},
+ { 0x32c04, 255, 0x1f, 0x180},
+ { 0x33000, 1, 0x1f, 0x1c0},
+ { 0x33004, 255, 0x1f, 0x180},
+ { 0x33400, 1, 0x1f, 0x1c0},
+ { 0x33404, 255, 0x1f, 0x180},
+ { 0x33800, 1, 0x1f, 0x1c0},
+ { 0x33804, 255, 0x1f, 0x180},
+ { 0x33c00, 1, 0x1f, 0x1c0},
+ { 0x33c04, 255, 0x1f, 0x180},
+ { 0x34000, 1, 0x1f, 0x1c0},
+ { 0x34004, 255, 0x1f, 0x180},
+ { 0x34400, 1, 0x1f, 0x1c0},
+ { 0x34404, 255, 0x1f, 0x180},
+ { 0x34800, 1, 0x1f, 0x1c0},
+ { 0x34804, 255, 0x1f, 0x180},
+ { 0x34c00, 1, 0x1f, 0x1c0},
+ { 0x34c04, 255, 0x1f, 0x180},
+ { 0x35000, 1, 0x1f, 0x1c0},
+ { 0x35004, 255, 0x1f, 0x180},
+ { 0x35400, 1, 0x1f, 0x1c0},
+ { 0x35404, 255, 0x1f, 0x180},
+ { 0x35800, 1, 0x1f, 0x1c0},
+ { 0x35804, 255, 0x1f, 0x180},
+ { 0x35c00, 1, 0x1f, 0x1c0},
+ { 0x35c04, 255, 0x1f, 0x180},
+ { 0x36000, 1, 0x1f, 0x1c0},
+ { 0x36004, 255, 0x1f, 0x180},
+ { 0x36400, 1, 0x1f, 0x1c0},
+ { 0x36404, 255, 0x1f, 0x180},
+ { 0x36800, 1, 0x1f, 0x1c0},
+ { 0x36804, 255, 0x1f, 0x180},
+ { 0x36c00, 1, 0x1f, 0x1c0},
+ { 0x36c04, 255, 0x1f, 0x180},
+ { 0x37000, 1, 0x1f, 0x1c0},
+ { 0x37004, 255, 0x1f, 0x180},
+ { 0x37400, 1, 0x1f, 0x1c0},
+ { 0x37404, 255, 0x1f, 0x180},
+ { 0x37800, 1, 0x1f, 0x1c0},
+ { 0x37804, 255, 0x1f, 0x180},
+ { 0x37c00, 1, 0x1f, 0x1c0},
+ { 0x37c04, 255, 0x1f, 0x180},
+ { 0x38000, 1, 0x1f, 0x1c0},
+ { 0x38004, 255, 0x1f, 0x180},
+ { 0x38400, 1, 0x1f, 0x1c0},
+ { 0x38404, 255, 0x1f, 0x180},
+ { 0x38800, 1, 0x1f, 0x1c0},
+ { 0x38804, 255, 0x1f, 0x180},
+ { 0x38c00, 1, 0x1f, 0x1c0},
+ { 0x38c04, 255, 0x1f, 0x180},
+ { 0x39000, 1, 0x1f, 0x1c0},
+ { 0x39004, 255, 0x1f, 0x180},
+ { 0x39400, 1, 0x1f, 0x1c0},
+ { 0x39404, 255, 0x1f, 0x180},
+ { 0x39800, 1, 0x1f, 0x1c0},
+ { 0x39804, 255, 0x1f, 0x180},
+ { 0x39c00, 1, 0x1f, 0x1c0},
+ { 0x39c04, 255, 0x1f, 0x180},
+ { 0x3a000, 1, 0x1f, 0x1c0},
+ { 0x3a004, 255, 0x1f, 0x180},
+ { 0x3a400, 1, 0x1f, 0x1c0},
+ { 0x3a404, 255, 0x1f, 0x180},
+ { 0x3a800, 1, 0x1f, 0x1c0},
+ { 0x3a804, 255, 0x1f, 0x180},
+ { 0x3ac00, 1, 0x1f, 0x1c0},
+ { 0x3ac04, 255, 0x1f, 0x180},
+ { 0x3b000, 1, 0x1f, 0x1c0},
+ { 0x3b004, 255, 0x1f, 0x180},
+ { 0x3b400, 1, 0x1f, 0x1c0},
+ { 0x3b404, 255, 0x1f, 0x180},
+ { 0x3b800, 1, 0x1f, 0x1c0},
+ { 0x3b804, 255, 0x1f, 0x180},
+ { 0x3bc00, 1, 0x1f, 0x1c0},
+ { 0x3bc04, 255, 0x1f, 0x180},
+ { 0x3c000, 1, 0x1f, 0x1c0},
+ { 0x3c004, 255, 0x1f, 0x180},
+ { 0x3c400, 1, 0x1f, 0x1c0},
+ { 0x3c404, 255, 0x1f, 0x180},
+ { 0x3c800, 1, 0x1f, 0x1c0},
+ { 0x3c804, 255, 0x1f, 0x180},
+ { 0x3cc00, 1, 0x1f, 0x1c0},
+ { 0x3cc04, 255, 0x1f, 0x180},
+ { 0x3d000, 1, 0x1f, 0x1c0},
+ { 0x3d004, 255, 0x1f, 0x180},
+ { 0x3d400, 1, 0x1f, 0x1c0},
+ { 0x3d404, 255, 0x1f, 0x180},
+ { 0x3d800, 1, 0x1f, 0x1c0},
+ { 0x3d804, 255, 0x1f, 0x180},
+ { 0x3dc00, 1, 0x1f, 0x1c0},
+ { 0x3dc04, 255, 0x1f, 0x180},
+ { 0x3e000, 1, 0x1f, 0x1c0},
+ { 0x3e004, 255, 0x1f, 0x180},
+ { 0x3e400, 1, 0x1f, 0x1c0},
+ { 0x3e404, 255, 0x1f, 0x180},
+ { 0x3e800, 1, 0x1f, 0x1c0},
+ { 0x3e804, 255, 0x1f, 0x180},
+ { 0x3ec00, 1, 0x1f, 0x1c0},
+ { 0x3ec04, 255, 0x1f, 0x180},
+ { 0x3f000, 1, 0x1f, 0x1c0},
+ { 0x3f004, 255, 0x1f, 0x180},
+ { 0x3f400, 1, 0x1f, 0x1c0},
+ { 0x3f404, 255, 0x1f, 0x180},
+ { 0x3f800, 1, 0x1f, 0x1c0},
+ { 0x3f804, 255, 0x1f, 0x180},
+ { 0x3fc00, 1, 0x1f, 0x1c0},
+ { 0x3fc04, 255, 0x1f, 0x180},
+ { 0x40000, 85, 0x1f, 0x924},
+ { 0x40154, 13, 0x1f, 0xfff},
+ { 0x40198, 2, 0x1f, 0x1fff},
+ { 0x401a4, 1, 0x1f, 0x1fff},
+ { 0x401a8, 8, 0x1e, 0x924},
+ { 0x401c8, 1, 0x2, 0x924},
+ { 0x401cc, 2, 0x1e, 0x924},
+ { 0x401d4, 2, 0x1c, 0x924},
+ { 0x40200, 4, 0x1f, 0x924},
+ { 0x40220, 6, 0x1c, 0x924},
+ { 0x40238, 8, 0xc, 0x924},
+ { 0x40258, 4, 0x1c, 0x924},
+ { 0x40268, 2, 0x18, 0x924},
+ { 0x40270, 17, 0x10, 0x924},
+ { 0x40400, 43, 0x1f, 0x924},
+ { 0x404bc, 2, 0x1f, 0x1fff},
+ { 0x404c8, 1, 0x1f, 0x1fff},
+ { 0x404cc, 3, 0x1e, 0x924},
+ { 0x404e0, 1, 0x1c, 0x924},
+ { 0x40500, 2, 0x1f, 0x924},
+ { 0x40510, 2, 0x1f, 0x924},
+ { 0x40520, 2, 0x1f, 0x924},
+ { 0x40530, 2, 0x1f, 0x924},
+ { 0x40540, 2, 0x1f, 0x924},
+ { 0x40550, 10, 0x1c, 0x924},
+ { 0x40610, 2, 0x1c, 0x924},
+ { 0x42000, 164, 0x1f, 0x924},
+ { 0x422b0, 2, 0x1f, 0x1fff},
+ { 0x422bc, 1, 0x1f, 0x1fff},
+ { 0x422c0, 4, 0x1c, 0x924},
+ { 0x422d4, 5, 0x1e, 0x924},
+ { 0x422e8, 1, 0x1c, 0x924},
+ { 0x42400, 49, 0x1f, 0x924},
+ { 0x424c8, 32, 0x1f, 0x924},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x4254c, 1, 0x1f, 0x924},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42554, 1, 0x1f, 0x924},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x4255c, 1, 0x1f, 0x924},
+ { 0x42568, 2, 0x1f, 0x924},
+ { 0x42640, 5, 0x1c, 0x924},
+ { 0x42800, 1, 0x1f, 0x924},
+ { 0x50000, 1, 0x1f, 0x1fff},
+ { 0x50004, 19, 0x1f, 0x924},
+ { 0x50050, 8, 0x1f, 0x93c},
+ { 0x50070, 60, 0x1f, 0x924},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x50180, 20, 0x1f, 0x924},
+ { 0x501e0, 2, 0x1f, 0x1fff},
+ { 0x501ec, 1, 0x1f, 0x1fff},
+ { 0x501f0, 4, 0x1e, 0x924},
+ { 0x50200, 1, 0x1f, 0x924},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x50214, 2, 0x1f, 0x924},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x50220, 2, 0x1f, 0x924},
+ { 0x50228, 6, 0x1e, 0x924},
+ { 0x50240, 1, 0x1f, 0x924},
+ { 0x50280, 1, 0x1f, 0x924},
+ { 0x50300, 1, 0x1c, 0x924},
+ { 0x5030c, 1, 0x1c, 0x924},
+ { 0x50318, 1, 0x1c, 0x934},
+ { 0x5031c, 1, 0x1c, 0x924},
+ { 0x50320, 2, 0x1c, 0x934},
+ { 0x50330, 1, 0x10, 0x924},
+ { 0x52000, 1, 0x1f, 0x924},
+ { 0x54000, 1, 0x1f, 0x93c},
+ { 0x54004, 255, 0x1f, 0x30},
+ { 0x54400, 1, 0x1f, 0x38},
+ { 0x54404, 255, 0x1f, 0x30},
+ { 0x54800, 1, 0x1f, 0x38},
+ { 0x54804, 255, 0x1f, 0x30},
+ { 0x54c00, 1, 0x1f, 0x38},
+ { 0x54c04, 255, 0x1f, 0x30},
+ { 0x55000, 1, 0x1f, 0x38},
+ { 0x55004, 255, 0x1f, 0x30},
+ { 0x55400, 1, 0x1f, 0x38},
+ { 0x55404, 255, 0x1f, 0x30},
+ { 0x55800, 1, 0x1f, 0x38},
+ { 0x55804, 255, 0x1f, 0x30},
+ { 0x55c00, 1, 0x1f, 0x38},
+ { 0x55c04, 255, 0x1f, 0x30},
+ { 0x56000, 1, 0x1f, 0x38},
+ { 0x56004, 255, 0x1f, 0x30},
+ { 0x56400, 1, 0x1f, 0x38},
+ { 0x56404, 255, 0x1f, 0x30},
+ { 0x56800, 1, 0x1f, 0x38},
+ { 0x56804, 255, 0x1f, 0x30},
+ { 0x56c00, 1, 0x1f, 0x38},
+ { 0x56c04, 255, 0x1f, 0x30},
+ { 0x57000, 1, 0x1f, 0x38},
+ { 0x57004, 255, 0x1f, 0x30},
+ { 0x58000, 1, 0x1f, 0x934},
+ { 0x58004, 8191, 0x3, 0x30},
+ { 0x60000, 26, 0x1f, 0x924},
+ { 0x60068, 8, 0x3, 0x924},
+ { 0x60088, 2, 0x1f, 0x924},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x60094, 9, 0x1f, 0x924},
+ { 0x600b8, 9, 0x3, 0x924},
+ { 0x600dc, 1, 0x1f, 0x924},
+ { 0x600e0, 5, 0x3, 0x924},
+ { 0x600f4, 1, 0x7, 0x924},
+ { 0x600f8, 1, 0x3, 0x924},
+ { 0x600fc, 8, 0x1f, 0x924},
+ { 0x6012c, 2, 0x1f, 0x1fff},
+ { 0x60138, 1, 0x1f, 0x1fff},
+ { 0x6013c, 24, 0x2, 0x924},
+ { 0x6019c, 2, 0x1c, 0x924},
+ { 0x601ac, 18, 0x1c, 0x924},
+ { 0x60200, 1, 0x1f, 0xb6d},
+ { 0x60204, 2, 0x1f, 0x249},
+ { 0x60210, 13, 0x1c, 0x924},
+ { 0x60244, 16, 0x10, 0x924},
+ { 0x61000, 1, 0x1f, 0xb6d},
+ { 0x61004, 511, 0x1f, 0x249},
+ { 0x61800, 512, 0x18, 0x249},
+ { 0x70000, 8, 0x1f, 0xb6d},
+ { 0x70020, 8184, 0x1f, 0x249},
+ { 0x78000, 8192, 0x18, 0x249},
+ { 0x85000, 3, 0x1f, 0x1000},
+ { 0x8501c, 7, 0x1f, 0x1000},
+ { 0x85048, 1, 0x1f, 0x1000},
+ { 0x85200, 32, 0x1f, 0x1000},
+ { 0xa0000, 16384, 0x3, 0x1000},
+ { 0xb0000, 16384, 0x2, 0x1000},
+ { 0xc1000, 7, 0x1f, 0x924},
+ { 0xc102c, 2, 0x1f, 0x1fff},
+ { 0xc1038, 1, 0x1f, 0x1fff},
+ { 0xc103c, 2, 0x1c, 0x924},
+ { 0xc1800, 2, 0x1f, 0x924},
+ { 0xc2000, 164, 0x1f, 0x924},
+ { 0xc22b0, 2, 0x1f, 0x1fff},
+ { 0xc22bc, 1, 0x1f, 0x1fff},
+ { 0xc22c0, 5, 0x1c, 0x924},
+ { 0xc22d8, 4, 0x1c, 0x924},
+ { 0xc2400, 49, 0x1f, 0x924},
+ { 0xc24c8, 32, 0x1f, 0x924},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc254c, 1, 0x1f, 0x924},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2554, 1, 0x1f, 0x924},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc255c, 1, 0x1f, 0x924},
+ { 0xc2568, 2, 0x1f, 0x924},
+ { 0xc2600, 1, 0x1f, 0x924},
+ { 0xc4000, 165, 0x1f, 0x924},
+ { 0xc42b4, 2, 0x1f, 0x1fff},
+ { 0xc42c0, 1, 0x1f, 0x1fff},
+ { 0xc42d8, 2, 0x1c, 0x924},
+ { 0xc42e0, 7, 0x1e, 0x924},
+ { 0xc42fc, 1, 0x1c, 0x924},
+ { 0xc4400, 51, 0x1f, 0x924},
+ { 0xc44d0, 32, 0x1f, 0x924},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4554, 1, 0x1f, 0x924},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc455c, 1, 0x1f, 0x924},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xc4564, 1, 0x1f, 0x924},
+ { 0xc4570, 2, 0x1f, 0x924},
+ { 0xc4578, 5, 0x1c, 0x924},
+ { 0xc4600, 1, 0x1f, 0x924},
+ { 0xd0000, 19, 0x1f, 0x924},
+ { 0xd004c, 8, 0x1f, 0x1927},
+ { 0xd006c, 64, 0x1f, 0x924},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd018c, 19, 0x1f, 0x924},
+ { 0xd01e8, 2, 0x1f, 0x1fff},
+ { 0xd01f4, 1, 0x1f, 0x1fff},
+ { 0xd01fc, 1, 0x1c, 0x924},
+ { 0xd0200, 1, 0x1f, 0x924},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xd0218, 4, 0x1f, 0x924},
+ { 0xd0228, 18, 0x1e, 0x924},
+ { 0xd0280, 1, 0x1f, 0x924},
+ { 0xd0300, 1, 0x1f, 0x924},
+ { 0xd0400, 1, 0x1f, 0x924},
+ { 0xd0818, 1, 0x10, 0x924},
+ { 0xd4000, 1, 0x1f, 0x1927},
+ { 0xd4004, 255, 0x1f, 0x6},
+ { 0xd4400, 1, 0x1f, 0x1007},
+ { 0xd4404, 255, 0x1f, 0x6},
+ { 0xd4800, 1, 0x1f, 0x1007},
+ { 0xd4804, 255, 0x1f, 0x6},
+ { 0xd4c00, 1, 0x1f, 0x1007},
+ { 0xd4c04, 255, 0x1f, 0x6},
+ { 0xd5000, 1, 0x1f, 0x1007},
+ { 0xd5004, 255, 0x1f, 0x6},
+ { 0xd5400, 1, 0x1f, 0x1007},
+ { 0xd5404, 255, 0x1f, 0x6},
+ { 0xd5800, 1, 0x1f, 0x1007},
+ { 0xd5804, 255, 0x1f, 0x6},
+ { 0xd5c00, 1, 0x1f, 0x1007},
+ { 0xd5c04, 255, 0x1f, 0x6},
+ { 0xd6000, 1, 0x1f, 0x1007},
+ { 0xd6004, 255, 0x1f, 0x6},
+ { 0xd6400, 1, 0x1f, 0x1007},
+ { 0xd6404, 255, 0x1f, 0x6},
+ { 0xd8000, 1, 0x1f, 0x1927},
+ { 0xd8004, 255, 0x1f, 0x6},
+ { 0xd8400, 1, 0x1f, 0x1007},
+ { 0xd8404, 255, 0x1f, 0x6},
+ { 0xd8800, 1, 0x1f, 0x1007},
+ { 0xd8804, 255, 0x1f, 0x6},
+ { 0xd8c00, 1, 0x1f, 0x1007},
+ { 0xd8c04, 255, 0x1f, 0x6},
+ { 0xd9000, 1, 0x1f, 0x1007},
+ { 0xd9004, 255, 0x1f, 0x6},
+ { 0xd9400, 1, 0x1f, 0x1007},
+ { 0xd9404, 255, 0x1f, 0x6},
+ { 0xd9800, 1, 0x1f, 0x1007},
+ { 0xd9804, 255, 0x1f, 0x6},
+ { 0xd9c00, 1, 0x1f, 0x1007},
+ { 0xd9c04, 255, 0x1f, 0x6},
+ { 0xda000, 1, 0x1f, 0x1007},
+ { 0xda004, 255, 0x1f, 0x6},
+ { 0xda400, 1, 0x1f, 0x1007},
+ { 0xda404, 255, 0x1f, 0x6},
+ { 0xda800, 1, 0x1f, 0x1007},
+ { 0xda804, 255, 0x1f, 0x6},
+ { 0xdac00, 1, 0x1f, 0x1007},
+ { 0xdac04, 255, 0x1f, 0x6},
+ { 0xdb000, 1, 0x1f, 0x1007},
+ { 0xdb004, 255, 0x1f, 0x6},
+ { 0xdb400, 1, 0x1f, 0x1007},
+ { 0xdb404, 255, 0x1f, 0x6},
+ { 0xdb800, 1, 0x1f, 0x1007},
+ { 0xdb804, 255, 0x1f, 0x6},
+ { 0xdbc00, 1, 0x1f, 0x1007},
+ { 0xdbc04, 255, 0x1f, 0x6},
+ { 0xdc000, 1, 0x1f, 0x1007},
+ { 0xdc004, 255, 0x1f, 0x6},
+ { 0xdc400, 1, 0x1f, 0x1007},
+ { 0xdc404, 255, 0x1f, 0x6},
+ { 0xdc800, 1, 0x1f, 0x1007},
+ { 0xdc804, 255, 0x1f, 0x6},
+ { 0xdcc00, 1, 0x1f, 0x1007},
+ { 0xdcc04, 255, 0x1f, 0x6},
+ { 0xdd000, 1, 0x1f, 0x1007},
+ { 0xdd004, 255, 0x1f, 0x6},
+ { 0xdd400, 1, 0x1f, 0x1007},
+ { 0xdd404, 255, 0x1f, 0x6},
+ { 0xdd800, 1, 0x1f, 0x1007},
+ { 0xdd804, 255, 0x1f, 0x6},
+ { 0xddc00, 1, 0x1f, 0x1007},
+ { 0xddc04, 255, 0x1f, 0x6},
+ { 0xde000, 1, 0x1f, 0x1007},
+ { 0xde004, 255, 0x1f, 0x6},
+ { 0xde400, 1, 0x1f, 0x1007},
+ { 0xde404, 255, 0x1f, 0x6},
+ { 0xde800, 1, 0x1f, 0x1007},
+ { 0xde804, 255, 0x1f, 0x6},
+ { 0xdec00, 1, 0x1f, 0x1007},
+ { 0xdec04, 255, 0x1f, 0x6},
+ { 0xdf000, 1, 0x1f, 0x1007},
+ { 0xdf004, 255, 0x1f, 0x6},
+ { 0xdf400, 1, 0x1f, 0x1007},
+ { 0xdf404, 255, 0x1f, 0x6},
+ { 0xdf800, 1, 0x1f, 0x1007},
+ { 0xdf804, 255, 0x1f, 0x6},
+ { 0xdfc00, 1, 0x1f, 0x1007},
+ { 0xdfc04, 255, 0x1f, 0x6},
+ { 0xe0000, 21, 0x1f, 0x924},
+ { 0xe0054, 8, 0x1f, 0xf24},
+ { 0xe0074, 49, 0x1f, 0x924},
+ { 0xe0138, 1, 0x3, 0x924},
+ { 0xe013c, 6, 0x1f, 0x924},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe0174, 21, 0x1f, 0x924},
+ { 0xe01d8, 2, 0x1f, 0x1fff},
+ { 0xe01e4, 1, 0x1f, 0x1fff},
+ { 0xe01f4, 1, 0x4, 0x924},
+ { 0xe01f8, 1, 0x1c, 0x924},
+ { 0xe0200, 1, 0x1f, 0x924},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe0214, 2, 0x1f, 0x924},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0xe0224, 2, 0x1f, 0x924},
+ { 0xe022c, 18, 0x1e, 0x924},
+ { 0xe0280, 1, 0x1f, 0x924},
+ { 0xe0300, 1, 0x1f, 0x924},
+ { 0xe0400, 1, 0x10, 0x924},
+ { 0xe1000, 1, 0x1f, 0x924},
+ { 0xe2000, 1, 0x1f, 0xf24},
+ { 0xe2004, 255, 0x1f, 0xc00},
+ { 0xe2400, 1, 0x1f, 0xe00},
+ { 0xe2404, 255, 0x1f, 0xc00},
+ { 0xe2800, 1, 0x1f, 0xe00},
+ { 0xe2804, 255, 0x1f, 0xc00},
+ { 0xe2c00, 1, 0x1f, 0xe00},
+ { 0xe2c04, 255, 0x1f, 0xc00},
+ { 0xe3000, 1, 0x1f, 0xe00},
+ { 0xe3004, 255, 0x1f, 0xc00},
+ { 0xe3400, 1, 0x1f, 0xe00},
+ { 0xe3404, 255, 0x1f, 0xc00},
+ { 0xe3800, 1, 0x1f, 0xe00},
+ { 0xe3804, 255, 0x1f, 0xc00},
+ { 0xe3c00, 1, 0x1f, 0xe00},
+ { 0xe3c04, 255, 0x1f, 0xc00},
+ { 0xf0000, 1, 0x1f, 0xf24},
+ { 0xf0004, 255, 0x1f, 0xc00},
+ { 0xf0400, 1, 0x1f, 0xe00},
+ { 0xf0404, 255, 0x1f, 0xc00},
+ { 0xf0800, 1, 0x1f, 0xe00},
+ { 0xf0804, 255, 0x1f, 0xc00},
+ { 0xf0c00, 1, 0x1f, 0xe00},
+ { 0xf0c04, 255, 0x1f, 0xc00},
+ { 0xf1000, 1, 0x1f, 0xe00},
+ { 0xf1004, 255, 0x1f, 0xc00},
+ { 0xf1400, 1, 0x1f, 0xe00},
+ { 0xf1404, 255, 0x1f, 0xc00},
+ { 0xf1800, 1, 0x1f, 0xe00},
+ { 0xf1804, 255, 0x1f, 0xc00},
+ { 0xf1c00, 1, 0x1f, 0xe00},
+ { 0xf1c04, 255, 0x1f, 0xc00},
+ { 0xf2000, 1, 0x1f, 0xe00},
+ { 0xf2004, 255, 0x1f, 0xc00},
+ { 0xf2400, 1, 0x1f, 0xe00},
+ { 0xf2404, 255, 0x1f, 0xc00},
+ { 0xf2800, 1, 0x1f, 0xe00},
+ { 0xf2804, 255, 0x1f, 0xc00},
+ { 0xf2c00, 1, 0x1f, 0xe00},
+ { 0xf2c04, 255, 0x1f, 0xc00},
+ { 0xf3000, 1, 0x1f, 0xe00},
+ { 0xf3004, 255, 0x1f, 0xc00},
+ { 0xf3400, 1, 0x1f, 0xe00},
+ { 0xf3404, 255, 0x1f, 0xc00},
+ { 0xf3800, 1, 0x1f, 0xe00},
+ { 0xf3804, 255, 0x1f, 0xc00},
+ { 0xf3c00, 1, 0x1f, 0xe00},
+ { 0xf3c04, 255, 0x1f, 0xc00},
+ { 0xf4000, 1, 0x1f, 0xe00},
+ { 0xf4004, 255, 0x1f, 0xc00},
+ { 0xf4400, 1, 0x1f, 0xe00},
+ { 0xf4404, 255, 0x1f, 0xc00},
+ { 0xf4800, 1, 0x1f, 0xe00},
+ { 0xf4804, 255, 0x1f, 0xc00},
+ { 0xf4c00, 1, 0x1f, 0xe00},
+ { 0xf4c04, 255, 0x1f, 0xc00},
+ { 0xf5000, 1, 0x1f, 0xe00},
+ { 0xf5004, 255, 0x1f, 0xc00},
+ { 0xf5400, 1, 0x1f, 0xe00},
+ { 0xf5404, 255, 0x1f, 0xc00},
+ { 0xf5800, 1, 0x1f, 0xe00},
+ { 0xf5804, 255, 0x1f, 0xc00},
+ { 0xf5c00, 1, 0x1f, 0xe00},
+ { 0xf5c04, 255, 0x1f, 0xc00},
+ { 0xf6000, 1, 0x1f, 0xe00},
+ { 0xf6004, 255, 0x1f, 0xc00},
+ { 0xf6400, 1, 0x1f, 0xe00},
+ { 0xf6404, 255, 0x1f, 0xc00},
+ { 0xf6800, 1, 0x1f, 0xe00},
+ { 0xf6804, 255, 0x1f, 0xc00},
+ { 0xf6c00, 1, 0x1f, 0xe00},
+ { 0xf6c04, 255, 0x1f, 0xc00},
+ { 0xf7000, 1, 0x1f, 0xe00},
+ { 0xf7004, 255, 0x1f, 0xc00},
+ { 0xf7400, 1, 0x1f, 0xe00},
+ { 0xf7404, 255, 0x1f, 0xc00},
+ { 0xf7800, 1, 0x1f, 0xe00},
+ { 0xf7804, 255, 0x1f, 0xc00},
+ { 0xf7c00, 1, 0x1f, 0xe00},
+ { 0xf7c04, 255, 0x1f, 0xc00},
+ { 0xf8000, 1, 0x1f, 0xe00},
+ { 0xf8004, 255, 0x1f, 0xc00},
+ { 0xf8400, 1, 0x1f, 0xe00},
+ { 0xf8404, 255, 0x1f, 0xc00},
+ { 0xf8800, 1, 0x1f, 0xe00},
+ { 0xf8804, 255, 0x1f, 0xc00},
+ { 0xf8c00, 1, 0x1f, 0xe00},
+ { 0xf8c04, 255, 0x1f, 0xc00},
+ { 0xf9000, 1, 0x1f, 0xe00},
+ { 0xf9004, 255, 0x1f, 0xc00},
+ { 0xf9400, 1, 0x1f, 0xe00},
+ { 0xf9404, 255, 0x1f, 0xc00},
+ { 0xf9800, 1, 0x1f, 0xe00},
+ { 0xf9804, 255, 0x1f, 0xc00},
+ { 0xf9c00, 1, 0x1f, 0xe00},
+ { 0xf9c04, 255, 0x1f, 0xc00},
+ { 0xfa000, 1, 0x1f, 0xe00},
+ { 0xfa004, 255, 0x1f, 0xc00},
+ { 0xfa400, 1, 0x1f, 0xe00},
+ { 0xfa404, 255, 0x1f, 0xc00},
+ { 0xfa800, 1, 0x1f, 0xe00},
+ { 0xfa804, 255, 0x1f, 0xc00},
+ { 0xfac00, 1, 0x1f, 0xe00},
+ { 0xfac04, 255, 0x1f, 0xc00},
+ { 0xfb000, 1, 0x1f, 0xe00},
+ { 0xfb004, 255, 0x1f, 0xc00},
+ { 0xfb400, 1, 0x1f, 0xe00},
+ { 0xfb404, 255, 0x1f, 0xc00},
+ { 0xfb800, 1, 0x1f, 0xe00},
+ { 0xfb804, 255, 0x1f, 0xc00},
+ { 0xfbc00, 1, 0x1f, 0xe00},
+ { 0xfbc04, 255, 0x1f, 0xc00},
+ { 0xfc000, 1, 0x1f, 0xe00},
+ { 0xfc004, 255, 0x1f, 0xc00},
+ { 0xfc400, 1, 0x1f, 0xe00},
+ { 0xfc404, 255, 0x1f, 0xc00},
+ { 0xfc800, 1, 0x1f, 0xe00},
+ { 0xfc804, 255, 0x1f, 0xc00},
+ { 0xfcc00, 1, 0x1f, 0xe00},
+ { 0xfcc04, 255, 0x1f, 0xc00},
+ { 0xfd000, 1, 0x1f, 0xe00},
+ { 0xfd004, 255, 0x1f, 0xc00},
+ { 0xfd400, 1, 0x1f, 0xe00},
+ { 0xfd404, 255, 0x1f, 0xc00},
+ { 0xfd800, 1, 0x1f, 0xe00},
+ { 0xfd804, 255, 0x1f, 0xc00},
+ { 0xfdc00, 1, 0x1f, 0xe00},
+ { 0xfdc04, 255, 0x1f, 0xc00},
+ { 0xfe000, 1, 0x1f, 0xe00},
+ { 0xfe004, 255, 0x1f, 0xc00},
+ { 0xfe400, 1, 0x1f, 0xe00},
+ { 0xfe404, 255, 0x1f, 0xc00},
+ { 0xfe800, 1, 0x1f, 0xe00},
+ { 0xfe804, 255, 0x1f, 0xc00},
+ { 0xfec00, 1, 0x1f, 0xe00},
+ { 0xfec04, 255, 0x1f, 0xc00},
+ { 0xff000, 1, 0x1f, 0xe00},
+ { 0xff004, 255, 0x1f, 0xc00},
+ { 0xff400, 1, 0x1f, 0xe00},
+ { 0xff404, 255, 0x1f, 0xc00},
+ { 0xff800, 1, 0x1f, 0xe00},
+ { 0xff804, 255, 0x1f, 0xc00},
+ { 0xffc00, 1, 0x1f, 0xe00},
+ { 0xffc04, 255, 0x1f, 0xc00},
+ { 0x101000, 5, 0x1f, 0x924},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101018, 6, 0x1f, 0x924},
+ { 0x101040, 2, 0x1f, 0x1fff},
+ { 0x10104c, 1, 0x1f, 0x1fff},
+ { 0x101050, 1, 0x1e, 0x924},
+ { 0x101054, 3, 0x1c, 0x924},
+ { 0x101100, 1, 0x1f, 0x924},
+ { 0x101800, 8, 0x1f, 0x924},
+ { 0x102000, 18, 0x1f, 0x924},
+ { 0x102058, 2, 0x1f, 0x1fff},
+ { 0x102064, 1, 0x1f, 0x1fff},
+ { 0x102068, 6, 0x1c, 0x924},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x1020c0, 1, 0x1f, 0x924},
+ { 0x1020c8, 8, 0x2, 0x924},
+ { 0x1020e8, 9, 0x1c, 0x924},
+ { 0x102400, 1, 0x1f, 0x924},
+ { 0x103000, 1, 0x1f, 0x924},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x10300c, 23, 0x1f, 0x924},
+ { 0x103088, 2, 0x1f, 0x1fff},
+ { 0x103094, 1, 0x1f, 0x1fff},
+ { 0x103098, 1, 0x1e, 0x924},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030a4, 2, 0x1e, 0x924},
+ { 0x1030ac, 2, 0x1c, 0x924},
+ { 0x1030b4, 1, 0x4, 0x924},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030c0, 3, 0x1c, 0x924},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030d0, 1, 0x1c, 0x924},
+ { 0x1030d8, 2, 0x1c, 0x924},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x1030e4, 5, 0x1c, 0x924},
+ { 0x103400, 136, 0x1c, 0x1fff},
+ { 0x103800, 8, 0x1f, 0x924},
+ { 0x104000, 1, 0x1f, 0x924},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104008, 4, 0x1f, 0x924},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x10401c, 1, 0x1f, 0x924},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x104024, 6, 0x1f, 0x924},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x104040, 47, 0x1f, 0x924},
+ { 0x10410c, 2, 0x1f, 0x1fff},
+ { 0x104118, 1, 0x1f, 0x1fff},
+ { 0x10411c, 16, 0x1c, 0x924},
+ { 0x104200, 17, 0x1f, 0x924},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104500, 192, 0x1f, 0xdb6},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x104900, 192, 0x1f, 0xdb6},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x105400, 768, 0x1f, 0xdb6},
+ { 0x107000, 7, 0x1c, 0x924},
+ { 0x10701c, 1, 0x18, 0x924},
+ { 0x108000, 33, 0x3, 0x924},
+ { 0x1080ac, 5, 0x2, 0x924},
+ { 0x108100, 5, 0x3, 0x924},
+ { 0x108120, 5, 0x3, 0x924},
+ { 0x108200, 74, 0x3, 0x924},
+ { 0x108400, 74, 0x3, 0x924},
+ { 0x108800, 152, 0x3, 0x924},
+ { 0x110000, 111, 0x1c, 0x924},
+ { 0x1101cc, 2, 0x1c, 0x1fff},
+ { 0x1101d8, 1, 0x1c, 0x1fff},
+ { 0x1101dc, 1, 0x18, 0x924},
+ { 0x110200, 4, 0x1c, 0x924},
+ { 0x120000, 92, 0x1f, 0x924},
+ { 0x120170, 2, 0x3, 0x924},
+ { 0x120178, 14, 0x1f, 0x924},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x1201b8, 93, 0x1f, 0x924},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x120330, 15, 0x1f, 0x924},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120378, 36, 0x1f, 0x924},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120410, 1, 0x1f, 0x924},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120450, 10, 0x1f, 0x924},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x120480, 43, 0x1f, 0x924},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120530, 5, 0x1f, 0x924},
+ { 0x120544, 4, 0x3, 0x924},
+ { 0x120554, 4, 0x1f, 0x924},
+ { 0x120564, 2, 0x1f, 0xfff},
+ { 0x12057c, 2, 0x1f, 0x1fff},
+ { 0x120588, 3, 0x1f, 0x1fff},
+ { 0x120598, 1, 0x1f, 0x1fff},
+ { 0x12059c, 22, 0x1e, 0x924},
+ { 0x1205f4, 1, 0x6, 0x924},
+ { 0x1205f8, 4, 0x1c, 0x924},
+ { 0x120618, 1, 0x1c, 0x924},
+ { 0x12061c, 31, 0x1e, 0x924},
+ { 0x120698, 3, 0x1c, 0x924},
+ { 0x1206a4, 1, 0x4, 0x924},
+ { 0x1206a8, 1, 0x1c, 0x924},
+ { 0x1206b0, 38, 0x1c, 0x924},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x12074c, 11, 0x1c, 0x924},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120780, 23, 0x1c, 0x924},
+ { 0x1207dc, 1, 0x4, 0x924},
+ { 0x1207fc, 1, 0x1c, 0x924},
+ { 0x12080c, 2, 0x1f, 0xfff},
+ { 0x120814, 1, 0x1f, 0x924},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x12081c, 1, 0x1f, 0x924},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120824, 1, 0x1f, 0x924},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x12082c, 1, 0x1f, 0x924},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120834, 1, 0x1f, 0x924},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x12083c, 1, 0x1f, 0x924},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120844, 1, 0x1f, 0x924},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x12084c, 1, 0x1f, 0x924},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120854, 1, 0x1f, 0x924},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x12085c, 1, 0x1f, 0x924},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120864, 1, 0x1f, 0x924},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x12086c, 1, 0x1f, 0x924},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120874, 1, 0x1f, 0x924},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x12087c, 1, 0x1f, 0x924},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120884, 1, 0x1f, 0x924},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x12088c, 1, 0x1f, 0x924},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120894, 1, 0x1f, 0x924},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x12089c, 1, 0x1f, 0x924},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a4, 1, 0x1f, 0x924},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208ac, 1, 0x1f, 0x924},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b4, 1, 0x1f, 0x924},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208bc, 1, 0x1f, 0x924},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c4, 1, 0x1f, 0x924},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208cc, 1, 0x1f, 0x924},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d4, 1, 0x1f, 0x924},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208dc, 1, 0x1f, 0x924},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e4, 1, 0x1f, 0x924},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208ec, 1, 0x1f, 0x924},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f4, 1, 0x1f, 0x924},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x1208fc, 1, 0x1f, 0x924},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120904, 1, 0x1f, 0x924},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x12090c, 1, 0x1f, 0x924},
+ { 0x120910, 7, 0x1c, 0x924},
+ { 0x120930, 9, 0x1c, 0x924},
+ { 0x12095c, 37, 0x18, 0x924},
+ { 0x120a00, 2, 0x7, 0x924},
+ { 0x120b00, 1, 0x18, 0x924},
+ { 0x122000, 2, 0x1f, 0x924},
+ { 0x122008, 2046, 0x1, 0x924},
+ { 0x128000, 6144, 0x1e, 0x924},
+ { 0x130000, 1, 0x1c, 0x1fff},
+ { 0x130004, 11, 0x1c, 0x924},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x130034, 6, 0x1c, 0x924},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130058, 3, 0x1c, 0x924},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13006c, 8, 0x1c, 0x924},
+ { 0x13009c, 2, 0x1c, 0x1fff},
+ { 0x1300a8, 1, 0x1c, 0x1fff},
+ { 0x130100, 12, 0x1c, 0x924},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x130134, 14, 0x1c, 0x924},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130170, 1, 0x1c, 0x924},
+ { 0x130180, 1, 0x1c, 0x924},
+ { 0x130200, 1, 0x1c, 0x924},
+ { 0x130280, 1, 0x1c, 0x924},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130304, 4, 0x1c, 0x924},
+ { 0x130380, 1, 0x1c, 0x924},
+ { 0x130400, 1, 0x1c, 0x924},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x130484, 4, 0x1c, 0x924},
+ { 0x130800, 72, 0x1c, 0x924},
+ { 0x131000, 136, 0x1c, 0x924},
+ { 0x132000, 148, 0x1c, 0x924},
+ { 0x134000, 544, 0x1c, 0x924},
+ { 0x140000, 1, 0x1f, 0x924},
+ { 0x140004, 9, 0xf, 0x924},
+ { 0x140028, 8, 0x1f, 0x924},
+ { 0x140048, 5, 0xf, 0x924},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x140064, 3, 0xf, 0x924},
+ { 0x140070, 1, 0x1f, 0x924},
+ { 0x140074, 10, 0xf, 0x924},
+ { 0x14009c, 1, 0x1f, 0x924},
+ { 0x1400a0, 5, 0xf, 0x924},
+ { 0x1400b4, 7, 0x1f, 0x924},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400d8, 2, 0xf, 0x924},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1400e4, 5, 0xf, 0x924},
+ { 0x1400f8, 2, 0x1f, 0x924},
+ { 0x140100, 5, 0x3, 0x924},
+ { 0x140114, 5, 0xf, 0x924},
+ { 0x140128, 7, 0x1f, 0x924},
+ { 0x140144, 9, 0xf, 0x924},
+ { 0x140168, 8, 0x1f, 0x924},
+ { 0x140188, 3, 0xf, 0x924},
+ { 0x140194, 13, 0x1f, 0x924},
+ { 0x1401d8, 2, 0x1f, 0x1fff},
+ { 0x1401e4, 1, 0x1f, 0x1fff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x1402e0, 2, 0xc, 0x924},
+ { 0x1402e8, 2, 0x1c, 0x924},
+ { 0x1402f0, 9, 0xc, 0x924},
+ { 0x140314, 9, 0x10, 0x924},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140354, 7, 0x10, 0x924},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x14038c, 14, 0x10, 0x924},
+ { 0x1404b0, 14, 0x10, 0x924},
+ { 0x15c000, 2, 0x1e, 0x924},
+ { 0x15c008, 5, 0x2, 0x924},
+ { 0x15c020, 8, 0x1c, 0x924},
+ { 0x15c040, 1, 0xc, 0x924},
+ { 0x15c044, 2, 0x1c, 0x924},
+ { 0x15c04c, 8, 0xc, 0x924},
+ { 0x15c06c, 8, 0x1c, 0x924},
+ { 0x15c090, 13, 0x1c, 0x924},
+ { 0x15c0c8, 24, 0x1c, 0x924},
+ { 0x15c128, 2, 0xc, 0x924},
+ { 0x15c130, 1, 0x1c, 0x924},
+ { 0x15c138, 6, 0x1c, 0x924},
+ { 0x15c150, 2, 0x18, 0x924},
+ { 0x15c158, 2, 0x8, 0x924},
+ { 0x15c160, 23, 0x10, 0x924},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c1d4, 23, 0x10, 0x924},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x15c24c, 90, 0x10, 0x924},
+ { 0x160004, 6, 0x18, 0x924},
+ { 0x16003c, 1, 0x10, 0x924},
+ { 0x160040, 6, 0x18, 0x924},
+ { 0x16005c, 6, 0x18, 0x924},
+ { 0x160074, 1, 0x10, 0x924},
+ { 0x160078, 2, 0x18, 0x924},
+ { 0x160300, 8, 0x18, 0x924},
+ { 0x160330, 6, 0x18, 0x924},
+ { 0x160404, 6, 0x18, 0x924},
+ { 0x16043c, 1, 0x10, 0x924},
+ { 0x160440, 6, 0x18, 0x924},
+ { 0x16045c, 6, 0x18, 0x924},
+ { 0x160474, 1, 0x10, 0x924},
+ { 0x160478, 2, 0x18, 0x924},
+ { 0x160700, 8, 0x18, 0x924},
+ { 0x160730, 6, 0x18, 0x924},
+ { 0x161000, 7, 0x1f, 0x924},
+ { 0x16102c, 2, 0x1f, 0x1fff},
+ { 0x161038, 1, 0x1f, 0x1fff},
+ { 0x16103c, 2, 0x1c, 0x924},
+ { 0x161800, 2, 0x1f, 0x924},
+ { 0x162000, 54, 0x18, 0x924},
+ { 0x162200, 60, 0x18, 0x924},
+ { 0x162400, 54, 0x18, 0x924},
+ { 0x162600, 60, 0x18, 0x924},
+ { 0x162800, 54, 0x18, 0x924},
+ { 0x162a00, 60, 0x18, 0x924},
+ { 0x162c00, 54, 0x18, 0x924},
+ { 0x162e00, 60, 0x18, 0x924},
+ { 0x163000, 1, 0x18, 0x924},
+ { 0x163008, 1, 0x18, 0x924},
+ { 0x163010, 1, 0x18, 0x924},
+ { 0x163018, 1, 0x18, 0x924},
+ { 0x163020, 5, 0x18, 0x924},
+ { 0x163038, 3, 0x18, 0x924},
+ { 0x163048, 3, 0x18, 0x924},
+ { 0x163058, 1, 0x18, 0x924},
+ { 0x163060, 1, 0x18, 0x924},
+ { 0x163068, 1, 0x18, 0x924},
+ { 0x163070, 3, 0x18, 0x924},
+ { 0x163080, 1, 0x18, 0x924},
+ { 0x163088, 3, 0x18, 0x924},
+ { 0x163098, 1, 0x18, 0x924},
+ { 0x1630a0, 1, 0x18, 0x924},
+ { 0x1630a8, 1, 0x18, 0x924},
+ { 0x1630b0, 2, 0x10, 0x924},
+ { 0x1630c0, 1, 0x18, 0x924},
+ { 0x1630c8, 1, 0x18, 0x924},
+ { 0x1630d0, 1, 0x18, 0x924},
+ { 0x1630d8, 1, 0x18, 0x924},
+ { 0x1630e0, 2, 0x18, 0x924},
+ { 0x163110, 1, 0x18, 0x924},
+ { 0x163120, 2, 0x18, 0x924},
+ { 0x163420, 4, 0x18, 0x924},
+ { 0x163438, 2, 0x18, 0x924},
+ { 0x163488, 2, 0x18, 0x924},
+ { 0x163520, 2, 0x18, 0x924},
+ { 0x163800, 1, 0x18, 0x924},
+ { 0x163808, 1, 0x18, 0x924},
+ { 0x163810, 1, 0x18, 0x924},
+ { 0x163818, 1, 0x18, 0x924},
+ { 0x163820, 5, 0x18, 0x924},
+ { 0x163838, 3, 0x18, 0x924},
+ { 0x163848, 3, 0x18, 0x924},
+ { 0x163858, 1, 0x18, 0x924},
+ { 0x163860, 1, 0x18, 0x924},
+ { 0x163868, 1, 0x18, 0x924},
+ { 0x163870, 3, 0x18, 0x924},
+ { 0x163880, 1, 0x18, 0x924},
+ { 0x163888, 3, 0x18, 0x924},
+ { 0x163898, 1, 0x18, 0x924},
+ { 0x1638a0, 1, 0x18, 0x924},
+ { 0x1638a8, 1, 0x18, 0x924},
+ { 0x1638b0, 2, 0x10, 0x924},
+ { 0x1638c0, 1, 0x18, 0x924},
+ { 0x1638c8, 1, 0x18, 0x924},
+ { 0x1638d0, 1, 0x18, 0x924},
+ { 0x1638d8, 1, 0x18, 0x924},
+ { 0x1638e0, 2, 0x18, 0x924},
+ { 0x163910, 1, 0x18, 0x924},
+ { 0x163920, 2, 0x18, 0x924},
+ { 0x163c20, 4, 0x18, 0x924},
+ { 0x163c38, 2, 0x18, 0x924},
+ { 0x163c88, 2, 0x18, 0x924},
+ { 0x163d20, 2, 0x18, 0x924},
+ { 0x164000, 5, 0x1f, 0x924},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x16401c, 53, 0x1f, 0x924},
+ { 0x164100, 2, 0x1f, 0x1fff},
+ { 0x16410c, 1, 0x1f, 0x1fff},
+ { 0x164110, 2, 0x1e, 0x924},
+ { 0x164118, 15, 0x1c, 0x924},
+ { 0x164200, 1, 0x1f, 0x924},
+ { 0x164208, 1, 0x1f, 0x924},
+ { 0x164210, 1, 0x1f, 0x924},
+ { 0x164218, 1, 0x1f, 0x924},
+ { 0x164220, 1, 0x1f, 0x924},
+ { 0x164228, 1, 0x1f, 0x924},
+ { 0x164230, 1, 0x1f, 0x924},
+ { 0x164238, 1, 0x1f, 0x924},
+ { 0x164240, 1, 0x1f, 0x924},
+ { 0x164248, 1, 0x1f, 0x924},
+ { 0x164250, 1, 0x1f, 0x924},
+ { 0x164258, 1, 0x1f, 0x924},
+ { 0x164260, 1, 0x1f, 0x924},
+ { 0x164270, 2, 0x1f, 0x924},
+ { 0x164280, 2, 0x1f, 0x924},
+ { 0x164800, 2, 0x1f, 0x924},
+ { 0x165000, 2, 0x1f, 0x924},
+ { 0x166000, 164, 0x1f, 0x924},
+ { 0x1662b0, 2, 0x1f, 0x1fff},
+ { 0x1662bc, 1, 0x1f, 0x1fff},
+ { 0x1662cc, 7, 0x1c, 0x924},
+ { 0x166400, 49, 0x1f, 0x924},
+ { 0x1664c8, 32, 0x1f, 0x924},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x16654c, 1, 0x1f, 0x924},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166554, 1, 0x1f, 0x924},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x16655c, 1, 0x1f, 0x924},
+ { 0x166568, 2, 0x1f, 0x924},
+ { 0x166570, 5, 0x1c, 0x924},
+ { 0x166800, 1, 0x1f, 0x924},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168004, 1, 0x1f, 0x924},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x16800c, 1, 0x1f, 0x924},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168014, 1, 0x1f, 0x924},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x16801c, 3, 0x1f, 0x924},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168030, 10, 0x1f, 0x924},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x16807c, 106, 0x1f, 0x924},
+ { 0x168224, 2, 0x3, 0x924},
+ { 0x16822c, 3, 0x1f, 0x924},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x16823c, 25, 0x1f, 0x924},
+ { 0x1682a0, 12, 0x3, 0x924},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x1682ec, 5, 0x1f, 0x924},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x16840c, 1, 0x1f, 0x924},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168418, 2, 0x3, 0x924},
+ { 0x168420, 6, 0x1f, 0x924},
+ { 0x168448, 2, 0x1f, 0x1fff},
+ { 0x168454, 1, 0x1f, 0x1fff},
+ { 0x168800, 19, 0x1f, 0x924},
+ { 0x168900, 1, 0x1f, 0x924},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16a000, 1536, 0x1f, 0x924},
+ { 0x16c000, 1536, 0x1f, 0x924},
+ { 0x16e000, 16, 0x2, 0x924},
+ { 0x16e040, 8, 0x1c, 0x924},
+ { 0x16e100, 1, 0x2, 0x924},
+ { 0x16e200, 2, 0x2, 0xfff},
+ { 0x16e400, 1, 0x2, 0x924},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e40c, 94, 0x2, 0x924},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e69c, 8, 0x2, 0x924},
+ { 0x16e6bc, 4, 0x1e, 0x924},
+ { 0x16e6cc, 4, 0x2, 0x924},
+ { 0x16e6e0, 2, 0x1c, 0x924},
+ { 0x16e6e8, 5, 0xc, 0x924},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e70c, 1, 0x1c, 0x924},
+ { 0x16e768, 17, 0x1c, 0x924},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x170000, 24, 0x1f, 0x924},
+ { 0x170060, 4, 0x3, 0x924},
+ { 0x170070, 13, 0x1f, 0x924},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700a8, 1, 0x1f, 0x924},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700b4, 3, 0x1f, 0x924},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x1700c4, 44, 0x1f, 0x924},
+ { 0x170184, 2, 0x1f, 0x1fff},
+ { 0x170190, 1, 0x1f, 0x1fff},
+ { 0x170194, 11, 0x1c, 0x924},
+ { 0x1701c4, 1, 0x1c, 0x924},
+ { 0x1701cc, 7, 0x1c, 0x924},
+ { 0x1701e8, 1, 0x18, 0x924},
+ { 0x1701ec, 1, 0x1c, 0x924},
+ { 0x1701f4, 1, 0x1c, 0x924},
+ { 0x170200, 4, 0x1f, 0x924},
+ { 0x170214, 1, 0x1f, 0x924},
+ { 0x170218, 77, 0x1c, 0x924},
+ { 0x170400, 64, 0x1c, 0x924},
+ { 0x178000, 1, 0x1f, 0x924},
+ { 0x180000, 61, 0x1f, 0x924},
+ { 0x180114, 2, 0x1f, 0x1fff},
+ { 0x180120, 3, 0x1f, 0x1fff},
+ { 0x180130, 1, 0x1f, 0x1fff},
+ { 0x18013c, 2, 0x1e, 0x924},
+ { 0x180200, 27, 0x1f, 0x924},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x180270, 12, 0x1f, 0x924},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1802a4, 17, 0x1f, 0x924},
+ { 0x180340, 4, 0x1f, 0x924},
+ { 0x180380, 1, 0x1c, 0x924},
+ { 0x180388, 1, 0x1c, 0x924},
+ { 0x180390, 1, 0x1c, 0x924},
+ { 0x180398, 1, 0x1c, 0x924},
+ { 0x1803a0, 5, 0x1c, 0x924},
+ { 0x1803b4, 2, 0x18, 0x924},
+ { 0x180400, 256, 0x3, 0xfff},
+ { 0x181000, 4, 0x1f, 0x93c},
+ { 0x181010, 1020, 0x1f, 0x38},
+ { 0x182000, 4, 0x18, 0x924},
+ { 0x1a0000, 1, 0x1f, 0x92c},
+ { 0x1a0004, 5631, 0x1f, 0x8},
+ { 0x1a5800, 2560, 0x1e, 0x8},
+ { 0x1a8000, 1, 0x1f, 0x92c},
+ { 0x1a8004, 8191, 0x1e, 0x8},
+ { 0x1b0000, 1, 0x1f, 0x92c},
+ { 0x1b0004, 15, 0x2, 0x8},
+ { 0x1b0040, 1, 0x1e, 0x92c},
+ { 0x1b0044, 239, 0x2, 0x8},
+ { 0x1b0400, 1, 0x1f, 0x92c},
+ { 0x1b0404, 255, 0x2, 0x8},
+ { 0x1b0800, 1, 0x1f, 0x924},
+ { 0x1b0840, 1, 0x1e, 0x924},
+ { 0x1b0c00, 1, 0x1f, 0x1fff},
+ { 0x1b1000, 1, 0x1f, 0x1fff},
+ { 0x1b1040, 1, 0x1e, 0x1fff},
+ { 0x1b1400, 1, 0x1f, 0x924},
+ { 0x1b1440, 1, 0x1e, 0x924},
+ { 0x1b1480, 1, 0x1e, 0x924},
+ { 0x1b14c0, 1, 0x1e, 0x924},
+ { 0x1b1800, 128, 0x1f, 0x10},
+ { 0x1b1c00, 128, 0x1f, 0x10},
+ { 0x1b2000, 1, 0x1f, 0xdb6},
+ { 0x1b2400, 1, 0x1e, 0x92c},
+ { 0x1b2404, 5631, 0x1c, 0x8},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x1b8100, 1, 0x1f, 0x924},
+ { 0x1b8140, 1, 0x1f, 0x924},
+ { 0x1b8180, 1, 0x1f, 0x924},
+ { 0x1b81c0, 1, 0x1f, 0x924},
+ { 0x1b8200, 1, 0x1f, 0x924},
+ { 0x1b8240, 1, 0x1f, 0x924},
+ { 0x1b8280, 1, 0x1f, 0x924},
+ { 0x1b82c0, 1, 0x1f, 0x924},
+ { 0x1b8300, 1, 0x1f, 0x924},
+ { 0x1b8340, 1, 0x1f, 0x924},
+ { 0x1b8380, 1, 0x1f, 0x924},
+ { 0x1b83c0, 1, 0x1f, 0x924},
+ { 0x1b8400, 1, 0x1f, 0x924},
+ { 0x1b8440, 1, 0x1f, 0x924},
+ { 0x1b8480, 1, 0x1f, 0x924},
+ { 0x1b84c0, 1, 0x1f, 0x924},
+ { 0x1b8500, 1, 0x1f, 0x924},
+ { 0x1b8540, 1, 0x1f, 0x924},
+ { 0x1b8580, 1, 0x1f, 0x924},
+ { 0x1b85c0, 19, 0x1c, 0x924},
+ { 0x1b8800, 1, 0x1f, 0x924},
+ { 0x1b8840, 1, 0x1f, 0x924},
+ { 0x1b8880, 1, 0x1f, 0x924},
+ { 0x1b88c0, 1, 0x1f, 0x924},
+ { 0x1b8900, 1, 0x1f, 0x924},
+ { 0x1b8940, 1, 0x1f, 0x924},
+ { 0x1b8980, 1, 0x1f, 0x924},
+ { 0x1b89c0, 1, 0x1f, 0x924},
+ { 0x1b8a00, 1, 0x1f, 0x934},
+ { 0x1b8a40, 1, 0x1f, 0x924},
+ { 0x1b8a80, 1, 0x1f, 0x492},
+ { 0x1b8ac0, 1, 0x1f, 0x924},
+ { 0x1b8b00, 1, 0x1f, 0x924},
+ { 0x1b8b40, 1, 0x1f, 0x924},
+ { 0x1b8b80, 1, 0x1f, 0x924},
+ { 0x1b8bc0, 1, 0x1f, 0x924},
+ { 0x1b8c00, 1, 0x1f, 0x924},
+ { 0x1b8c40, 1, 0x1f, 0x924},
+ { 0x1b8c80, 1, 0x1f, 0x924},
+ { 0x1b8cc0, 1, 0x1f, 0x924},
+ { 0x1b8cc4, 1, 0x1c, 0x924},
+ { 0x1b8d00, 1, 0x1f, 0x924},
+ { 0x1b8d40, 1, 0x1f, 0x924},
+ { 0x1b8d80, 1, 0x1f, 0x924},
+ { 0x1b8dc0, 1, 0x1f, 0x924},
+ { 0x1b8e00, 1, 0x1f, 0x924},
+ { 0x1b8e40, 1, 0x1f, 0x924},
+ { 0x1b8e80, 1, 0x1f, 0x924},
+ { 0x1b8e84, 1, 0x1c, 0x924},
+ { 0x1b8ec0, 1, 0x1e, 0x924},
+ { 0x1b8f00, 1, 0x1e, 0x924},
+ { 0x1b8f40, 1, 0x1e, 0x924},
+ { 0x1b8f80, 1, 0x1e, 0x924},
+ { 0x1b8fc0, 1, 0x1e, 0x924},
+ { 0x1b8fd4, 5, 0x1c, 0x924},
+ { 0x1b8fe8, 2, 0x18, 0x924},
+ { 0x1b9000, 1, 0x1c, 0x924},
+ { 0x1b9040, 3, 0x1c, 0x924},
+ { 0x1b905c, 1, 0x18, 0x924},
+ { 0x1b9064, 1, 0x10, 0x924},
+ { 0x1b9080, 10, 0x10, 0x924},
+ { 0x1c0000, 2, 0x1f, 0x924},
+ { 0x200000, 65, 0x1f, 0x924},
+ { 0x200124, 2, 0x1f, 0x1fff},
+ { 0x200130, 3, 0x1f, 0x1fff},
+ { 0x200140, 1, 0x1f, 0x1fff},
+ { 0x20014c, 2, 0x1e, 0x924},
+ { 0x200200, 27, 0x1f, 0x924},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x200270, 12, 0x1f, 0x924},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x2002a4, 17, 0x1f, 0x924},
+ { 0x200340, 4, 0x1f, 0x924},
+ { 0x200380, 1, 0x1c, 0x924},
+ { 0x200388, 1, 0x1c, 0x924},
+ { 0x200390, 1, 0x1c, 0x924},
+ { 0x200398, 1, 0x1c, 0x924},
+ { 0x2003a0, 1, 0x1c, 0x924},
+ { 0x2003a8, 2, 0x1c, 0x924},
+ { 0x200400, 256, 0x3, 0xfff},
+ { 0x202000, 4, 0x1f, 0x1927},
+ { 0x202010, 2044, 0x1f, 0x1007},
+ { 0x204000, 4, 0x18, 0x924},
+ { 0x220000, 1, 0x1f, 0x925},
+ { 0x220004, 5631, 0x1f, 0x1},
+ { 0x225800, 2560, 0x1e, 0x1},
+ { 0x228000, 1, 0x1f, 0x925},
+ { 0x228004, 8191, 0x1e, 0x1},
+ { 0x230000, 1, 0x1f, 0x925},
+ { 0x230004, 15, 0x2, 0x1},
+ { 0x230040, 1, 0x1e, 0x925},
+ { 0x230044, 239, 0x2, 0x1},
+ { 0x230400, 1, 0x1f, 0x925},
+ { 0x230404, 255, 0x2, 0x1},
+ { 0x230800, 1, 0x1f, 0x924},
+ { 0x230840, 1, 0x1e, 0x924},
+ { 0x230c00, 1, 0x1f, 0x924},
+ { 0x231000, 1, 0x1f, 0x924},
+ { 0x231040, 1, 0x1e, 0x924},
+ { 0x231400, 1, 0x1f, 0x924},
+ { 0x231440, 1, 0x1e, 0x924},
+ { 0x231480, 1, 0x1e, 0x924},
+ { 0x2314c0, 1, 0x1e, 0x924},
+ { 0x231800, 128, 0x1f, 0x2},
+ { 0x231c00, 128, 0x1f, 0x2},
+ { 0x232000, 1, 0x1f, 0xdb6},
+ { 0x232400, 1, 0x1e, 0x925},
+ { 0x232404, 5631, 0x1c, 0x1},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x238100, 1, 0x1f, 0x924},
+ { 0x238140, 1, 0x1f, 0x924},
+ { 0x238180, 1, 0x1f, 0x924},
+ { 0x2381c0, 1, 0x1f, 0x924},
+ { 0x238200, 1, 0x1f, 0x924},
+ { 0x238240, 1, 0x1f, 0x924},
+ { 0x238280, 1, 0x1f, 0x924},
+ { 0x2382c0, 1, 0x1f, 0x924},
+ { 0x238300, 1, 0x1f, 0x924},
+ { 0x238340, 1, 0x1f, 0x924},
+ { 0x238380, 1, 0x1f, 0x924},
+ { 0x2383c0, 1, 0x1f, 0x924},
+ { 0x238400, 1, 0x1f, 0x924},
+ { 0x238440, 1, 0x1f, 0x924},
+ { 0x238480, 1, 0x1f, 0x924},
+ { 0x2384c0, 1, 0x1f, 0x924},
+ { 0x238500, 1, 0x1f, 0x924},
+ { 0x238540, 1, 0x1f, 0x924},
+ { 0x238580, 1, 0x1f, 0x924},
+ { 0x2385c0, 19, 0x1c, 0x924},
+ { 0x238800, 1, 0x1f, 0x924},
+ { 0x238840, 1, 0x1f, 0x924},
+ { 0x238880, 1, 0x1f, 0x924},
+ { 0x2388c0, 1, 0x1f, 0x924},
+ { 0x238900, 1, 0x1f, 0x924},
+ { 0x238940, 1, 0x1f, 0x924},
+ { 0x238980, 1, 0x1f, 0x924},
+ { 0x2389c0, 1, 0x1f, 0x924},
+ { 0x238a00, 1, 0x1f, 0x926},
+ { 0x238a40, 1, 0x1f, 0x924},
+ { 0x238a80, 1, 0x1f, 0x492},
+ { 0x238ac0, 1, 0x1f, 0x924},
+ { 0x238b00, 1, 0x1f, 0x924},
+ { 0x238b40, 1, 0x1f, 0x924},
+ { 0x238b80, 1, 0x1f, 0x924},
+ { 0x238bc0, 1, 0x1f, 0x924},
+ { 0x238c00, 1, 0x1f, 0x924},
+ { 0x238c40, 1, 0x1f, 0x924},
+ { 0x238c80, 1, 0x1f, 0x924},
+ { 0x238cc0, 1, 0x1f, 0x924},
+ { 0x238cc4, 1, 0x1c, 0x924},
+ { 0x238d00, 1, 0x1f, 0x924},
+ { 0x238d40, 1, 0x1f, 0x924},
+ { 0x238d80, 1, 0x1f, 0x924},
+ { 0x238dc0, 1, 0x1f, 0x924},
+ { 0x238e00, 1, 0x1f, 0x924},
+ { 0x238e40, 1, 0x1f, 0x924},
+ { 0x238e80, 1, 0x1f, 0x924},
+ { 0x238e84, 1, 0x1c, 0x924},
+ { 0x238ec0, 1, 0x1e, 0x924},
+ { 0x238f00, 1, 0x1e, 0x924},
+ { 0x238f40, 1, 0x1e, 0x924},
+ { 0x238f80, 1, 0x1e, 0x924},
+ { 0x238fc0, 1, 0x1e, 0x924},
+ { 0x238fd4, 5, 0x1c, 0x924},
+ { 0x238fe8, 2, 0x18, 0x924},
+ { 0x239000, 1, 0x1c, 0x924},
+ { 0x239040, 3, 0x1c, 0x924},
+ { 0x23905c, 1, 0x18, 0x924},
+ { 0x239064, 1, 0x10, 0x924},
+ { 0x239080, 10, 0x10, 0x924},
+ { 0x240000, 2, 0x1f, 0x924},
+ { 0x280000, 65, 0x1f, 0x924},
+ { 0x280124, 2, 0x1f, 0x1fff},
+ { 0x280130, 3, 0x1f, 0x1fff},
+ { 0x280140, 1, 0x1f, 0x1fff},
+ { 0x28014c, 2, 0x1e, 0x924},
+ { 0x280200, 27, 0x1f, 0x924},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x280270, 12, 0x1f, 0x924},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2802a4, 17, 0x1f, 0x924},
+ { 0x280340, 4, 0x1f, 0x924},
+ { 0x280380, 1, 0x1c, 0x924},
+ { 0x280388, 1, 0x1c, 0x924},
+ { 0x280390, 1, 0x1c, 0x924},
+ { 0x280398, 1, 0x1c, 0x924},
+ { 0x2803a0, 1, 0x1c, 0x924},
+ { 0x2803a8, 2, 0x1c, 0x924},
+ { 0x280400, 256, 0x3, 0xfff},
+ { 0x282000, 4, 0x1f, 0x9e4},
+ { 0x282010, 2044, 0x1f, 0x1c0},
+ { 0x284000, 4, 0x18, 0x924},
+ { 0x2a0000, 1, 0x1f, 0x964},
+ { 0x2a0004, 5631, 0x1f, 0x40},
+ { 0x2a5800, 2560, 0x1e, 0x40},
+ { 0x2a8000, 1, 0x1f, 0x964},
+ { 0x2a8004, 8191, 0x1e, 0x40},
+ { 0x2b0000, 1, 0x1f, 0x964},
+ { 0x2b0004, 15, 0x2, 0x40},
+ { 0x2b0040, 1, 0x1e, 0x964},
+ { 0x2b0044, 239, 0x2, 0x40},
+ { 0x2b0400, 1, 0x1f, 0x964},
+ { 0x2b0404, 255, 0x2, 0x40},
+ { 0x2b0800, 1, 0x1f, 0x924},
+ { 0x2b0840, 1, 0x1e, 0x924},
+ { 0x2b0c00, 1, 0x1f, 0x924},
+ { 0x2b1000, 1, 0x1f, 0x924},
+ { 0x2b1040, 1, 0x1e, 0x924},
+ { 0x2b1400, 1, 0x1f, 0x924},
+ { 0x2b1440, 1, 0x1e, 0x924},
+ { 0x2b1480, 1, 0x1e, 0x924},
+ { 0x2b14c0, 1, 0x1e, 0x924},
+ { 0x2b1800, 128, 0x1f, 0x80},
+ { 0x2b1c00, 128, 0x1f, 0x80},
+ { 0x2b2000, 1, 0x1f, 0xdb6},
+ { 0x2b2400, 1, 0x1e, 0x964},
+ { 0x2b2404, 5631, 0x1c, 0x40},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x2b80c0, 1, 0x1f, 0x924},
+ { 0x2b8100, 1, 0x1f, 0x924},
+ { 0x2b8140, 1, 0x1f, 0x924},
+ { 0x2b8180, 1, 0x1f, 0x924},
+ { 0x2b81c0, 1, 0x1f, 0x924},
+ { 0x2b8200, 1, 0x1f, 0x924},
+ { 0x2b8240, 1, 0x1f, 0x924},
+ { 0x2b8280, 1, 0x1f, 0x924},
+ { 0x2b82c0, 1, 0x1f, 0x924},
+ { 0x2b8300, 1, 0x1f, 0x924},
+ { 0x2b8340, 1, 0x1f, 0x924},
+ { 0x2b8380, 1, 0x1f, 0x924},
+ { 0x2b83c0, 1, 0x1f, 0x924},
+ { 0x2b8400, 1, 0x1f, 0x924},
+ { 0x2b8440, 1, 0x1f, 0x924},
+ { 0x2b8480, 1, 0x1f, 0x924},
+ { 0x2b84c0, 1, 0x1f, 0x924},
+ { 0x2b8500, 1, 0x1f, 0x924},
+ { 0x2b8540, 1, 0x1f, 0x924},
+ { 0x2b8580, 1, 0x1f, 0x924},
+ { 0x2b85c0, 19, 0x1c, 0x924},
+ { 0x2b8800, 1, 0x1f, 0x924},
+ { 0x2b8840, 1, 0x1f, 0x924},
+ { 0x2b8880, 1, 0x1f, 0x924},
+ { 0x2b88c0, 1, 0x1f, 0x924},
+ { 0x2b8900, 1, 0x1f, 0x924},
+ { 0x2b8940, 1, 0x1f, 0x924},
+ { 0x2b8980, 1, 0x1f, 0x924},
+ { 0x2b89c0, 1, 0x1f, 0x924},
+ { 0x2b8a00, 1, 0x1f, 0x9a4},
+ { 0x2b8a40, 1, 0x1f, 0x924},
+ { 0x2b8a80, 1, 0x1f, 0x492},
+ { 0x2b8ac0, 1, 0x1f, 0x924},
+ { 0x2b8b00, 1, 0x1f, 0x924},
+ { 0x2b8b40, 1, 0x1f, 0x924},
+ { 0x2b8b80, 1, 0x1f, 0x924},
+ { 0x2b8bc0, 1, 0x1f, 0x924},
+ { 0x2b8c00, 1, 0x1f, 0x924},
+ { 0x2b8c40, 1, 0x1f, 0x924},
+ { 0x2b8c80, 1, 0x1f, 0x924},
+ { 0x2b8cc0, 1, 0x1f, 0x924},
+ { 0x2b8cc4, 1, 0x1c, 0x924},
+ { 0x2b8d00, 1, 0x1f, 0x924},
+ { 0x2b8d40, 1, 0x1f, 0x924},
+ { 0x2b8d80, 1, 0x1f, 0x924},
+ { 0x2b8dc0, 1, 0x1f, 0x924},
+ { 0x2b8e00, 1, 0x1f, 0x924},
+ { 0x2b8e40, 1, 0x1f, 0x924},
+ { 0x2b8e80, 1, 0x1f, 0x924},
+ { 0x2b8e84, 1, 0x1c, 0x924},
+ { 0x2b8ec0, 1, 0x1e, 0x924},
+ { 0x2b8f00, 1, 0x1e, 0x924},
+ { 0x2b8f40, 1, 0x1e, 0x924},
+ { 0x2b8f80, 1, 0x1e, 0x924},
+ { 0x2b8fc0, 1, 0x1e, 0x924},
+ { 0x2b8fd4, 5, 0x1c, 0x924},
+ { 0x2b8fe8, 2, 0x18, 0x924},
+ { 0x2b9000, 1, 0x1c, 0x924},
+ { 0x2b9040, 3, 0x1c, 0x924},
+ { 0x2b905c, 1, 0x18, 0x924},
+ { 0x2b9064, 1, 0x10, 0x924},
+ { 0x2b9080, 10, 0x10, 0x924},
+ { 0x2c0000, 2, 0x1f, 0x1fff},
+ { 0x300000, 65, 0x1f, 0x924},
+ { 0x300124, 2, 0x1f, 0x1fff},
+ { 0x300130, 3, 0x1f, 0x1fff},
+ { 0x300140, 1, 0x1f, 0x1fff},
+ { 0x30014c, 2, 0x1e, 0x924},
+ { 0x300200, 27, 0x1f, 0x924},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x300270, 12, 0x1f, 0x924},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x3002a4, 17, 0x1f, 0x924},
+ { 0x300340, 4, 0x1f, 0x924},
+ { 0x300380, 1, 0x1c, 0x924},
+ { 0x300388, 1, 0x1c, 0x924},
+ { 0x300390, 1, 0x1c, 0x924},
+ { 0x300398, 1, 0x1c, 0x924},
+ { 0x3003a0, 1, 0x1c, 0x924},
+ { 0x3003a8, 2, 0x1c, 0x924},
+ { 0x300400, 256, 0x3, 0xfff},
+ { 0x302000, 4, 0x1f, 0xf24},
+ { 0x302010, 2044, 0x1f, 0xe00},
+ { 0x304000, 4, 0x18, 0x924},
+ { 0x320000, 1, 0x1f, 0xb24},
+ { 0x320004, 5631, 0x1f, 0x200},
+ { 0x325800, 2560, 0x1e, 0x200},
+ { 0x328000, 1, 0x1f, 0xb24},
+ { 0x328004, 8191, 0x1e, 0x200},
+ { 0x330000, 1, 0x1f, 0xb24},
+ { 0x330004, 15, 0x2, 0x200},
+ { 0x330040, 1, 0x1e, 0xb24},
+ { 0x330044, 239, 0x2, 0x200},
+ { 0x330400, 1, 0x1f, 0xb24},
+ { 0x330404, 255, 0x2, 0x200},
+ { 0x330800, 1, 0x1f, 0x924},
+ { 0x330840, 1, 0x1e, 0x924},
+ { 0x330c00, 1, 0x1f, 0x924},
+ { 0x331000, 1, 0x1f, 0x924},
+ { 0x331040, 1, 0x1e, 0x924},
+ { 0x331400, 1, 0x1f, 0x924},
+ { 0x331440, 1, 0x1e, 0x924},
+ { 0x331480, 1, 0x1e, 0x924},
+ { 0x3314c0, 1, 0x1e, 0x924},
+ { 0x331800, 128, 0x1f, 0x400},
+ { 0x331c00, 128, 0x1f, 0x400},
+ { 0x332000, 1, 0x1f, 0xdb6},
+ { 0x332400, 1, 0x1e, 0xb24},
+ { 0x332404, 5631, 0x1c, 0x200},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff},
+ { 0x338100, 1, 0x1f, 0x924},
+ { 0x338140, 1, 0x1f, 0x924},
+ { 0x338180, 1, 0x1f, 0x924},
+ { 0x3381c0, 1, 0x1f, 0x924},
+ { 0x338200, 1, 0x1f, 0x924},
+ { 0x338240, 1, 0x1f, 0x924},
+ { 0x338280, 1, 0x1f, 0x924},
+ { 0x3382c0, 1, 0x1f, 0x924},
+ { 0x338300, 1, 0x1f, 0x924},
+ { 0x338340, 1, 0x1f, 0x924},
+ { 0x338380, 1, 0x1f, 0x924},
+ { 0x3383c0, 1, 0x1f, 0x924},
+ { 0x338400, 1, 0x1f, 0x924},
+ { 0x338440, 1, 0x1f, 0x924},
+ { 0x338480, 1, 0x1f, 0x924},
+ { 0x3384c0, 1, 0x1f, 0x924},
+ { 0x338500, 1, 0x1f, 0x924},
+ { 0x338540, 1, 0x1f, 0x924},
+ { 0x338580, 1, 0x1f, 0x924},
+ { 0x3385c0, 19, 0x1c, 0x924},
+ { 0x338800, 1, 0x1f, 0x924},
+ { 0x338840, 1, 0x1f, 0x924},
+ { 0x338880, 1, 0x1f, 0x924},
+ { 0x3388c0, 1, 0x1f, 0x924},
+ { 0x338900, 1, 0x1f, 0x924},
+ { 0x338940, 1, 0x1f, 0x924},
+ { 0x338980, 1, 0x1f, 0x924},
+ { 0x3389c0, 1, 0x1f, 0x924},
+ { 0x338a00, 1, 0x1f, 0xd24},
+ { 0x338a40, 1, 0x1f, 0x924},
+ { 0x338a80, 1, 0x1f, 0x492},
+ { 0x338ac0, 1, 0x1f, 0x924},
+ { 0x338b00, 1, 0x1f, 0x924},
+ { 0x338b40, 1, 0x1f, 0x924},
+ { 0x338b80, 1, 0x1f, 0x924},
+ { 0x338bc0, 1, 0x1f, 0x924},
+ { 0x338c00, 1, 0x1f, 0x924},
+ { 0x338c40, 1, 0x1f, 0x924},
+ { 0x338c80, 1, 0x1f, 0x924},
+ { 0x338cc0, 1, 0x1f, 0x924},
+ { 0x338cc4, 1, 0x1c, 0x924},
+ { 0x338d00, 1, 0x1f, 0x924},
+ { 0x338d40, 1, 0x1f, 0x924},
+ { 0x338d80, 1, 0x1f, 0x924},
+ { 0x338dc0, 1, 0x1f, 0x924},
+ { 0x338e00, 1, 0x1f, 0x924},
+ { 0x338e40, 1, 0x1f, 0x924},
+ { 0x338e80, 1, 0x1f, 0x924},
+ { 0x338e84, 1, 0x1c, 0x924},
+ { 0x338ec0, 1, 0x1e, 0x924},
+ { 0x338f00, 1, 0x1e, 0x924},
+ { 0x338f40, 1, 0x1e, 0x924},
+ { 0x338f80, 1, 0x1e, 0x924},
+ { 0x338fc0, 1, 0x1e, 0x924},
+ { 0x338fd4, 5, 0x1c, 0x924},
+ { 0x338fe8, 2, 0x18, 0x924},
+ { 0x339000, 1, 0x1c, 0x924},
+ { 0x339040, 3, 0x1c, 0x924},
+ { 0x33905c, 1, 0x18, 0x924},
+ { 0x339064, 1, 0x10, 0x924},
+ { 0x339080, 10, 0x10, 0x924},
+ { 0x340000, 2, 0x1f, 0x924},
+ { 0x3a0000, 40960, 0x1c, 0x1000}
};
-#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
+#define REGS_COUNT ARRAY_SIZE(reg_addrs)
-static const u32 page_vals_e2[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E2 ARRAY_SIZE(page_vals_e2)
+static const struct reg_addr idle_reg_addrs[] = {
+ { 0x2104, 1, 0x1f, 0xfff},
+ { 0x2110, 2, 0x1f, 0xfff},
+ { 0x211c, 8, 0x1f, 0xfff},
+ { 0x2814, 1, 0x1f, 0xfff},
+ { 0x281c, 2, 0x1f, 0xfff},
+ { 0x2854, 1, 0x1f, 0xfff},
+ { 0x285c, 1, 0x1f, 0xfff},
+ { 0x3040, 1, 0x1f, 0xfff},
+ { 0x9010, 7, 0x1c, 0xfff},
+ { 0x9030, 1, 0x1c, 0xfff},
+ { 0x9068, 16, 0x1c, 0xfff},
+ { 0x9230, 2, 0x1c, 0xfff},
+ { 0x9244, 1, 0x1c, 0xfff},
+ { 0x9298, 1, 0x1c, 0xfff},
+ { 0x92a8, 1, 0x1c, 0x1fff},
+ { 0xa38c, 1, 0x1f, 0x1fff},
+ { 0xa3c4, 1, 0x1e, 0xfff},
+ { 0xa404, 1, 0x1f, 0xfff},
+ { 0xa408, 2, 0x1f, 0x1fff},
+ { 0xa42c, 12, 0x1f, 0xfff},
+ { 0xa580, 1, 0x1f, 0x1fff},
+ { 0xa590, 1, 0x1f, 0x1fff},
+ { 0xa600, 5, 0x1e, 0xfff},
+ { 0xa618, 1, 0x1e, 0xfff},
+ { 0xa714, 1, 0x1c, 0xfff},
+ { 0xa720, 1, 0x1c, 0xfff},
+ { 0xa750, 1, 0x1c, 0xfff},
+ { 0xc09c, 1, 0x3, 0xfff},
+ { 0x103b0, 1, 0x1f, 0xfff},
+ { 0x103c0, 1, 0x1f, 0xfff},
+ { 0x103d0, 1, 0x3, 0x1fff},
+ { 0x10418, 1, 0x1f, 0xfff},
+ { 0x10420, 1, 0x1f, 0xfff},
+ { 0x10428, 1, 0x1f, 0xfff},
+ { 0x10460, 1, 0x1f, 0xfff},
+ { 0x10474, 1, 0x1f, 0xfff},
+ { 0x104e0, 1, 0x1f, 0xfff},
+ { 0x104ec, 1, 0x1f, 0xfff},
+ { 0x104f8, 1, 0x1f, 0xfff},
+ { 0x10508, 1, 0x1f, 0xfff},
+ { 0x10530, 1, 0x1f, 0xfff},
+ { 0x10538, 1, 0x1f, 0xfff},
+ { 0x10548, 1, 0x1f, 0xfff},
+ { 0x10558, 1, 0x1f, 0xfff},
+ { 0x182a8, 1, 0x1c, 0xfff},
+ { 0x182b8, 1, 0x1c, 0xfff},
+ { 0x18308, 1, 0x1c, 0xfff},
+ { 0x18318, 1, 0x1c, 0xfff},
+ { 0x18338, 1, 0x1c, 0xfff},
+ { 0x18348, 1, 0x1c, 0xfff},
+ { 0x183bc, 1, 0x1c, 0x1fff},
+ { 0x183cc, 1, 0x1c, 0x1fff},
+ { 0x18570, 1, 0x18, 0xfff},
+ { 0x18578, 1, 0x18, 0xfff},
+ { 0x1858c, 1, 0x18, 0xfff},
+ { 0x18594, 1, 0x18, 0xfff},
+ { 0x1862c, 4, 0x10, 0xfff},
+ { 0x2021c, 11, 0x1f, 0xfff},
+ { 0x202a8, 1, 0x1f, 0xfff},
+ { 0x202b8, 1, 0x1f, 0x1fff},
+ { 0x20404, 1, 0x1f, 0xfff},
+ { 0x2040c, 2, 0x1f, 0xfff},
+ { 0x2041c, 2, 0x1f, 0xfff},
+ { 0x40154, 14, 0x1f, 0xfff},
+ { 0x40198, 1, 0x1f, 0x1fff},
+ { 0x404ac, 1, 0x1f, 0xfff},
+ { 0x404bc, 1, 0x1f, 0x1fff},
+ { 0x42290, 1, 0x1f, 0xfff},
+ { 0x422a0, 1, 0x1f, 0xfff},
+ { 0x422b0, 1, 0x1f, 0x1fff},
+ { 0x42548, 1, 0x1f, 0xfff},
+ { 0x42550, 1, 0x1f, 0xfff},
+ { 0x42558, 1, 0x1f, 0xfff},
+ { 0x50160, 8, 0x1f, 0xfff},
+ { 0x501d0, 1, 0x1f, 0xfff},
+ { 0x501e0, 1, 0x1f, 0x1fff},
+ { 0x50204, 1, 0x1f, 0xfff},
+ { 0x5020c, 2, 0x1f, 0xfff},
+ { 0x5021c, 1, 0x1f, 0xfff},
+ { 0x60090, 1, 0x1f, 0xfff},
+ { 0x6011c, 1, 0x1f, 0xfff},
+ { 0x6012c, 1, 0x1f, 0x1fff},
+ { 0xc101c, 1, 0x1f, 0xfff},
+ { 0xc102c, 1, 0x1f, 0x1fff},
+ { 0xc2290, 1, 0x1f, 0xfff},
+ { 0xc22a0, 1, 0x1f, 0xfff},
+ { 0xc22b0, 1, 0x1f, 0x1fff},
+ { 0xc2548, 1, 0x1f, 0xfff},
+ { 0xc2550, 1, 0x1f, 0xfff},
+ { 0xc2558, 1, 0x1f, 0xfff},
+ { 0xc4294, 1, 0x1f, 0xfff},
+ { 0xc42a4, 1, 0x1f, 0xfff},
+ { 0xc42b4, 1, 0x1f, 0x1fff},
+ { 0xc4550, 1, 0x1f, 0xfff},
+ { 0xc4558, 1, 0x1f, 0xfff},
+ { 0xc4560, 1, 0x1f, 0xfff},
+ { 0xd016c, 8, 0x1f, 0xfff},
+ { 0xd01d8, 1, 0x1f, 0xfff},
+ { 0xd01e8, 1, 0x1f, 0x1fff},
+ { 0xd0204, 1, 0x1f, 0xfff},
+ { 0xd020c, 3, 0x1f, 0xfff},
+ { 0xe0154, 8, 0x1f, 0xfff},
+ { 0xe01c8, 1, 0x1f, 0xfff},
+ { 0xe01d8, 1, 0x1f, 0x1fff},
+ { 0xe0204, 1, 0x1f, 0xfff},
+ { 0xe020c, 2, 0x1f, 0xfff},
+ { 0xe021c, 2, 0x1f, 0xfff},
+ { 0x101014, 1, 0x1f, 0xfff},
+ { 0x101030, 1, 0x1f, 0xfff},
+ { 0x101040, 1, 0x1f, 0x1fff},
+ { 0x102058, 1, 0x1f, 0x1fff},
+ { 0x102080, 16, 0x1f, 0xfff},
+ { 0x103004, 2, 0x1f, 0xfff},
+ { 0x103068, 1, 0x1f, 0xfff},
+ { 0x103078, 1, 0x1f, 0xfff},
+ { 0x103088, 1, 0x1f, 0x1fff},
+ { 0x10309c, 2, 0x1e, 0xfff},
+ { 0x1030b8, 2, 0x1c, 0xfff},
+ { 0x1030cc, 1, 0x1c, 0xfff},
+ { 0x1030e0, 1, 0x1c, 0xfff},
+ { 0x104004, 1, 0x1f, 0xfff},
+ { 0x104018, 1, 0x1f, 0xfff},
+ { 0x104020, 1, 0x1f, 0xfff},
+ { 0x10403c, 1, 0x1f, 0xfff},
+ { 0x1040fc, 1, 0x1f, 0xfff},
+ { 0x10410c, 1, 0x1f, 0x1fff},
+ { 0x104400, 1, 0x1f, 0x1fff},
+ { 0x104404, 63, 0x1f, 0xfff},
+ { 0x104800, 1, 0x1f, 0x1fff},
+ { 0x104804, 63, 0x1f, 0xfff},
+ { 0x105000, 4, 0x1f, 0x1fff},
+ { 0x105010, 252, 0x1f, 0xfff},
+ { 0x108094, 1, 0x3, 0xfff},
+ { 0x1201b0, 2, 0x1f, 0xfff},
+ { 0x12032c, 1, 0x1f, 0xfff},
+ { 0x12036c, 3, 0x1f, 0xfff},
+ { 0x120408, 2, 0x1f, 0xfff},
+ { 0x120414, 15, 0x1f, 0xfff},
+ { 0x120478, 2, 0x1f, 0xfff},
+ { 0x12052c, 1, 0x1f, 0xfff},
+ { 0x120564, 3, 0x1f, 0xfff},
+ { 0x12057c, 1, 0x1f, 0x1fff},
+ { 0x12058c, 1, 0x1f, 0x1fff},
+ { 0x120608, 1, 0x1e, 0xfff},
+ { 0x120748, 1, 0x1c, 0xfff},
+ { 0x120778, 2, 0x1c, 0xfff},
+ { 0x120808, 3, 0x1f, 0xfff},
+ { 0x120818, 1, 0x1f, 0xfff},
+ { 0x120820, 1, 0x1f, 0xfff},
+ { 0x120828, 1, 0x1f, 0xfff},
+ { 0x120830, 1, 0x1f, 0xfff},
+ { 0x120838, 1, 0x1f, 0xfff},
+ { 0x120840, 1, 0x1f, 0xfff},
+ { 0x120848, 1, 0x1f, 0xfff},
+ { 0x120850, 1, 0x1f, 0xfff},
+ { 0x120858, 1, 0x1f, 0xfff},
+ { 0x120860, 1, 0x1f, 0xfff},
+ { 0x120868, 1, 0x1f, 0xfff},
+ { 0x120870, 1, 0x1f, 0xfff},
+ { 0x120878, 1, 0x1f, 0xfff},
+ { 0x120880, 1, 0x1f, 0xfff},
+ { 0x120888, 1, 0x1f, 0xfff},
+ { 0x120890, 1, 0x1f, 0xfff},
+ { 0x120898, 1, 0x1f, 0xfff},
+ { 0x1208a0, 1, 0x1f, 0xfff},
+ { 0x1208a8, 1, 0x1f, 0xfff},
+ { 0x1208b0, 1, 0x1f, 0xfff},
+ { 0x1208b8, 1, 0x1f, 0xfff},
+ { 0x1208c0, 1, 0x1f, 0xfff},
+ { 0x1208c8, 1, 0x1f, 0xfff},
+ { 0x1208d0, 1, 0x1f, 0xfff},
+ { 0x1208d8, 1, 0x1f, 0xfff},
+ { 0x1208e0, 1, 0x1f, 0xfff},
+ { 0x1208e8, 1, 0x1f, 0xfff},
+ { 0x1208f0, 1, 0x1f, 0xfff},
+ { 0x1208f8, 1, 0x1f, 0xfff},
+ { 0x120900, 1, 0x1f, 0xfff},
+ { 0x120908, 1, 0x1f, 0xfff},
+ { 0x130030, 1, 0x1c, 0xfff},
+ { 0x13004c, 3, 0x1c, 0xfff},
+ { 0x130064, 2, 0x1c, 0xfff},
+ { 0x13009c, 1, 0x1c, 0x1fff},
+ { 0x130130, 1, 0x1c, 0xfff},
+ { 0x13016c, 1, 0x1c, 0xfff},
+ { 0x130300, 1, 0x1c, 0xfff},
+ { 0x130480, 1, 0x1c, 0xfff},
+ { 0x14005c, 2, 0xf, 0xfff},
+ { 0x1400d0, 2, 0xf, 0xfff},
+ { 0x1400e0, 1, 0xf, 0xfff},
+ { 0x1401c8, 1, 0xf, 0xfff},
+ { 0x140200, 6, 0xf, 0xfff},
+ { 0x140338, 7, 0x10, 0xfff},
+ { 0x140370, 7, 0x10, 0xfff},
+ { 0x15c1bc, 6, 0x10, 0xfff},
+ { 0x15c230, 7, 0x10, 0xfff},
+ { 0x16101c, 1, 0x1f, 0xfff},
+ { 0x16102c, 1, 0x1f, 0x1fff},
+ { 0x164014, 2, 0x1f, 0xfff},
+ { 0x1640f0, 1, 0x1f, 0xfff},
+ { 0x166290, 1, 0x1f, 0xfff},
+ { 0x1662a0, 1, 0x1f, 0xfff},
+ { 0x1662b0, 1, 0x1f, 0x1fff},
+ { 0x166548, 1, 0x1f, 0xfff},
+ { 0x166550, 1, 0x1f, 0xfff},
+ { 0x166558, 1, 0x1f, 0xfff},
+ { 0x168000, 1, 0x1f, 0xfff},
+ { 0x168008, 1, 0x1f, 0xfff},
+ { 0x168010, 1, 0x1f, 0xfff},
+ { 0x168018, 1, 0x1f, 0xfff},
+ { 0x168028, 2, 0x1f, 0xfff},
+ { 0x168058, 9, 0x1f, 0xfff},
+ { 0x168238, 1, 0x1f, 0xfff},
+ { 0x1682d0, 7, 0x1f, 0xfff},
+ { 0x168300, 2, 0x3, 0xfff},
+ { 0x168308, 65, 0x1f, 0xfff},
+ { 0x168410, 2, 0x1f, 0xfff},
+ { 0x168438, 1, 0x1f, 0xfff},
+ { 0x168448, 1, 0x1f, 0x1fff},
+ { 0x168a00, 128, 0x1f, 0xfff},
+ { 0x16e200, 128, 0x2, 0xfff},
+ { 0x16e404, 2, 0x2, 0xfff},
+ { 0x16e584, 64, 0x2, 0xfff},
+ { 0x16e684, 2, 0x1e, 0xfff},
+ { 0x16e68c, 4, 0x2, 0xfff},
+ { 0x16e6fc, 4, 0x1c, 0xfff},
+ { 0x16e7ac, 12, 0x10, 0xfff},
+ { 0x1700a4, 1, 0x1f, 0xfff},
+ { 0x1700ac, 2, 0x1f, 0xfff},
+ { 0x1700c0, 1, 0x1f, 0xfff},
+ { 0x170174, 1, 0x1f, 0xfff},
+ { 0x170184, 1, 0x1f, 0x1fff},
+ { 0x1800f4, 1, 0x1f, 0xfff},
+ { 0x180104, 1, 0x1f, 0xfff},
+ { 0x180114, 1, 0x1f, 0x1fff},
+ { 0x180124, 1, 0x1f, 0x1fff},
+ { 0x18026c, 1, 0x1f, 0xfff},
+ { 0x1802a0, 1, 0x1f, 0xfff},
+ { 0x1b8000, 1, 0x1f, 0xfff},
+ { 0x1b8040, 1, 0x1f, 0xfff},
+ { 0x1b8080, 1, 0x1f, 0xfff},
+ { 0x1b80c0, 1, 0x1f, 0xfff},
+ { 0x200104, 1, 0x1f, 0xfff},
+ { 0x200114, 1, 0x1f, 0xfff},
+ { 0x200124, 1, 0x1f, 0x1fff},
+ { 0x200134, 1, 0x1f, 0x1fff},
+ { 0x20026c, 1, 0x1f, 0xfff},
+ { 0x2002a0, 1, 0x1f, 0xfff},
+ { 0x238000, 1, 0x1f, 0xfff},
+ { 0x238040, 1, 0x1f, 0xfff},
+ { 0x238080, 1, 0x1f, 0xfff},
+ { 0x2380c0, 1, 0x1f, 0xfff},
+ { 0x280104, 1, 0x1f, 0xfff},
+ { 0x280114, 1, 0x1f, 0xfff},
+ { 0x280124, 1, 0x1f, 0x1fff},
+ { 0x280134, 1, 0x1f, 0x1fff},
+ { 0x28026c, 1, 0x1f, 0xfff},
+ { 0x2802a0, 1, 0x1f, 0xfff},
+ { 0x2b8000, 1, 0x1f, 0xfff},
+ { 0x2b8040, 1, 0x1f, 0xfff},
+ { 0x2b8080, 1, 0x1f, 0xfff},
+ { 0x300104, 1, 0x1f, 0xfff},
+ { 0x300114, 1, 0x1f, 0xfff},
+ { 0x300124, 1, 0x1f, 0x1fff},
+ { 0x300134, 1, 0x1f, 0x1fff},
+ { 0x30026c, 1, 0x1f, 0xfff},
+ { 0x3002a0, 1, 0x1f, 0xfff},
+ { 0x338000, 1, 0x1f, 0xfff},
+ { 0x338040, 1, 0x1f, 0xfff},
+ { 0x338080, 1, 0x1f, 0xfff},
+ { 0x3380c0, 1, 0x1f, 0xfff}
+};
-static const u32 page_write_regs_e2[] = { 328476 };
-#define PAGE_WRITE_REGS_E2 ARRAY_SIZE(page_write_regs_e2)
+#define IDLE_REGS_COUNT ARRAY_SIZE(idle_reg_addrs)
-static const struct reg_addr page_read_regs_e2[] = {
- { 0x58000, 4608, RI_E2_ONLINE } };
-#define PAGE_READ_REGS_E2 ARRAY_SIZE(page_read_regs_e2)
+static const u32 read_reg_e1[] = {
+ 0x1b1000};
-static const u32 page_vals_e3[] = { 0, 128 };
-#define PAGE_MODE_VALUES_E3 ARRAY_SIZE(page_vals_e3)
+static const struct wreg_addr wreg_addr_e1 = {
+ 0x1b0c00, 192, 1, read_reg_e1, 0x1f, 0x1fff};
-static const u32 page_write_regs_e3[] = { 328476 };
-#define PAGE_WRITE_REGS_E3 ARRAY_SIZE(page_write_regs_e3)
+static const u32 read_reg_e1h[] = {
+ 0x1b1040, 0x1b1000};
-static const struct reg_addr page_read_regs_e3[] = {
- { 0x58000, 4608, RI_E3E3B0_ONLINE } };
-#define PAGE_READ_REGS_E3 ARRAY_SIZE(page_read_regs_e3)
+static const struct wreg_addr wreg_addr_e1h = {
+ 0x1b0c00, 256, 2, read_reg_e1h, 0x1f, 0x1fff};
+
+static const u32 read_reg_e2[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e2 = {
+ 0x1b0c00, 128, 2, read_reg_e2, 0x1f, 0x1fff};
-#endif /* BNX2X_DUMP_H */
+static const u32 read_reg_e3[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3 = {
+ 0x1b0c00, 128, 2, read_reg_e3, 0x1f, 0x1fff};
+
+static const u32 read_reg_e3b0[] = {
+ 0x1b1040, 0x1b1000};
+
+static const struct wreg_addr wreg_addr_e3b0 = {
+ 0x1b0c00, 128, 2, read_reg_e3b0, 0x1f, 0x1fff};
+
+static const unsigned int dump_num_registers[NUM_CHIPS][NUM_PRESETS] = {
+ {20782, 18567, 27975, 19729, 18311, 27719, 20836, 32391, 41799, 20812,
+ 26247, 35655, 19074},
+ {32774, 19297, 33277, 31721, 19041, 33021, 32828, 33121, 47101, 32804,
+ 26977, 40957, 35895},
+ {36527, 17928, 33697, 35474, 18700, 34466, 36581, 31752, 47521, 36557,
+ 25608, 41377, 43903},
+ {45239, 17936, 34387, 44186, 18708, 35156, 45293, 31760, 48211, 45269,
+ 25616, 42067, 43903},
+ {45302, 17999, 34802, 44249, 18771, 35571, 45356, 31823, 48626, 45332,
+ 25679, 42482, 43903}
+};
+#endif
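For reference while reading the tables above: the struct reg_addr and struct wreg_addr definitions live earlier in bnx2x_dump.h and are outside this excerpt, but their layout can be inferred from the initializers here and from the accessors added to bnx2x_ethtool.c below. A sketch, under that assumption:

	/* table entries read as { addr, size in dwords, chip mask, preset mask } */
	struct reg_addr {
		u32 addr;
		u32 size;
		u32 chips;	/* OR of DUMP_CHIP_* bits */
		u32 presets;	/* bit (n-1) set: included in preset n */
	};

	/* wide-bus ("CAM") windows: a write range plus companion read registers */
	struct wreg_addr {
		u32 addr;
		u32 size;
		u32 read_regs_count;
		const u32 *read_regs;
		u32 chips;
		u32 presets;
	};

The dump_num_registers table is indexed the same way __bnx2x_get_preset_regs_len() reads it: rows in chip order E1, E1H, E2, E3A0, E3B0, and thirteen columns giving each preset's size in dwords.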
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index a427b49a886..9a674b14b40 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -186,6 +186,7 @@ static const struct {
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
+
static int bnx2x_get_port_type(struct bnx2x *bp)
{
int port_type;
@@ -233,7 +234,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
@@ -399,7 +400,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successully */
+	/* Save new config in case command completes successfully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -596,29 +597,58 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
-#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
-#define IS_E2_ONLINE(info) (((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
-#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
-#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+#define DUMP_ALL_PRESETS 0x1FFF
+#define DUMP_MAX_PRESETS 13
-static bool bnx2x_is_reg_online(struct bnx2x *bp,
- const struct reg_addr *reg_info)
+static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
{
if (CHIP_IS_E1(bp))
- return IS_E1_ONLINE(reg_info->info);
+ return dump_num_registers[0][preset-1];
else if (CHIP_IS_E1H(bp))
- return IS_E1H_ONLINE(reg_info->info);
+ return dump_num_registers[1][preset-1];
else if (CHIP_IS_E2(bp))
- return IS_E2_ONLINE(reg_info->info);
+ return dump_num_registers[2][preset-1];
else if (CHIP_IS_E3A0(bp))
- return IS_E3_ONLINE(reg_info->info);
+ return dump_num_registers[3][preset-1];
else if (CHIP_IS_E3B0(bp))
- return IS_E3B0_ONLINE(reg_info->info);
+ return dump_num_registers[4][preset-1];
else
- return false;
+ return 0;
+}
+
+static int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+ u32 preset_idx;
+ int regdump_len = 0;
+
+ /* Calculate the total preset regs length */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
+ regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
+
+ return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_regs_len(bp);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
}
+#define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
+#define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
+#define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
+#define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
+#define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
+
+#define IS_REG_IN_PRESET(presets, idx) \
+ ((presets & (1 << (idx-1))) == (1 << (idx-1)))
+
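As a quick arithmetic check of these selectors (an aside, not patch content): the preset mask stores preset n in bit (n-1), so an entry tagged 0x1fff belongs to all thirteen presets while a 0xfff entry drops out of preset 13, and a chip mask of 0x1f appears to cover all five supported chips.

	/* IS_REG_IN_PRESET(0x1fff, 13): (0x1fff & (1 << 12)) == 0x1000, so the test passes */
	/* IS_REG_IN_PRESET(0x0fff, 13): 0x0fff & (1 << 12) is 0, so the test fails */

Tying this to the length helpers above: for an E2 device and preset 1, __bnx2x_get_preset_regs_len() returns dump_num_registers[2][0] = 36527 dwords, so bnx2x_get_preset_regs_len() further below reports 36527 * 4 + sizeof(struct dump_header) bytes, and __bnx2x_get_regs_len() simply sums the thirteen entries of that row.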
/******* Paged registers info selectors ********/
static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
{
@@ -680,38 +710,39 @@ static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
return 0;
}
-static int __bnx2x_get_regs_len(struct bnx2x *bp)
+static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
+ const struct reg_addr *reg_info)
{
- int num_pages = __bnx2x_get_page_reg_num(bp);
- int page_write_num = __bnx2x_get_page_write_num(bp);
- const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
- int page_read_num = __bnx2x_get_page_read_num(bp);
- int regdump_len = 0;
- int i, j, k;
-
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
- regdump_len += reg_addrs[i].size;
-
- for (i = 0; i < num_pages; i++)
- for (j = 0; j < page_write_num; j++)
- for (k = 0; k < page_read_num; k++)
- if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
- regdump_len += page_read_addr[k].size;
-
- return regdump_len;
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(reg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(reg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(reg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(reg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(reg_info->chips);
+ else
+ return false;
}
-static int bnx2x_get_regs_len(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int regdump_len = 0;
- regdump_len = __bnx2x_get_regs_len(bp);
- regdump_len *= 4;
- regdump_len += sizeof(struct dump_hdr);
-
- return regdump_len;
+static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
+ const struct wreg_addr *wreg_info)
+{
+ if (CHIP_IS_E1(bp))
+ return IS_E1_REG(wreg_info->chips);
+ else if (CHIP_IS_E1H(bp))
+ return IS_E1H_REG(wreg_info->chips);
+ else if (CHIP_IS_E2(bp))
+ return IS_E2_REG(wreg_info->chips);
+ else if (CHIP_IS_E3A0(bp))
+ return IS_E3A0_REG(wreg_info->chips);
+ else if (CHIP_IS_E3B0(bp))
+ return IS_E3B0_REG(wreg_info->chips);
+ else
+ return false;
}
/**
@@ -725,9 +756,10 @@ static int bnx2x_get_regs_len(struct net_device *dev)
* ("read address"). There may be more than one write address per "page" and
* more than one read address per write address.
*/
-static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
u32 i, j, k, n;
+
/* addresses of the paged registers */
const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
/* number of paged registers */
@@ -740,32 +772,100 @@ static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
/* number of read addresses */
int read_num = __bnx2x_get_page_read_num(bp);
+ u32 addr, size;
for (i = 0; i < num_pages; i++) {
for (j = 0; j < write_num; j++) {
REG_WR(bp, write_addr[j], page_addr[i]);
- for (k = 0; k < read_num; k++)
- if (bnx2x_is_reg_online(bp, &read_addr[k]))
- for (n = 0; n <
- read_addr[k].size; n++)
- *p++ = REG_RD(bp,
- read_addr[k].addr + n*4);
+
+ for (k = 0; k < read_num; k++) {
+ if (IS_REG_IN_PRESET(read_addr[k].presets,
+ preset)) {
+ size = read_addr[k].size;
+ for (n = 0; n < size; n++) {
+ addr = read_addr[k].addr + n*4;
+ *p++ = REG_RD(bp, addr);
+ }
+ }
+ }
}
}
}
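For a concrete feel of this loop, the superseded E3 tables removed earlier in this patch (in bnx2x_dump.h) give its shape; the regenerated tables are elsewhere in that header and not shown here. There, the page values were { 0, 128 }, the single write address was 328476 and the read window was 4608 dwords at 0x58000, so for that chip the loop behaves roughly like:

	/* illustrative only, using the old E3 numbers quoted above */
	for (i = 0; i < 2; i++) {			/* page values 0 and 128 */
		REG_WR(bp, 328476, i * 128);		/* select the page */
		for (n = 0; n < 4608; n++)
			*p++ = REG_RD(bp, 0x58000 + n * 4);
	}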
-static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
- u32 i, j;
+ u32 i, j, addr;
+ const struct wreg_addr *wreg_addr_p = NULL;
+
+ if (CHIP_IS_E1(bp))
+ wreg_addr_p = &wreg_addr_e1;
+ else if (CHIP_IS_E1H(bp))
+ wreg_addr_p = &wreg_addr_e1h;
+ else if (CHIP_IS_E2(bp))
+ wreg_addr_p = &wreg_addr_e2;
+ else if (CHIP_IS_E3A0(bp))
+ wreg_addr_p = &wreg_addr_e3;
+ else if (CHIP_IS_E3B0(bp))
+ wreg_addr_p = &wreg_addr_e3b0;
+
+ /* Read the idle_chk registers */
+ for (i = 0; i < IDLE_REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
+ IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
+ for (j = 0; j < idle_reg_addrs[i].size; j++)
+ *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
+ }
+ }
/* Read the regular registers */
- for (i = 0; i < REGS_COUNT; i++)
- if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+ for (i = 0; i < REGS_COUNT; i++) {
+ if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
+ IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
for (j = 0; j < reg_addrs[i].size; j++)
*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+ }
+ }
+
+ /* Read the CAM registers */
+ if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
+ IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
+ for (i = 0; i < wreg_addr_p->size; i++) {
+ *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
- /* Read "paged" registes */
- bnx2x_read_pages_regs(bp, p);
+ /* In case of wreg_addr register, read additional
+ registers from read_regs array
+ */
+ for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
+ addr = *(wreg_addr_p->read_regs);
+ *p++ = REG_RD(bp, addr + j*4);
+ }
+ }
+ }
+
+ /* Paged registers are supported in E2 & E3 only */
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
+		/* Read "paged" registers */
+ bnx2x_read_pages_regs(bp, p, preset);
+ }
+
+ return 0;
+}
+
+static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+ u32 preset_idx;
+
+ /* Read all registers, by reading all preset registers */
+ for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
+ /* Skip presets with IOR */
+ if ((preset_idx == 2) ||
+ (preset_idx == 5) ||
+ (preset_idx == 8) ||
+ (preset_idx == 11))
+ continue;
+ __bnx2x_get_preset_regs(bp, p, preset_idx);
+ p += __bnx2x_get_preset_regs_len(bp, preset_idx);
+ }
}
static void bnx2x_get_regs(struct net_device *dev,
@@ -773,9 +873,9 @@ static void bnx2x_get_regs(struct net_device *dev,
{
u32 *p = _p;
struct bnx2x *bp = netdev_priv(dev);
- struct dump_hdr dump_hdr = {0};
+ struct dump_header dump_hdr = {0};
- regs->version = 1;
+ regs->version = 2;
memset(p, 0, regs->len);
if (!netif_running(bp->dev))
@@ -785,53 +885,173 @@ static void bnx2x_get_regs(struct net_device *dev,
* cause false alarms by reading never written registers. We
* will re-enable parity attentions right after the dump.
*/
+
+ /* Disable parity on path 0 */
+ bnx2x_pretend_func(bp, 0);
bnx2x_disable_blocks_parity(bp);
- dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
- dump_hdr.dump_sign = dump_sign_all;
- dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
- dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
- dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
- dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+ /* Disable parity on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_disable_blocks_parity(bp);
- if (CHIP_IS_E1(bp))
- dump_hdr.info = RI_E1_ONLINE;
- else if (CHIP_IS_E1H(bp))
- dump_hdr.info = RI_E1H_ONLINE;
- else if (!CHIP_IS_E1x(bp))
- dump_hdr.info = RI_E2_ONLINE |
- (BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = DUMP_ALL_PRESETS;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+	/* dump_meta_data represents the OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
- memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
- p += dump_hdr.hdr_size + 1;
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
/* Actually read the registers */
__bnx2x_get_regs(bp, p);
- /* Re-enable parity attentions */
+ /* Re-enable parity attentions on path 0 */
+ bnx2x_pretend_func(bp, 0);
bnx2x_clear_blocks_parity(bp);
bnx2x_enable_blocks_parity(bp);
+
+ /* Re-enable parity attentions on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int regdump_len = 0;
+
+ regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
+ regdump_len *= 4;
+ regdump_len += sizeof(struct dump_header);
+
+ return regdump_len;
+}
+
+static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Use the ethtool_dump "flag" field as the dump preset index */
+ bp->dump_preset_idx = val->flag;
+ return 0;
+}
+
+static int bnx2x_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* Calculate the requested preset idx length */
+ dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
+ DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
+ bp->dump_preset_idx, dump->len);
+
+ dump->flag = ETHTOOL_GET_DUMP_DATA;
+ return 0;
+}
+
+static int bnx2x_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump,
+ void *buffer)
+{
+ u32 *p = buffer;
+ struct bnx2x *bp = netdev_priv(dev);
+ struct dump_header dump_hdr = {0};
+
+ memset(p, 0, dump->len);
+
+	/* Disable parity attentions, since the following dump may
+	 * cause false alarms by reading registers that were never written.
+	 * We will re-enable parity attentions right after the dump.
+ */
+
+ /* Disable parity on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Disable parity on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_disable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
+ dump_hdr.preset = bp->dump_preset_idx;
+ dump_hdr.version = BNX2X_DUMP_VERSION;
+
+ DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);
+
+ /* dump_meta_data presents OR of CHIP and PATH. */
+ if (CHIP_IS_E1(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1;
+ } else if (CHIP_IS_E1H(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
+ } else if (CHIP_IS_E2(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3A0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ } else if (CHIP_IS_E3B0(bp)) {
+ dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
+ (BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
+ }
+
+ memcpy(p, &dump_hdr, sizeof(struct dump_header));
+ p += dump_hdr.header_size + 1;
+
+ /* Actually read the registers */
+ __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
+
+ /* Re-enable parity attentions on path 0 */
+ bnx2x_pretend_func(bp, 0);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Re-enable parity attentions on path 1 */
+ bnx2x_pretend_func(bp, 1);
+ bnx2x_clear_blocks_parity(bp);
+ bnx2x_enable_blocks_parity(bp);
+
+ /* Return to current function */
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ return 0;
}
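The three callbacks above are reached through the generic ethtool dump ioctls (they are wired into bnx2x_ethtool_ops further down in this file). As an illustration only, not part of this patch, with a placeholder interface name and preset index and most error handling omitted, a user-space caller could drive them roughly as follows; this is essentially the sequence behind "ethtool -W <dev> <preset>" followed by "ethtool -w <dev> data <file>":

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		struct ethtool_dump flag_req;
		struct ethtool_dump *data_req;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder device */

		/* ETHTOOL_SET_DUMP -> bnx2x_set_dump(): select preset 1 */
		memset(&flag_req, 0, sizeof(flag_req));
		flag_req.cmd = ETHTOOL_SET_DUMP;
		flag_req.flag = 1;
		ifr.ifr_data = (char *)&flag_req;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;

		/* ETHTOOL_GET_DUMP_FLAG -> bnx2x_get_dump_flag(): learn the buffer size */
		flag_req.cmd = ETHTOOL_GET_DUMP_FLAG;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return 1;

		/* ETHTOOL_GET_DUMP_DATA -> bnx2x_get_dump_data(): fetch the registers */
		data_req = calloc(1, sizeof(*data_req) + flag_req.len);
		data_req->cmd = ETHTOOL_GET_DUMP_DATA;
		data_req->len = flag_req.len;
		ifr.ifr_data = (char *)data_req;
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("preset %u dump: %u bytes\n", flag_req.flag, data_req->len);

		free(data_req);
		return 0;
	}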
static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 phy_fw_ver[PHY_FW_VER_LEN];
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- phy_fw_ver[0] = '\0';
- bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
- snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
- (bp->common.bc_ver & 0xff0000) >> 16,
- (bp->common.bc_ver & 0xff00) >> 8,
- (bp->common.bc_ver & 0xff),
- ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+ bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS(bp);
@@ -861,13 +1081,13 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bnx2x *bp = netdev_priv(dev);
if (wol->wolopts & ~WAKE_MAGIC) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
if (wol->wolopts & WAKE_MAGIC) {
if (bp->flags & NO_WOL_FLAG) {
- DP(BNX2X_MSG_ETHTOOL, "WOL not supproted\n");
+ DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
}
bp->wol = 1;
@@ -890,7 +1110,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
if (capable(CAP_NET_ADMIN)) {
/* dump MCP trace */
- if (level & BNX2X_MSG_MCP)
+ if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
bnx2x_fw_dump_lvl(bp, KERN_INFO);
bp->msg_enable = level;
}
@@ -940,7 +1160,7 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
* Pf B takes the lock and proceeds to perform it's own access.
* pf A unlocks the per port lock, while pf B is still working (!).
* mcp takes the per port lock and corrupts pf B's access (and/or has it's own
- * acess corrupted by pf B).*
+ * access corrupted by pf B)
*/
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
@@ -1070,7 +1290,8 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
/* we read nvram data in cpu order
* but ethtool sees it as an array of bytes
- * converting to big-endian will do the work */
+ * converting to big-endian will do the work
+ */
*ret_val = cpu_to_be32(val);
rc = 0;
break;
@@ -1297,7 +1518,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
val |= (*data_buf << BYTE_OFFSET(offset));
/* nvram data is returned as an array of bytes
- * convert it back to cpu order */
+ * convert it back to cpu order
+ */
val = be32_to_cpu(val);
rc = bnx2x_nvram_write_dword(bp, align_offset, val,
@@ -1509,6 +1731,10 @@ static int bnx2x_set_ringparam(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ DP(BNX2X_MSG_ETHTOOL,
+ "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+ ering->rx_pending, ering->tx_pending);
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
DP(BNX2X_MSG_ETHTOOL,
"Handling parity error recovery. Try again later\n");
@@ -1747,7 +1973,6 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
return 0;
}
-
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1875,7 +2100,8 @@ static int bnx2x_test_registers(struct bnx2x *bp)
hw = BNX2X_CHIP_MASK_E3;
/* Repeat the test twice:
- First by writing 0x00000000, second by writing 0xffffffff */
+ * First by writing 0x00000000, second by writing 0xffffffff
+ */
for (idx = 0; idx < 2; idx++) {
switch (idx) {
@@ -2388,8 +2614,8 @@ static void bnx2x_self_test(struct net_device *dev,
struct ethtool_test *etest, u64 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- u8 is_serdes;
- int rc;
+ u8 is_serdes, link_up;
+ int rc, cnt = 0;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
@@ -2397,6 +2623,7 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+
DP(BNX2X_MSG_ETHTOOL,
"Self-test command parameters: offline = %d, external_lb = %d\n",
(etest->flags & ETH_TEST_FL_OFFLINE),
@@ -2411,20 +2638,17 @@ static void bnx2x_self_test(struct net_device *dev,
}
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
-
+ link_up = bp->link_vars.link_up;
/* offline tests are not supported in MF mode */
if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
- u8 link_up;
/* save current value of input enable for TX port IF */
val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
/* disable input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
- link_up = bp->link_vars.link_up;
-
bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
rc = bnx2x_nic_load(bp, LOAD_DIAG);
if (rc) {
@@ -2486,17 +2710,19 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (bnx2x_link_test(bp, is_serdes) != 0) {
+ if (link_up) {
+ cnt = 100;
+ while (bnx2x_link_test(bp, is_serdes) && --cnt)
+ msleep(20);
+ }
+
+ if (!cnt) {
if (!IS_MF(bp))
buf[6] = 1;
else
buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
-
-#ifdef BNX2X_EXTRA_DEBUG
- bnx2x_panic_dump(bp);
-#endif
}
#define IS_PORT_STAT(i) \
@@ -2753,15 +2979,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
/* For UDP either 2-tupple hash or 4-tupple hash is supported */
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
udp_rss_requested = 1;
else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
udp_rss_requested = 0;
@@ -2781,9 +3006,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
- } else {
- return 0;
}
+ return 0;
+
case IPV4_FLOW:
case IPV6_FLOW:
/* For IP only 2-tupple hash is supported */
@@ -2791,9 +3016,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2809,9 +3034,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"Command parameters not supported\n");
return -EINVAL;
- } else {
- return 0;
}
+ return 0;
+
default:
return -EINVAL;
}
@@ -2964,6 +3189,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
+ .get_dump_flag = bnx2x_get_dump_flag,
+ .get_dump_data = bnx2x_get_dump_data,
+ .set_dump = bnx2x_set_dump,
.get_wol = bnx2x_get_wol,
.set_wol = bnx2x_set_wol,
.get_msglevel = bnx2x_get_msglevel,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 60a83ad1037..e5f808377c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -305,12 +305,10 @@
#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
-
/* Maximal aggregation queues supported */
#define ETH_MAX_AGGREGATION_QUEUES_E1 32
#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
-
#define ETH_NUM_OF_MCAST_BINS 256
#define ETH_NUM_OF_MCAST_ENGINES_E2 72
@@ -353,7 +351,6 @@
/* max number of slow path commands per port */
#define MAX_RAMRODS_PER_PORT 8
-
/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
#define TIMERS_TICK_SIZE_CHIP (1e-3)
@@ -380,7 +377,6 @@
that is not mapped to priority*/
#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
-
#define C_ERES_PER_PAGE \
(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
@@ -391,8 +387,6 @@
#define INVALID_VNIC_ID 0xFF
-
#define UNDEF_IRO 0x80000000
-
#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 4bed52ba300..f572ae164fc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3369a50ac6b..037860ecc34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -899,6 +899,10 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK 0x00000C00
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE 0x00000400
+ #define PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI 0x00000800
+
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -3374,6 +3378,10 @@ struct regpair {
__le32 hi;
};
+struct regpair_native {
+ u32 lo;
+ u32 hi;
+};
/*
* Classify rule opcodes in E2/E3
@@ -4400,13 +4408,13 @@ struct tstorm_eth_function_common_config {
* MAC filtering configuration parameters per port in Tstorm
*/
struct tstorm_eth_mac_filter_config {
- __le32 ucast_drop_all;
- __le32 ucast_accept_all;
- __le32 mcast_drop_all;
- __le32 mcast_accept_all;
- __le32 bcast_accept_all;
- __le32 vlan_filter[2];
- __le32 unmatched_unicast;
+ u32 ucast_drop_all;
+ u32 ucast_accept_all;
+ u32 mcast_drop_all;
+ u32 mcast_accept_all;
+ u32 bcast_accept_all;
+ u32 vlan_filter[2];
+ u32 unmatched_unicast;
};
@@ -4898,7 +4906,7 @@ union event_data {
* per PF event ring data
*/
struct event_ring_data {
- struct regpair base_addr;
+ struct regpair_native base_addr;
#if defined(__BIG_ENDIAN)
u8 index_id;
u8 sb_id;
@@ -5131,7 +5139,7 @@ struct pci_entity {
* The fast-path status block meta-data, common to all chips
*/
struct hc_sb_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
struct pci_entity p_func;
#if defined(__BIG_ENDIAN)
@@ -5145,7 +5153,7 @@ struct hc_sb_data {
u8 state;
u8 rsrv0;
#endif
- struct regpair rsrv1[2];
+ struct regpair_native rsrv1[2];
};
@@ -5163,7 +5171,7 @@ enum hc_segment {
* The fast-path status block meta-data
*/
struct hc_sp_status_block_data {
- struct regpair host_sb_addr;
+ struct regpair_native host_sb_addr;
#if defined(__BIG_ENDIAN)
u8 rsrv1;
u8 state;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c8f10f0e8a0..76df015f486 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
/* bnx2x_init.h: Broadcom Everest network driver.
* Structures and macroes needed during the initialization.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index d755acfe7a4..8ab0dd90096 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -218,7 +218,7 @@ static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
/* gunzip_outlen is in dwords */
len = GUNZIP_OUTLEN(bp);
for (i = 0; i < len; i++)
- ((u32 *)GUNZIP_BUF(bp))[i] =
+ ((u32 *)GUNZIP_BUF(bp))[i] = (__force u32)
cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
bnx2x_write_big_buf_wb(bp, addr, len);
@@ -232,7 +232,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
u16 op_end =
INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
STAGE_END)];
- union init_op *op;
+ const union init_op *op;
u32 op_idx, op_type, addr, len;
const u32 *data, *data_base;
@@ -244,7 +244,7 @@ static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
for (op_idx = op_start; op_idx < op_end; op_idx++) {
- op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+ op = (const union init_op *)&(INIT_OPS(bp)[op_idx]);
/* Get generic data */
op_type = op->raw.op;
addr = op->raw.offset;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 09096b43a6e..c6da77fa9d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -3659,7 +3659,7 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3<<6));
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3713,7 +3713,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
};
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overriden by 10G force */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -3854,7 +3854,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2}
};
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -4242,7 +4242,7 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, (3<<13));
- for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(wc_regs); i++)
bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
wc_regs[i].val);
@@ -4748,6 +4748,12 @@ void bnx2x_link_status_update(struct link_params *params,
vars->link_status = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region,
port_mb[port].link_status));
+
+ /* Force link UP in non LOOPBACK_EXT loopback mode(s) */
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
+ bp->link_params.loopback_mode != LOOPBACK_EXT)
+ vars->link_status |= LINK_STATUS_LINK_UP;
+
if (bnx2x_eee_has_cap(params))
vars->eee_status = REG_RD(bp, params->shmem2_base +
offsetof(struct shmem2_region,
@@ -9520,7 +9526,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set);
+ for (i = 0; i < ARRAY_SIZE(reg_set);
i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -9592,7 +9598,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
@@ -13395,7 +13401,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
};
DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
- for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ee6e7ec8545..d25c7d79787 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2012 Broadcom Corporation
+/* Copyright 2008-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5523da3afcd..c4daee1b728 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,6 +59,7 @@
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
@@ -144,39 +145,49 @@ enum bnx2x_board_type {
BCM57711E,
BCM57712,
BCM57712_MF,
+ BCM57712_VF,
BCM57800,
BCM57800_MF,
+ BCM57800_VF,
BCM57810,
BCM57810_MF,
- BCM57840_O,
+ BCM57810_VF,
BCM57840_4_10,
BCM57840_2_20,
- BCM57840_MFO,
BCM57840_MF,
+ BCM57840_VF,
BCM57811,
- BCM57811_MF
+ BCM57811_MF,
+ BCM57840_O,
+ BCM57840_MFO,
+ BCM57811_VF
};
/* indexed by board_type, above */
static struct {
char *name;
} board_info[] = {
- { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
- { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
- { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
+ [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+ [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+ [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+ [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+ [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+ [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
+ [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+ [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+ [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
+ [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+ [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+ [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
+ [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+ [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+ [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
+ [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
+ [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57811_VF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
@@ -194,12 +205,18 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_VF
+#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_VF
+#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
#endif
@@ -209,6 +226,9 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_VF
+#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
#endif
@@ -221,29 +241,41 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_VF
+#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
+#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
#endif
+#ifndef PCI_DEVICE_ID_NX2_57811_VF
+#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
+#endif
+
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
{ 0 }
};
@@ -346,6 +378,65 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
+ "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+}
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -396,7 +487,7 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
@@ -412,9 +503,8 @@ static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
dmae->comp_val = DMAE_COMP_VAL;
}
-/* issue a dmae command over the init-channel and wailt for completion */
-static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
- struct dmae_command *dmae)
+/* issue a dmae command over the init-channel and wait for completion */
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
@@ -692,12 +782,16 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
printk("%s", lvl);
+
+ /* dump buffer after the mark */
for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
data[8] = 0x0;
pr_cont("%s", (char *)data);
}
+
+ /* dump buffer before the mark */
for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, offset + 4*word));
@@ -712,7 +806,71 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
-void bnx2x_panic_dump(struct bnx2x *bp)
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+ int port = BP_PORT(bp);
+ u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+ u32 val = REG_RD(bp, addr);
+
+ /* in E1 we must use only PCI configuration space to disable
+	 * MSI/MSIX capability.
+	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
+ */
+ if (CHIP_IS_E1(bp)) {
+		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
+		 * use the mask register to prevent the HC from sending interrupts
+		 * after we exit the function
+ */
+ REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+ } else
+ val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+ HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+ HC_CONFIG_0_REG_INT_LINE_EN_0 |
+ HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
+ val, port, addr);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, addr, val);
+ if (REG_RD(bp, addr) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+ IGU_PF_CONF_INT_LINE_EN |
+ IGU_PF_CONF_ATTN_BIT_EN);
+
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
+
+ /* flush all outstanding writes */
+ mmiowb();
+
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+static void bnx2x_int_disable(struct bnx2x *bp)
+{
+ if (bp->common.int_block == INT_BLOCK_HC)
+ bnx2x_hc_int_disable(bp);
+ else
+ bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
int i;
u16 j;
@@ -722,6 +880,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
u16 start = 0, end = 0;
u8 cos;
#endif
+ if (disable_int)
+ bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
bp->eth_stats.unrecoverable_error++;
@@ -867,6 +1027,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
}
#ifdef BNX2X_STOP_ON_ERROR
+
+ /* event queue */
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ }
+
/* Rings */
/* Rx */
for_each_valid_rx_queue(bp, i) {
@@ -1038,8 +1209,8 @@ static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
return val;
}
-static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
- char *msg, u32 poll_cnt)
+int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+ char *msg, u32 poll_cnt)
{
u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
if (val != 0) {
@@ -1049,7 +1220,8 @@ static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
return 0;
}
-static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+/* Routines shared with the VF FLR cleanup flow */
+u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
/* adjust polling timeout */
if (CHIP_REV_IS_EMUL(bp))
@@ -1061,7 +1233,7 @@ static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
return FLR_POLL_CNT;
}
-static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
struct pbf_pN_cmd_regs cmd_regs[] = {
{0, (CHIP_IS_E3B0(bp)) ?
@@ -1136,10 +1308,9 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
-static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
- u32 poll_cnt)
+int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
{
- struct sdm_op_gen op_gen = {0};
+ u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
@@ -1150,19 +1321,20 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return 1;
}
- op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
- op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
- op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
- op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+ op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+ op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+ op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
+ op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
- REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+ REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
(REG_RD(bp, comp_addr)));
- ret = 1;
+ bnx2x_panic();
+ return 1;
}
/* Zero completion for nxt FLR */
REG_WR(bp, comp_addr, 0);
@@ -1170,7 +1342,7 @@ static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
return ret;
}
-static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
u16 status;
@@ -1382,26 +1554,31 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
if (msix) {
val &= ~(IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN);
if (single_msix)
val |= IGU_PF_CONF_SINGLE_ISR_EN;
} else if (msi) {
val &= ~IGU_PF_CONF_INT_LINE_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_MSI_MSIX_EN |
+ val |= (IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
} else {
val &= ~IGU_PF_CONF_MSI_MSIX_EN;
- val |= (IGU_PF_CONF_FUNC_EN |
- IGU_PF_CONF_INT_LINE_EN |
+ val |= (IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN |
IGU_PF_CONF_SINGLE_ISR_EN);
}
+	/* Clean previous status - need to configure igu prior to ack */
+ if ((!msix) || single_msix) {
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+ bnx2x_ack_int(bp);
+ }
+
+ val |= IGU_PF_CONF_FUNC_EN;
+
DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
@@ -1436,71 +1613,6 @@ void bnx2x_int_enable(struct bnx2x *bp)
bnx2x_igu_int_enable(bp);
}
-static void bnx2x_hc_int_disable(struct bnx2x *bp)
-{
- int port = BP_PORT(bp);
- u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
- u32 val = REG_RD(bp, addr);
-
- /*
- * in E1 we must use only PCI configuration space to disable
- * MSI/MSIX capablility
- * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
- */
- if (CHIP_IS_E1(bp)) {
- /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
- * Use mask register to prevent from HC sending interrupts
- * after we exit the function
- */
- REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
-
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- } else
- val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
- HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
- HC_CONFIG_0_REG_INT_LINE_EN_0 |
- HC_CONFIG_0_REG_ATTN_BIT_EN_0);
-
- DP(NETIF_MSG_IFDOWN,
- "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, addr, val);
- if (REG_RD(bp, addr) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_igu_int_disable(struct bnx2x *bp)
-{
- u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
-
- val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
- IGU_PF_CONF_INT_LINE_EN |
- IGU_PF_CONF_ATTN_BIT_EN);
-
- DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
-
- /* flush all outstanding writes */
- mmiowb();
-
- REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
- if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
- BNX2X_ERR("BUG! proper val not read from IGU!\n");
-}
-
-static void bnx2x_int_disable(struct bnx2x *bp)
-{
- if (bp->common.int_block == INT_BLOCK_HC)
- bnx2x_hc_int_disable(bp);
- else
- bnx2x_igu_int_disable(bp);
-}
-
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -1586,11 +1698,11 @@ static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
}
/**
- * bnx2x_trylock_leader_lock- try to aquire a leader lock.
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
*
* @bp: driver handle
*
- * Tries to aquire a leader lock for current engine.
+ * Tries to acquire a leader lock for current engine.
*/
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
@@ -1599,6 +1711,24 @@ static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
+static int bnx2x_schedule_sp_task(struct bnx2x *bp)
+{
+ /* Set the interrupt occurred bit for the sp-task to recognize it
+ * must ack the interrupt and transition according to the IGU
+ * state machine.
+ */
+ atomic_set(&bp->interrupt_occurred, 1);
+
+ /* The sp_task must execute only after this bit
+ * is set, otherwise we will get out of sync and miss all
+ * further interrupts. Hence, the barrier.
+ */
+ smp_wmb();
+
+ /* schedule sp_task to workqueue */
+ return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+}
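For reference, the handshake that bnx2x_schedule_sp_task() sets up with the sp task is the usual flag-plus-barrier pattern between an ISR and a workqueue item. The following is an illustrative sketch only, not part of this patch; my_dev, my_wq and my_sp_task are hypothetical names, and it assumes sp_task was prepared with INIT_DELAYED_WORK() and my_wq was allocated elsewhere.

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct my_dev {
	atomic_t interrupt_occurred;
	struct delayed_work sp_task;	/* set up with INIT_DELAYED_WORK() */
};

static struct workqueue_struct *my_wq;	/* assumed allocated elsewhere */

/* ISR side: publish the flag, then queue the work */
static int my_schedule_sp_task(struct my_dev *dev)
{
	atomic_set(&dev->interrupt_occurred, 1);
	smp_wmb();	/* make the flag visible before the work can run */
	return queue_delayed_work(my_wq, &dev->sp_task, 0);
}

/* worker side: pair the write barrier with a read barrier, then consume */
static void my_sp_task(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, sp_task.work);

	smp_rmb();
	if (atomic_read(&dev->interrupt_occurred)) {
		atomic_set(&dev->interrupt_occurred, 0);
		/* ... ack the status block, handle attentions ... */
	}
}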
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
@@ -1613,6 +1743,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
fp->index, cid, command, bp->state,
rr_cqe->ramrod_cqe.ramrod_type);
+ /* If cid is within VF range, replace the slowpath object with the
+ * one corresponding to this VF
+ */
+ if (cid >= BNX2X_FIRST_VF_CID &&
+ cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
+ bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
+
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
@@ -1664,6 +1801,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
#else
return;
#endif
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, true);
smp_mb__before_atomic_inc();
atomic_inc(&bp->cq_spq_left);
@@ -1680,7 +1819,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
* mark pending ACK to MCP bit.
* prevent case that both bits are cleared.
* At the end of load/unload driver checks that
- * sp_state is cleaerd, and this order prevents
+ * sp_state is cleared, and this order prevents
* races
*/
smp_mb__before_clear_bit();
@@ -1689,22 +1828,13 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
smp_mb__after_clear_bit();
- /* schedule workqueue to send ack to MCP */
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule the sp task as mcp ack is required */
+ bnx2x_schedule_sp_task(bp);
}
return;
}
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
-{
- u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
-
- bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
- start);
-}
-
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
struct bnx2x *bp = netdev_priv(dev_instance);
@@ -1745,21 +1875,23 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
if (status & (mask | 0x1)) {
struct cnic_ops *c_ops = NULL;
- if (likely(bp->state == BNX2X_STATE_OPEN)) {
- rcu_read_lock();
- c_ops = rcu_dereference(bp->cnic_ops);
- if (c_ops)
- c_ops->cnic_handler(bp->cnic_data,
- NULL);
- rcu_read_unlock();
- }
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops && (bp->cnic_eth_dev.drv_state &
+ CNIC_DRV_STATE_HANDLES_IRQ))
+ c_ops->cnic_handler(bp->cnic_data, NULL);
+ rcu_read_unlock();
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
status &= ~0x1;
if (!status)
@@ -2459,23 +2591,55 @@ void bnx2x__link_status_update(struct bnx2x *bp)
return;
/* read updated dcb configuration */
- bnx2x_dcbx_pmf_update(bp);
-
- bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (IS_PF(bp)) {
+ bnx2x_dcbx_pmf_update(bp);
+ bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+ if (bp->link_vars.link_up)
+ bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+ else
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ /* indicate link status */
+ bnx2x_link_report(bp);
- if (bp->link_vars.link_up)
+ } else { /* VF */
+ bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_2500baseX_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_TP |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause);
+ bp->port.advertising[0] = bp->port.supported[0];
+
+ bp->link_params.bp = bp;
+ bp->link_params.port = BP_PORT(bp);
+ bp->link_params.req_duplex[0] = DUPLEX_FULL;
+ bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
+ bp->link_params.req_line_speed[0] = SPEED_10000;
+ bp->link_params.speed_cap_mask[0] = 0x7f0000;
+ bp->link_params.switch_cfg = SWITCH_CFG_10G;
+ bp->link_vars.mac_type = MAC_TYPE_BMAC;
+ bp->link_vars.line_speed = SPEED_10000;
+ bp->link_vars.link_status =
+ (LINK_STATUS_LINK_UP |
+ LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
+ bp->link_vars.link_up = 1;
+ bp->link_vars.duplex = DUPLEX_FULL;
+ bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+ __bnx2x_link_report(bp);
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
- else
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* indicate link status */
- bnx2x_link_report(bp);
+ }
}
static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
u16 vlan_val, u8 allowed_prio)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_update_params *f_update_params =
&func_params.params.afex_update;
@@ -2500,7 +2664,7 @@ static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
u16 vif_index, u8 func_bit_map)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_afex_viflists_params *update_params =
&func_params.params.afex_viflists;
int rc;
@@ -2516,7 +2680,7 @@ static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
/* set parameters according to cmd_type */
update_params->afex_vif_list_command = cmd_type;
- update_params->vif_list_index = cpu_to_le16(vif_index);
+ update_params->vif_list_index = vif_index;
update_params->func_bit_map =
(cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
update_params->func_to_clear = 0;
@@ -2800,6 +2964,10 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+#ifdef BNX2X_STOP_ON_ERROR
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
+#endif
+
return flags;
}
@@ -2875,15 +3043,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
pause->sge_th_hi + FW_PREFETCH_CNT >
MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
- tpa_agg_size = min_t(u32,
- (min_t(u32, 8, MAX_SKB_FRAGS) *
- SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+ tpa_agg_size = TPA_AGG_SIZE;
max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
SGE_PAGE_SHIFT;
max_sge = ((max_sge + PAGES_PER_SGE - 1) &
(~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
- sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
- 0xffff);
+ sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
}
/* pause - not for e1 */
@@ -2928,7 +3093,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* Maximum number or simultaneous TPA aggregation for this Queue.
*
- * For PF Clients it should be the maximum avaliable number.
+ * For PF Clients it should be the maximum available number.
* VF driver(s) may want to define it to a smaller value.
*/
rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
@@ -3022,7 +3187,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
if (bp->port.pmf)
storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- /* init Event Queue */
+ /* init Event Queue - PCI bus guarantees correct endianness */
eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
eq_data.producer = bp->eq_prod;
@@ -3112,65 +3277,75 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_statistics_params *fw_fcoe_stat =
&bp->fw_stats_data->fcoe;
- ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
+ fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
- ADD_64(fcoe_stat->rx_bytes_hi,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
- fcoe_stat->rx_bytes_lo,
- fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+ ADD_64_LE(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_ucast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_bcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
- ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
- fcoe_q_tstorm_stats->rcv_mcast_pkts);
+ ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
+ fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
- ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
+ fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_bytes_hi,
- fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
- fcoe_stat->tx_bytes_lo,
- fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+ ADD_64_LE(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->ucast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->bcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
- ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
- fcoe_q_xstorm_stats->mcast_pkts_sent);
+ ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
+ fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
}
/* ask L5 driver to add data to the struct */
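The ADD_64 to ADD_64_LE conversion above reflects that the firmware FCoE counters arrive little-endian. The real macros live in the bnx2x headers; as a hedged illustration of the hi/lo accumulation they perform (the SKETCH_* names are stand-ins, not driver symbols, and a_lo is evaluated twice, which is harmless for a pure conversion like le32_to_cpu()):

#include <linux/kernel.h>	/* le32_to_cpu() in kernel builds */

/* add a 64-bit quantity held as two u32 halves, propagating the carry */
#define SKETCH_ADD_64(s_hi, a_hi, s_lo, a_lo)				\
	do {								\
		(s_lo) += (a_lo);					\
		(s_hi) += (a_hi) + ((s_lo) < (a_lo) ? 1 : 0);		\
	} while (0)

/* same, but the added halves arrive little-endian from the firmware */
#define SKETCH_ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le)			\
	SKETCH_ADD_64(s_hi, le32_to_cpu(a_hi_le),			\
		      s_lo, le32_to_cpu(a_lo_le))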
@@ -3641,7 +3816,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
"Please contact OEM Support for assistance\n");
/*
- * Scheudle device reset (unload)
+ * Schedule device reset (unload)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
@@ -3791,6 +3966,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_DRV_INFO_REQ)
bnx2x_handle_drv_info_req(bp);
+
+ if (val & DRV_STATUS_VF_DISABLED)
+ bnx2x_vf_handle_flr_event(bp);
+
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -4587,8 +4766,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
u16 index, u8 op, u8 update)
{
- u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
+ u32 igu_addr = bp->igu_base_addr;
+ igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
igu_addr);
}
@@ -4616,7 +4795,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
cid);
- bnx2x_panic_dump(bp);
+ bnx2x_panic_dump(bp, false);
}
bnx2x_cnic_cfc_comp(bp, cid, err);
return 0;
@@ -4658,7 +4837,8 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
/* Always push next commands out, don't wait here */
__set_bit(RAMROD_CONT, &ramrod_flags);
- switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
+ >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -4735,7 +4915,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
struct bnx2x_queue_update_params *q_update_params =
&queue_params.params.update;
- /* Send Q update command with afex vlan removal values for all Qs */
+ /* Send Q update command with afex vlan removal values for all Qs */
queue_params.cmd = BNX2X_Q_CMD_UPDATE;
/* set silent vlan removal values according to vlan mode */
@@ -4809,7 +4989,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
u8 echo;
u32 cid;
u8 opcode;
- int spqe_cnt = 0;
+ int rc, spqe_cnt = 0;
struct bnx2x_queue_sp_obj *q_obj;
struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
@@ -4837,15 +5017,27 @@ static void bnx2x_eq_int(struct bnx2x *bp)
for (; sw_cons != hw_cons;
sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
-
elem = &bp->eq_ring[EQ_DESC(sw_cons)];
- cid = SW_CID(elem->message.data.cfc_del_event.cid);
- opcode = elem->message.opcode;
+ rc = bnx2x_iov_eq_sp_event(bp, elem);
+ if (!rc) {
+ DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
+ rc);
+ goto next_spqe;
+ }
+ /* elem CID originates from FW; actually LE */
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ opcode = elem->message.opcode;
/* handle eq element */
switch (opcode) {
+ case EVENT_RING_OPCODE_VF_PF_CHANNEL:
+ DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
+ bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
+ continue;
+
case EVENT_RING_OPCODE_STAT_QUERY:
DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
"got statistics comp event %d\n",
@@ -5011,50 +5203,65 @@ next_spqe:
static void bnx2x_sp_task(struct work_struct *work)
{
struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
- u16 status;
- status = bnx2x_update_dsb_idx(bp);
-/* if (status == 0) */
-/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
+ DP(BNX2X_MSG_SP, "sp task invoked\n");
- DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
+ /* make sure the atomic interrupt_occurred has been written */
+ smp_rmb();
+ if (atomic_read(&bp->interrupt_occurred)) {
- /* HW attentions */
- if (status & BNX2X_DEF_SB_ATT_IDX) {
- bnx2x_attn_int(bp);
- status &= ~BNX2X_DEF_SB_ATT_IDX;
- }
+ /* what work needs to be performed? */
+ u16 status = bnx2x_update_dsb_idx(bp);
- /* SP events: STAT_QUERY and others */
- if (status & BNX2X_DEF_SB_IDX) {
- struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+ DP(BNX2X_MSG_SP, "status %x\n", status);
+ DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
+ atomic_set(&bp->interrupt_occurred, 0);
+
+ /* HW attentions */
+ if (status & BNX2X_DEF_SB_ATT_IDX) {
+ bnx2x_attn_int(bp);
+ status &= ~BNX2X_DEF_SB_ATT_IDX;
+ }
+
+ /* SP events: STAT_QUERY and others */
+ if (status & BNX2X_DEF_SB_IDX) {
+ struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
if (FCOE_INIT(bp) &&
- (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
- /*
- * Prevent local bottom-halves from running as
- * we are going to change the local NAPI list.
- */
- local_bh_disable();
- napi_schedule(&bnx2x_fcoe(bp, napi));
- local_bh_enable();
+ (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+ /* Prevent local bottom-halves from running as
+ * we are going to change the local NAPI list.
+ */
+ local_bh_disable();
+ napi_schedule(&bnx2x_fcoe(bp, napi));
+ local_bh_enable();
+ }
+
+ /* Handle EQ completions */
+ bnx2x_eq_int(bp);
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+ le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+ status &= ~BNX2X_DEF_SB_IDX;
}
- /* Handle EQ completions */
- bnx2x_eq_int(bp);
+ /* if status is non-zero then perhaps something went wrong */
+ if (unlikely(status))
+ DP(BNX2X_MSG_SP,
+ "got an unknown interrupt! (status 0x%x)\n", status);
- bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
- le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+ /* ack status block only if something was actually handled */
+ bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+ le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
- status &= ~BNX2X_DEF_SB_IDX;
}
- if (unlikely(status))
- DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
- status);
-
- bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
- le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+ /* must be called after the EQ processing (since eq leads to sriov
+ * ramrod completion flows).
+ * This flow may have been scheduled by the arrival of a ramrod
+ * completion, or by the sriov code rescheduling itself.
+ */
+ bnx2x_iov_sp_task(bp);
/* afex - poll to check if VIFSET_ACK should be sent to MFW */
if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
@@ -5087,7 +5294,10 @@ irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
rcu_read_unlock();
}
- queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ /* schedule sp task to perform default status block work, ack
+ * attentions and enable interrupts.
+ */
+ bnx2x_schedule_sp_task(bp);
return IRQ_HANDLED;
}
@@ -5101,7 +5311,6 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
bp->fw_drv_pulse_wr_seq);
}
-
static void bnx2x_timer(unsigned long data)
{
struct bnx2x *bp = (struct bnx2x *) data;
@@ -5109,7 +5318,8 @@ static void bnx2x_timer(unsigned long data)
if (!netif_running(bp->dev))
return;
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
u32 drv_pulse;
u32 mcp_pulse;
@@ -5136,6 +5346,10 @@ static void bnx2x_timer(unsigned long data)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+ /* sample pf vf bulletin board for new posts from pf */
+ if (IS_VF(bp))
+ bnx2x_sample_bulletin(bp);
+
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
@@ -5278,7 +5492,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
}
-static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -5334,7 +5548,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
- /* write indecies to HW */
+ /* write indices to HW - PCI guarantees endianness of regpairs */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
@@ -5422,6 +5636,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
bnx2x_zero_sp_sb(bp);
+ /* PCI guarantees endianness of regpairs */
sp_sb_data.state = SB_ENABLED;
sp_sb_data.host_sb_addr.lo = U64_LO(section);
sp_sb_data.host_sb_addr.hi = U64_HI(section);
@@ -5478,13 +5693,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
}
-
/* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
- unsigned long rx_mode_flags,
- unsigned long rx_accept_flags,
- unsigned long tx_accept_flags,
- unsigned long ramrod_flags)
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+ unsigned long rx_mode_flags,
+ unsigned long rx_accept_flags,
+ unsigned long tx_accept_flags,
+ unsigned long ramrod_flags)
{
struct bnx2x_rx_mode_ramrod_params ramrod_param;
int rc;
@@ -5514,22 +5728,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
rc = bnx2x_config_rx_mode(bp, &ramrod_param);
if (rc < 0) {
BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
- return;
+ return rc;
}
+
+ return 0;
}
-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
{
- unsigned long rx_mode_flags = 0, ramrod_flags = 0;
- unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ /* Clear the flags first */
+ *rx_accept_flags = 0;
+ *tx_accept_flags = 0;
- if (!NO_FCOE(bp))
-
- /* Configure rx_mode of FCoE Queue */
- __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
-
- switch (bp->rx_mode) {
+ switch (rx_mode) {
case BNX2X_RX_MODE_NONE:
/*
* 'drop all' supersedes any accept flags that may have been
@@ -5537,25 +5750,25 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
*/
break;
case BNX2X_RX_MODE_NORMAL:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_ALLMULTI:
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
break;
case BNX2X_RX_MODE_PROMISC:
@@ -5563,36 +5776,57 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
- __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
/* internal switching mode */
- __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
- __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
if (IS_MF_SI(bp))
- __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
else
- __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
break;
default:
- BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
- return;
+ BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+ return -EINVAL;
}
+ /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
}
+ return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+ unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+ unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+ int rc;
+
+ if (!NO_FCOE(bp))
+ /* Configure rx_mode of FCoE Queue */
+ __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+ rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+ &tx_accept_flags);
+ if (rc)
+ return rc;
+
__set_bit(RAMROD_RX, &ramrod_flags);
__set_bit(RAMROD_TX, &ramrod_flags);
- bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
- tx_accept_flags, ramrod_flags);
+ return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+ rx_accept_flags, tx_accept_flags,
+ ramrod_flags);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
@@ -5699,6 +5933,13 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
cids[cos] = fp->txdata_ptr[cos]->cid;
}
+ /* nothing more for vf to do here */
+ if (IS_VF(bp))
+ return;
+
+ bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+ fp->fw_sb_id, fp->igu_sb_id);
+ bnx2x_update_fpsb_idx(fp);
bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
@@ -5708,13 +5949,10 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
- fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
- fp->igu_sb_id);
- bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
- fp->fw_sb_id, fp->igu_sb_id);
-
- bnx2x_update_fpsb_idx(fp);
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
+ fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+ fp->igu_sb_id);
}
static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
@@ -5786,17 +6024,22 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
for_each_eth_queue(bp, i)
bnx2x_init_eth_fp(bp, i);
+
+ /* ensure status block indices were read */
+ rmb();
+ bnx2x_init_rx_rings(bp);
+ bnx2x_init_tx_rings(bp);
+
+ if (IS_VF(bp))
+ return;
+
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
bp->common.shmem_base, bp->common.shmem2_base,
BP_PORT(bp));
- /* ensure status block indices were read */
- rmb();
bnx2x_init_def_sb(bp);
bnx2x_update_dsb_idx(bp);
- bnx2x_init_rx_rings(bp);
- bnx2x_init_tx_rings(bp);
bnx2x_init_sp_ring(bp);
bnx2x_init_eq_ring(bp);
bnx2x_init_internal(bp, load_code);
@@ -6236,49 +6479,6 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
-static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
-{
- u32 offset = 0;
-
- if (CHIP_IS_E1(bp))
- return;
- if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
- return;
-
- switch (BP_ABS_FUNC(bp)) {
- case 0:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
- break;
- case 1:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
- break;
- case 2:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
- break;
- case 3:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
- break;
- case 4:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
- break;
- case 5:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
- break;
- case 6:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
- break;
- case 7:
- offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
- break;
- default:
- return;
- }
-
- REG_WR(bp, offset, pretend_func_num);
- REG_RD(bp, offset);
- DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
-}
-
void bnx2x_pf_disable(struct bnx2x *bp)
{
u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
@@ -6322,7 +6522,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
- * take the UNDI lock to protect undi_unload flow from accessing
+ * take the RESET lock to protect undi_unload flow from accessing
* registers while we're resetting the chip
*/
bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
@@ -6452,7 +6652,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
* queues with "old" ILT addresses.
* c. PF enable in the PGLC.
* d. Clear the was_error of the PF in the PGLC. (could have
- * occured while driver was down)
+ * occurred while driver was down)
* e. PF enable in the CFC (WEAK + STRONG)
* f. Timers scan enable
* 3. PF driver unload flow:
@@ -6493,7 +6693,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existance of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -6510,7 +6710,6 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
}
-
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
@@ -6535,6 +6734,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+ bnx2x_iov_init_dmae(bp);
+
/* clean the DMAE memory */
bp->dmae_ready = 1;
bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
@@ -6991,7 +7192,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
}
}
-
/* If SPIO5 is set to generate interrupts, enable it for this port */
val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
if (val & MISC_SPIO_SPIO5) {
@@ -7020,15 +7220,14 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
REG_WR_DMAE(bp, reg, wb_write, 2);
}
-static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
- u8 idu_sb_id, bool is_Pf)
+void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
{
u32 data, ctl, cnt = 100;
u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
+ u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -7219,8 +7418,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
/* FLR cleanup - hmmm */
if (!CHIP_IS_E1x(bp)) {
rc = bnx2x_pf_flr_clnup(bp);
- if (rc)
+ if (rc) {
+ bnx2x_fw_dump(bp);
return rc;
+ }
}
/* set MSI reconfigure capability */
@@ -7237,12 +7438,21 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
ilt = BP_ILT(bp);
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+ if (IS_SRIOV(bp))
+ cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
+ cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
+
+ /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
+ * those of the VFs, so start line should be reset
+ */
+ cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
bp->context[i].cxt_mapping;
ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
+
bnx2x_ilt_init_op(bp, INITOP_SET);
if (!CONFIGURE_NIC_MODE(bp)) {
@@ -7315,6 +7525,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+ bnx2x_iov_init_dq(bp);
+
bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
bnx2x_init_block(bp, BLOCK_PRS, init_phase);
bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
@@ -7523,10 +7736,6 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- /* fastpath */
- bnx2x_free_fp_mem(bp);
- /* end of fastpath */
-
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
sizeof(struct host_sp_status_block));
@@ -7547,69 +7756,11 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-}
-
-static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
-{
- int num_groups;
- int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
-
- /* number of queues for statistics is number of eth queues + FCoE */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
-
- /* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
- * num of queues
- */
- bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
-
-
- /* Request is built from stats_query_header and an array of
- * stats_query_cmd_group each of which contains
- * STATS_QUERY_CMD_COUNT rules. The real number or requests is
- * configured in the stats_query_header.
- */
- num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
- (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
-
- bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
- num_groups * sizeof(struct stats_query_cmd_group);
-
- /* Data for statistics requests + stats_conter
- *
- * stats_counter holds per-STORM counters that are incremented
- * when STORM has finished with the current request.
- *
- * memory for FCoE offloaded statistics are counted anyway,
- * even if they will not be sent.
- */
- bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
- sizeof(struct per_pf_stats) +
- sizeof(struct fcoe_statistics_params) +
- sizeof(struct per_queue_stats) * num_queue_stats +
- sizeof(struct stats_counter);
-
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- /* Set shortcuts */
- bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
- bp->fw_stats_req_mapping = bp->fw_stats_mapping;
-
- bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
- ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
-
- bp->fw_stats_data_mapping = bp->fw_stats_mapping +
- bp->fw_stats_req_sz;
- return 0;
-
-alloc_mem_err:
- BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
- BNX2X_ERR("Can't allocate memory\n");
- return -ENOMEM;
+ bnx2x_iov_free_mem(bp);
}
+
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp))
@@ -7655,10 +7806,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- /* Allocated memory for FW statistics */
- if (bnx2x_alloc_fw_stats_mem(bp))
- goto alloc_mem_err;
-
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
* functions because CDU differs in few aspects:
@@ -7687,6 +7834,9 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
+ if (bnx2x_iov_alloc_mem(bp))
+ goto alloc_mem_err;
+
/* Slow path ring */
BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
@@ -7694,13 +7844,6 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
BCM_PAGE_SIZE * NUM_EQ_PAGES);
-
- /* fastpath */
- /* need to be done at the end, since it's self adjusting to amount
- * of memory available for RSS queues
- */
- if (bnx2x_alloc_fp_mem(bp))
- goto alloc_mem_err;
return 0;
alloc_mem_err:
@@ -7803,43 +7946,53 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-void bnx2x_set_int_mode(struct bnx2x *bp)
+int bnx2x_set_int_mode(struct bnx2x *bp)
{
+ int rc = 0;
+
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ return -EINVAL;
+
switch (int_mode) {
- case INT_MODE_MSI:
+ case BNX2X_INT_MODE_MSIX:
+ /* attempt to enable msix */
+ rc = bnx2x_enable_msix(bp);
+
+ /* msix attained */
+ if (!rc)
+ return 0;
+
+ /* vfs use only msix */
+ if (rc && IS_VF(bp))
+ return rc;
+
+ /* failed to enable multiple MSI-X */
+ BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues,
+ 1 + bp->num_cnic_queues);
+
+ /* falling through... */
+ case BNX2X_INT_MODE_MSI:
bnx2x_enable_msi(bp);
+
/* falling through... */
- case INT_MODE_INTx:
+ case BNX2X_INT_MODE_INTX:
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* if we can't use MSI-X we only need one fp,
- * so try to enable MSI-X with the requested number of fp's
- * and fallback to MSI or legacy INTx with one fp
- */
- if (bnx2x_enable_msix(bp) ||
- bp->flags & USING_SINGLE_MSIX_FLAG) {
- /* failed to enable multiple MSI-X */
- BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
- bp->num_queues,
- 1 + bp->num_cnic_queues);
-
- bp->num_queues = 1 + bp->num_cnic_queues;
-
- /* Try to enable MSI */
- if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
- !(bp->flags & DISABLE_MSI_FLAG))
- bnx2x_enable_msi(bp);
- }
- break;
+ BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
+ return -EINVAL;
}
+ return 0;
}
-/* must be called prioir to any HW initializations */
+/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
+ if (IS_SRIOV(bp))
+ return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
return L2_ILT_LINES(bp);
}
@@ -8222,8 +8375,8 @@ static void bnx2x_reset_func(struct bnx2x *bp)
/* SP SB */
REG_WR8(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
- SB_DISABLED);
+ CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+ SB_DISABLED);
for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
@@ -8524,7 +8677,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
}
/* Give HW time to discard old tx messages */
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Clean all ETH MACs */
rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
@@ -8562,6 +8715,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
netif_addr_unlock_bh(bp->dev);
+ bnx2x_iov_chip_cleanup(bp);
/*
@@ -8947,7 +9101,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
if (pend_bits == 0)
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -8964,8 +9118,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
int cnt = 1000;
u32 val = 0;
u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
- u32 tags_63_32 = 0;
-
+ u32 tags_63_32 = 0;
/* Empty the Tetris buffer, wait for 1s */
do {
@@ -8983,7 +9136,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
(pgl_exp_rom2 == 0xffffffff) &&
(!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
break;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
} while (cnt-- > 0);
if (cnt <= 0) {
@@ -9016,7 +9169,7 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
/* Wait for 1ms to empty GLUE and PCI-E core queues,
* PSWHST, GRC and PSWRD Tetris buffer.
*/
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* Prepare to chip reset: */
/* MCP */
@@ -9299,8 +9452,10 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
rtnl_lock();
- if (!netif_running(bp->dev))
- goto sp_rtnl_exit;
+ if (!netif_running(bp->dev)) {
+ rtnl_unlock();
+ return;
+ }
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
@@ -9319,7 +9474,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_parity_recover(bp);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
@@ -9333,7 +9489,8 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
- goto sp_rtnl_exit;
+ rtnl_unlock();
+ return;
}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
@@ -9351,13 +9508,33 @@ sp_rtnl_not_reset:
DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
+ rtnl_unlock();
+ return;
}
-sp_rtnl_exit:
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set mcast vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_set_mcast(bp->dev);
+ }
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP,
+ "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+
+ /* work that needs the rtnl lock not taken (as it takes the lock itself and
+ * can be called from other contexts as well)
+ */
rtnl_unlock();
-}
-/* end of nic load/unload */
+ /* enable SR-IOV if applicable */
+ if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
+ &bp->sp_rtnl_state))
+ bnx2x_enable_sriov(bp);
+}
static void bnx2x_period_task(struct work_struct *work)
{
@@ -9394,43 +9571,13 @@ period_task_exit:
* Init service functions
*/
-static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
return base + (BP_ABS_FUNC(bp)) * stride;
}
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
-{
- u32 reg = bnx2x_get_pretend_reg(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Pretend to be function 0 */
- REG_WR(bp, reg, 0);
- REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
-
- /* From now we are in the "like-E1" mode */
- bnx2x_int_disable(bp);
-
- /* Flush all outstanding writes */
- mmiowb();
-
- /* Restore the original function */
- REG_WR(bp, reg, BP_ABS_FUNC(bp));
- REG_RD(bp, reg);
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
-{
- if (CHIP_IS_E1(bp))
- bnx2x_int_disable(bp);
- else
- bnx2x_undi_int_disable_e1h(bp);
-}
-
static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
struct bnx2x_mac_vals *vals)
{
@@ -9658,11 +9805,13 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
if (bnx2x_prev_is_path_marked(bp))
return bnx2x_prev_mcp_done(bp);
+ BNX2X_DEV_INFO("Path is unmarked\n");
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
*/
- rc = bnx2x_test_firmware_version(bp, false);
+ rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
if (!rc) {
/* fw version is good */
@@ -9718,7 +9867,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Check if the UNDI driver was previously loaded
* UNDI driver initializes CID offset for normal bell to 0x7
*/
- reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
if (tmp_reg == 0x7) {
@@ -9726,6 +9874,8 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
prev_undi = true;
/* clear the UNDI indication */
REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
/* wait until BRB is empty */
@@ -9792,7 +9942,8 @@ static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ DP(BNX2X_MSG_SP,
+ "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_FUNC(bp));
}
@@ -9834,7 +9985,6 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
-
do {
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
@@ -10401,10 +10551,10 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp)
static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
- mac_hi = cpu_to_be16(mac_hi);
- mac_lo = cpu_to_be32(mac_lo);
- memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
- memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+ __be16 mac_hi_be = cpu_to_be16(mac_hi);
+ __be32 mac_lo_be = cpu_to_be32(mac_lo);
+ memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
+ memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
}
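The fix above stores the byte-swapped values in properly typed __be16/__be32 locals instead of reusing the CPU-order parameters. As a hedged, plain-C sketch of the byte order the helper produces (illustrative only, not driver code):

#include <stdint.h>

static void sketch_set_mac_buf(uint8_t *mac_buf, uint32_t mac_lo, uint16_t mac_hi)
{
	/* upper 16 bits first, most-significant byte first ... */
	mac_buf[0] = mac_hi >> 8;
	mac_buf[1] = mac_hi & 0xff;
	/* ... then the lower 32 bits, also big-endian */
	mac_buf[2] = mac_lo >> 24;
	mac_buf[3] = (mac_lo >> 16) & 0xff;
	mac_buf[4] = (mac_lo >> 8) & 0xff;
	mac_buf[5] = mac_lo & 0xff;
}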
static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
@@ -10440,6 +10590,13 @@ static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
+ bp->flags |= NO_ISCSI_FLAG;
+ if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
+ PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
+ bp->flags |= NO_FCOE_FLAG;
+
BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
@@ -10547,21 +10704,21 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
/* Port info */
bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_upper);
bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_port_name_lower);
/* Node info */
bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_upper);
bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
SHMEM_RD(bp,
- dev_info.port_hw_config[port].
+ dev_info.port_hw_config[port].
fcoe_wwn_node_name_lower);
} else if (!IS_MF_SD(bp)) {
/*
@@ -10659,7 +10816,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
/* Zero primary MAC configuration */
memset(bp->dev->dev_addr, 0, ETH_ALEN);
- if (IS_MF_FCOE_AFEX(bp))
+ if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
/* use FIP MAC as primary MAC */
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
@@ -10722,7 +10879,6 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
- memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
@@ -10787,7 +10943,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
tout--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
@@ -11125,9 +11281,13 @@ static int bnx2x_init_bp(struct bnx2x *bp)
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
- rc = bnx2x_get_hwinfo(bp);
- if (rc)
- return rc;
+ if (IS_PF(bp)) {
+ rc = bnx2x_get_hwinfo(bp);
+ if (rc)
+ return rc;
+ } else {
+ random_ether_addr(bp->dev->dev_addr);
+ }
bnx2x_set_modes_bitmap(bp);
@@ -11140,7 +11300,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
func = BP_FUNC(bp);
/* need to reset chip if undi was active */
- if (!BP_NOMCP(bp)) {
+ if (IS_PF(bp) && !BP_NOMCP(bp)) {
/* init fw_seq */
bp->fw_seq =
SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
@@ -11177,6 +11337,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->mrrs = mrrs;
bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
+ if (IS_VF(bp))
+ bp->rx_ring_size = MAX_RX_AVAIL;
/* make sure that the numbers are in the right granularity */
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -11205,12 +11367,18 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->cnic_base_cl_id = FP_SB_MAX_E2;
/* multiple tx priority */
- if (CHIP_IS_E1x(bp))
+ if (IS_VF(bp))
+ bp->max_cos = 1;
+ else if (CHIP_IS_E1x(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
- if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+ else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
- if (CHIP_IS_E3B0(bp))
+ else if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ else
+ BNX2X_ERR("unknown chip %x revision %x\n",
+ CHIP_NUM(bp), CHIP_REV(bp));
+ BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
/* We need at least one default status block for slow-path events,
* second status block for the L2 queue, and a third status block for
@@ -11234,6 +11402,26 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
+static int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to be run, which invokes
+ * register_netdevice which must have rtnl lock taken. As we are holding
+ * the lock right now, that could only work if the probe would not take
+ * the lock. However, as the probe of the vf may be called from other
+ * contexts as well (such as passthrough to a vm fails) it can't assume
+ * the lock is being held for it. Using delayed work here allows the
+ * probe code to simply take the lock (i.e. wait for it to be released
+ * if it is being held).
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
+ return 0;
+}
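bnx2x_open_epilog() only requests SR-IOV enablement; the heavy lifting is deferred to the sp_rtnl work item for the locking reason spelled out in its comment. A hedged sketch of that request/worker split with hypothetical names (my_dev, my_sp_rtnl_task), not driver code:

#include <linux/bitops.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_dev {
	unsigned long sp_rtnl_state;
	struct delayed_work sp_rtnl_task;	/* runs my_sp_rtnl_task() */
};

#define MY_SP_RTNL_ENABLE_SRIOV	0

static void my_sp_rtnl_task(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  sp_rtnl_task.work);

	rtnl_lock();
	/* ... work that genuinely needs rtnl held ... */
	rtnl_unlock();

	/* SR-IOV is enabled with rtnl released: the VF probe path ends up
	 * in register_netdevice(), which takes rtnl on its own.
	 */
	if (test_and_clear_bit(MY_SP_RTNL_ENABLE_SRIOV, &dev->sp_rtnl_state)) {
		/* ... pci_enable_sriov() and friends ... */
	}
}

/* called with rtnl held, e.g. from ndo_open */
static void my_request_sriov_enable(struct my_dev *dev)
{
	/* the driver additionally brackets this with memory barriers */
	set_bit(MY_SP_RTNL_ENABLE_SRIOV, &dev->sp_rtnl_state);
	schedule_delayed_work(&dev->sp_rtnl_task, 0);
}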
+
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11241,6 +11429,7 @@ static int bnx2x_open(struct net_device *dev)
bool global = false;
int other_engine = BP_PATH(bp) ? 0 : 1;
bool other_load_status, load_status;
+ int rc;
bp->stats_init = true;
@@ -11248,53 +11437,57 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D0);
- other_load_status = bnx2x_get_load_status(bp, other_engine);
- load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
-
- /*
- * If parity had happen during the unload, then attentions
+ /* If parity had happen during the unload, then attentions
* and/or RECOVERY_IN_PROGRES may still be set. In this case we
* want the first function loaded on the current engine to
* complete the recovery.
+ * Parity recovery is only relevant for PF driver.
*/
- if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
- bnx2x_chk_parity_attn(bp, &global, true))
- do {
- /*
- * If there are attentions and they are in a global
- * blocks, set the GLOBAL_RESET bit regardless whether
- * it will be this function that will complete the
- * recovery or not.
- */
- if (global)
- bnx2x_set_reset_global(bp);
+ if (IS_PF(bp)) {
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
+ if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+ bnx2x_chk_parity_attn(bp, &global, true)) {
+ do {
+ /* If there are attentions and they are in a
+ * global blocks, set the GLOBAL_RESET bit
+ * regardless whether it will be this function
+ * that will complete the recovery or not.
+ */
+ if (global)
+ bnx2x_set_reset_global(bp);
- /*
- * Only the first function on the current engine should
- * try to recover in open. In case of attentions in
- * global blocks only the first in the chip should try
- * to recover.
- */
- if ((!load_status &&
- (!global || !other_load_status)) &&
- bnx2x_trylock_leader_lock(bp) &&
- !bnx2x_leader_reset(bp)) {
- netdev_info(bp->dev, "Recovered in open\n");
- break;
- }
+ /* Only the first function on the current
+ * engine should try to recover in open. In case
+ * of attentions in global blocks only the first
+ * in the chip should try to recover.
+ */
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
+ bnx2x_trylock_leader_lock(bp) &&
+ !bnx2x_leader_reset(bp)) {
+ netdev_info(bp->dev,
+ "Recovered in open\n");
+ break;
+ }
- /* recovery has failed... */
- bnx2x_set_power_state(bp, PCI_D3hot);
- bp->recovery_state = BNX2X_RECOVERY_FAILED;
+ /* recovery has failed... */
+ bnx2x_set_power_state(bp, PCI_D3hot);
+ bp->recovery_state = BNX2X_RECOVERY_FAILED;
- BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
- "If you still see this message after a few retries then power cycle is required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
- return -EAGAIN;
- } while (0);
+ return -EAGAIN;
+ } while (0);
+ }
+ }
bp->recovery_state = BNX2X_RECOVERY_DONE;
- return bnx2x_nic_load(bp, LOAD_OPEN);
+ rc = bnx2x_nic_load(bp, LOAD_OPEN);
+ if (rc)
+ return rc;
+ return bnx2x_open_epilog(bp);
}
/* called with rtnl_lock */
@@ -11428,7 +11621,6 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
return rc;
}
-
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
@@ -11449,12 +11641,25 @@ void bnx2x_set_rx_mode(struct net_device *dev)
CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else {
- /* some multicasts */
- if (bnx2x_set_mc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ if (IS_PF(bp)) {
+ /* some multicasts */
+ if (bnx2x_set_mc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_ALLMULTI;
- if (bnx2x_set_uc_list(bp) < 0)
- rx_mode = BNX2X_RX_MODE_PROMISC;
+ if (bnx2x_set_uc_list(bp) < 0)
+ rx_mode = BNX2X_RX_MODE_PROMISC;
+ } else {
+ /* configuring mcast to a vf involves sleeping (when we
+ * wait for the pf's response). Since this function is
+ * called from a non-sleepable context we must schedule
+ * a work item for this purpose
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
bp->rx_mode = rx_mode;
@@ -11468,7 +11673,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
return;
}
- bnx2x_set_storm_rx_mode(bp);
+ if (IS_PF(bp)) {
+ bnx2x_set_storm_rx_mode(bp);
+ } else {
+ /* configuring rx mode to storms in a vf involves sleeping (when
+ * we wait for the pf's response). Since this function is
+ * called from a non-sleepable context we must schedule
+ * a work item for this purpose
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
}
/* called with rtnl_lock */
@@ -11571,7 +11789,9 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_poll_controller = poll_bnx2x,
#endif
.ndo_setup_tc = bnx2x_setup_tc,
-
+#ifdef CONFIG_BNX2X_SRIOV
+ .ndo_set_vf_mac = bnx2x_set_vf_mac,
+#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
@@ -11595,10 +11815,9 @@ static int bnx2x_set_coherency_mask(struct bnx2x *bp)
return 0;
}
-static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
- unsigned long board_type)
+static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
+ struct net_device *dev, unsigned long board_type)
{
- struct bnx2x *bp;
int rc;
u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
@@ -11606,11 +11825,9 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
- bp = netdev_priv(dev);
bp->dev = dev;
bp->pdev = pdev;
- bp->flags = 0;
rc = pci_enable_device(pdev);
if (rc) {
@@ -11626,9 +11843,8 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
goto err_out_disable;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&bp->pdev->dev, "Cannot find second PCI device"
- " base address, aborting\n");
+ if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11653,12 +11869,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
pci_save_state(pdev);
}
- bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (bp->pm_cap == 0) {
- dev_err(&bp->pdev->dev,
- "Cannot find power management capability, aborting\n");
- rc = -EIO;
- goto err_out_release;
+ if (IS_PF(bp)) {
+ bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (bp->pm_cap == 0) {
+ dev_err(&bp->pdev->dev,
+ "Cannot find power management capability, aborting\n");
+ rc = -EIO;
+ goto err_out_release;
+ }
}
if (!pci_is_pcie(pdev)) {
@@ -11690,13 +11908,14 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* support Physical Device Assignment where kernel BDF maybe arbitrary
* (depending on hypervisor).
*/
- if (chip_is_e1x)
+ if (chip_is_e1x) {
bp->pf_num = PCI_FUNC(pdev->devfn);
- else {/* chip is E2/3*/
+ } else {
+ /* chip is E2/3 */
pci_read_config_dword(bp->pdev,
PCICFG_ME_REGISTER, &pci_cfg_dword);
bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
- ME_REG_ABS_PF_NUM_SHIFT);
+ ME_REG_ABS_PF_NUM_SHIFT);
}
BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
@@ -11709,25 +11928,28 @@ static int bnx2x_init_dev(struct pci_dev *pdev, struct net_device *dev,
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
*/
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+ if (IS_PF(bp)) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
+
+ if (chip_is_e1x) {
+ REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
+ REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ }
- if (chip_is_e1x) {
- REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
- REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
+ /* Enable internal target-read (in case we are probed after PF
+ * FLR). Must be done prior to any BAR read access. Only for
+ * 57712 and up
+ */
+ if (!chip_is_e1x)
+ REG_WR(bp,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
- /*
- * Enable internal target-read (in case we are probed after PF FLR).
- * Must be done prior to any BAR read access. Only for 57712 and up
- */
- if (!chip_is_e1x)
- REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
-
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11778,8 +12000,9 @@ err_out:
static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
{
- u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+ u32 val = 0;
+ pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
/* return value of 1=2.5GHz 2=5GHz */
@@ -11792,7 +12015,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
struct bnx2x_fw_file_hdr *fw_hdr;
struct bnx2x_fw_file_section *sections;
u32 offset, len, num_ops;
- u16 *ops_offsets;
+ __be16 *ops_offsets;
int i;
const u8 *fw_ver;
@@ -11817,7 +12040,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Likewise for the init_ops offsets */
offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
- ops_offsets = (u16 *)(firmware->data + offset);
+ ops_offsets = (__force __be16 *)(firmware->data + offset);
num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
@@ -12044,8 +12267,12 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
int cid_count = BNX2X_L2_MAX_CID(bp);
+ if (IS_SRIOV(bp))
+ cid_count += BNX2X_VF_CIDS;
+
if (CNIC_SUPPORT(bp))
cid_count += CNIC_CID_MAX;
+
return roundup(cid_count, QM_CID_ROUND);
}
@@ -12056,10 +12283,10 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt)
+ int cnic_cnt, bool is_vf)
{
- int pos;
- u16 control;
+ int pos, index;
+ u16 control = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
@@ -12067,85 +12294,114 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos)
+ if (!pos) {
+ dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
+ }
+ dev_info(&pdev->dev, "msix capability found\n");
/*
* The value in the PCI configuration space is the index of the last
* entry, namely one less than the actual size of the table, which is
* exactly what we want to return from this function: number of all SBs
* without the default SB.
+ * For VFs there is no default SB, hence we return (index + 1).
*/
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
- return control & PCI_MSIX_FLAGS_QSIZE;
-}
-struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *);
+ index = control & PCI_MSIX_FLAGS_QSIZE;
-static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *dev = NULL;
- struct bnx2x *bp;
- int pcie_width, pcie_speed;
- int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count, doorbell_size;
- int cnic_cnt;
- /*
- * An estimated maximum supported CoS number according to the chip
- * version.
- * We will try to roughly estimate the maximum number of CoSes this chip
- * may support in order to minimize the memory allocated for Tx
- * netdev_queue's. This number will be accurately calculated during the
- * initialization of bp->max_cos based on the chip versions AND chip
- * revision in the bnx2x_init_bp().
- */
- u8 max_cos_est = 0;
+ return is_vf ? index + 1 : index;
+}
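+/* Worked example: with an MSI-X table of 9 entries the QSIZE field reads 8,
+ * so a PF gets 8 non-default SBs (one vector serves its default SB) while a
+ * VF, which has no default SB, gets all 9 (index + 1).
+ */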
- switch (ent->driver_data) {
+static int set_max_cos_est(int chip_id)
+{
+ switch (chip_id) {
case BCM57710:
case BCM57711:
case BCM57711E:
- max_cos_est = BNX2X_MULTI_TX_COS_E1X;
- break;
-
+ return BNX2X_MULTI_TX_COS_E1X;
case BCM57712:
case BCM57712_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
- break;
-
+ case BCM57712_VF:
+ return BNX2X_MULTI_TX_COS_E2_E3A0;
case BCM57800:
case BCM57800_MF:
+ case BCM57800_VF:
case BCM57810:
case BCM57810_MF:
- case BCM57840_O:
case BCM57840_4_10:
case BCM57840_2_20:
+ case BCM57840_O:
case BCM57840_MFO:
+ case BCM57810_VF:
case BCM57840_MF:
+ case BCM57840_VF:
case BCM57811:
case BCM57811_MF:
- max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
- break;
-
+ case BCM57811_VF:
+ return BNX2X_MULTI_TX_COS_E3B0;
default:
- pr_err("Unknown board_type (%ld), aborting\n",
- ent->driver_data);
+ pr_err("Unknown board_type (%d), aborting\n", chip_id);
return -ENODEV;
}
+}
+
+static int set_is_vf(int chip_id)
+{
+ switch (chip_id) {
+ case BCM57712_VF:
+ case BCM57800_VF:
+ case BCM57810_VF:
+ case BCM57840_VF:
+ case BCM57811_VF:
+ return true;
+ default:
+ return false;
+ }
+}
- cnic_cnt = 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
- WARN_ON(!max_non_def_sbs);
+static int bnx2x_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *dev = NULL;
+ struct bnx2x *bp;
+ int pcie_width, pcie_speed;
+ int rc, max_non_def_sbs;
+ int rx_count, tx_count, rss_count, doorbell_size;
+ int max_cos_est;
+ bool is_vf;
+ int cnic_cnt;
+
+ /* An estimated maximum supported CoS number according to the chip
+ * version.
+ * We will try to roughly estimate the maximum number of CoSes this chip
+ * may support in order to minimize the memory allocated for Tx
+ * netdev_queue's. This number will be accurately calculated during the
+ * initialization of bp->max_cos based on the chip versions AND chip
+ * revision in the bnx2x_init_bp().
+ */
+ max_cos_est = set_max_cos_est(ent->driver_data);
+ if (max_cos_est < 0)
+ return max_cos_est;
+ is_vf = set_is_vf(ent->driver_data);
+ cnic_cnt = is_vf ? 0 : 1;
+
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = max_non_def_sbs - cnic_cnt;
+ rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+
+ if (rss_count < 1)
+ return -EINVAL;
/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
rx_count = rss_count + cnic_cnt;
- /*
- * Maximum number of netdev Tx queues:
+ /* Maximum number of netdev Tx queues:
* Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
tx_count = rss_count * max_cos_est + cnic_cnt;
@@ -12157,42 +12413,55 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp = netdev_priv(dev);
+ bp->flags = 0;
+ if (is_vf)
+ bp->flags |= IS_VF_FLAG;
+
bp->igu_sb_cnt = max_non_def_sbs;
+ bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
bp->msg_enable = debug;
bp->cnic_support = cnic_cnt;
bp->cnic_probe = bnx2x_cnic_probe;
pci_set_drvdata(pdev, dev);
- rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+ rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
if (rc < 0) {
free_netdev(dev);
return rc;
}
+ BNX2X_DEV_INFO("This is a %s function\n",
+ IS_PF(bp) ? "physical" : "virtual");
BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
- BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
-
+ BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
+ tx_count, rx_count);
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
- /*
- * Map doorbels here as we need the real value of bp->max_cos which
- * is initialized in bnx2x_init_bp().
+ /* Map doorbells here as we need the real value of bp->max_cos which
+ * is initialized in bnx2x_init_bp() to determine the number of
+ * l2 connections.
*/
- doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
- if (doorbell_size > pci_resource_len(pdev, 2)) {
- dev_err(&bp->pdev->dev,
- "Cannot map doorbells, bar size too small, aborting\n");
- rc = -ENOMEM;
- goto init_one_exit;
+ if (IS_VF(bp)) {
+ bnx2x_vf_map_doorbells(bp);
+ rc = bnx2x_vf_pci_alloc(bp);
+ if (rc)
+ goto init_one_exit;
+ } else {
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
+ bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+ doorbell_size);
}
- bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -12200,8 +12469,25 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_one_exit;
}
+ if (IS_VF(bp)) {
+ rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
+ if (rc)
+ goto init_one_exit;
+ }
+
+ /* Enable SR-IOV if the capability is found in configuration space.
+ * Once the generic SR-IOV framework makes it in from the
+ * PCI tree this will be revised to allow dynamic control
+ * over the number of VFs. For now, change the number-of-VFs
+ * parameter below to enable SR-IOV.
+ */
+ rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ if (rc)
+ goto init_one_exit;
+
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+ BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
/* disable FCOE L2 queue for E1x*/
if (CHIP_IS_E1x(bp))
@@ -12223,13 +12509,20 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Configure interrupt mode: try to enable MSI-X/MSI if
* needed.
*/
- bnx2x_set_int_mode(bp);
+ rc = bnx2x_set_int_mode(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Cannot set interrupts\n");
+ goto init_one_exit;
+ }
+ BNX2X_DEV_INFO("set interrupts successfully\n");
+ /* register the net device */
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto init_one_exit;
}
+ BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
if (!NO_FCOE(bp)) {
@@ -12240,6 +12533,8 @@ static int bnx2x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+ BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
+ pcie_width, pcie_speed);
BNX2X_DEV_INFO(
"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
@@ -12257,7 +12552,7 @@ init_one_exit:
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
+ if (IS_PF(bp) && bp->doorbells)
iounmap(bp->doorbells);
free_netdev(dev);
@@ -12297,25 +12592,37 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
/* Power on: we can't let PCI layer write to us while we are in D3 */
- bnx2x_set_power_state(bp, PCI_D0);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D0);
/* Disable MSI/MSI-X */
bnx2x_disable_msi(bp);
/* Power off */
- bnx2x_set_power_state(bp, PCI_D3hot);
+ if (IS_PF(bp))
+ bnx2x_set_power_state(bp, PCI_D3hot);
/* Make sure RESET task is not scheduled before continuing */
cancel_delayed_work_sync(&bp->sp_rtnl_task);
+ bnx2x_iov_remove_one(bp);
+
+ /* send message via vfpf channel to release the resources of this vf */
+ if (IS_VF(bp))
+ bnx2x_vfpf_release(bp);
+
if (bp->regview)
iounmap(bp->regview);
- if (bp->doorbells)
- iounmap(bp->doorbells);
-
- bnx2x_release_firmware(bp);
+ /* for a VF the doorbells are part of the regview and were unmapped
+ * along with it. FW is only loaded by the PF.
+ */
+ if (IS_PF(bp)) {
+ if (bp->doorbells)
+ iounmap(bp->doorbells);
+ bnx2x_release_firmware(bp);
+ }
bnx2x_free_mem_bp(bp);
free_netdev(dev);
@@ -13103,4 +13410,36 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
return cp;
}
+u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ u32 offset = BAR_USTRORM_INTMEM;
+ if (IS_VF(bp))
+ return bnx2x_vf_ustorm_prods_offset(bp, fp);
+ else if (!CHIP_IS_E1x(bp))
+ offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+ else
+ offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+
+ return offset;
+}
+
+/* called only on E1H or E2.
+ * When pretending to be a PF, the pretend value is the function number 0...7.
+ * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
+ * combination.
+ */
+int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
+{
+ u32 pretend_reg;
+
+ if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
+ return -1;
+
+ /* get my own pretend register */
+ pretend_reg = bnx2x_get_pretend_reg(bp);
+ REG_WR(bp, pretend_reg, pretend_func_val);
+ REG_RD(bp, pretend_reg);
+ return 0;
+}
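+/* Typical (illustrative) usage, assuming the usual bnx2x helpers: pretend to
+ * the target function, perform the GRC accesses on its behalf, then restore
+ * by pretending back to our own function, e.g.
+ *
+ *	bnx2x_pretend_func(bp, pretend_val);
+ *	... REG_WR / REG_RD on behalf of the pretended function ...
+ *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+ */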
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index ddd5106ad2f..caf1aef651e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,6 @@
/* bnx2x_mfw_req.h: Broadcom Everest network driver.
*
- * Copyright (c) 2012 Broadcom Corporation
+ * Copyright (c) 2012-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bc2f65b3264..791eb2d5301 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -825,6 +825,7 @@
/* [RW 28] The value sent to CM header in the case of CFC load error. */
#define DORQ_REG_ERR_CMHEAD 0x170058
#define DORQ_REG_IF_EN 0x170004
+#define DORQ_REG_MAX_RVFID_SIZE 0x1701ec
#define DORQ_REG_MODE_ACT 0x170008
/* [RW 5] The normal mode CID extraction offset. */
#define DORQ_REG_NORM_CID_OFST 0x17002c
@@ -847,6 +848,22 @@
writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
read reads this written value. */
#define DORQ_REG_RSP_INIT_CRD 0x170048
+#define DORQ_REG_RSPB_CRD_CNT 0x1700b0
+#define DORQ_REG_VF_NORM_CID_BASE 0x1701a0
+#define DORQ_REG_VF_NORM_CID_OFST 0x1701f4
+#define DORQ_REG_VF_NORM_CID_WND_SIZE 0x1701a4
+#define DORQ_REG_VF_NORM_MAX_CID_COUNT 0x1701e4
+#define DORQ_REG_VF_NORM_VF_BASE 0x1701a8
+/* [RW 10] VF type validation mask value */
+#define DORQ_REG_VF_TYPE_MASK_0 0x170218
+/* [RW 17] VF type validation Max MCID value */
+#define DORQ_REG_VF_TYPE_MAX_MCID_0 0x1702d8
+/* [RW 17] VF type validation Min MCID value */
+#define DORQ_REG_VF_TYPE_MIN_MCID_0 0x170298
+/* [RW 10] VF type validation comp value */
+#define DORQ_REG_VF_TYPE_VALUE_0 0x170258
+#define DORQ_REG_VF_USAGE_CT_LIMIT 0x170340
+
/* [RW 4] Initial activity counter value on the load request; when the
shortcut is done. */
#define DORQ_REG_SHRT_ACT_CNT 0x170070
@@ -859,6 +876,7 @@
#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 (0x1<<2)
#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0 (0x1<<1)
#define HC_CONFIG_1_REG_BLOCK_DISABLE_1 (0x1<<0)
+#define DORQ_REG_VF_USAGE_CNT 0x170320
#define HC_REG_AGG_INT_0 0x108050
#define HC_REG_AGG_INT_1 0x108054
#define HC_REG_ATTN_BIT 0x108120
@@ -2136,6 +2154,8 @@
/* [R 32] Interrupt register #0 read */
#define NIG_REG_NIG_INT_STS_0 0x103b0
#define NIG_REG_NIG_INT_STS_1 0x103c0
+/* [RC 32] Interrupt register #0 read clear */
+#define NIG_REG_NIG_INT_STS_CLR_0 0x103b4
/* [R 32] Legacy E1 and E1H location for parity error mask register. */
#define NIG_REG_NIG_PRTY_MASK 0x103dc
/* [RW 32] Parity mask register #0 read/write */
@@ -2571,6 +2591,7 @@
current task in process). */
#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
#define PBF_REG_DISABLE_PF 0x1402e8
+#define PBF_REG_DISABLE_VF 0x1402ec
/* [RW 18] For port 0: For each client that is subject to WFQ (the
* corresponding bit is 1); indicates to which of the credit registers this
* client is mapped. For clients which are not credit blocked; their mapping
@@ -3708,6 +3729,10 @@
#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
/* [WB 160] Used for initialization of the inbound interrupts memory */
#define PXP_REG_HST_INBOUND_INT 0x103800
+/* [RW 7] Indirect access to the permission table. The fields are : {Valid;
+ * VFID[5:0]}
+ */
+#define PXP_REG_HST_ZONE_PERMISSION_TABLE 0x103400
/* [RW 32] Interrupt mask register #0 read/write */
#define PXP_REG_PXP_INT_MASK_0 0x103074
#define PXP_REG_PXP_INT_MASK_1 0x103084
@@ -5966,6 +5991,7 @@
#define HW_LOCK_RESOURCE_SPIO 2
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_HW_INTERRUPT (0x1<<19)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT (0x1<<31)
#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR (0x1<<30)
@@ -6305,6 +6331,15 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
+ */
#define PXPCS_TL_CONTROL_5 0x814
#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
@@ -6554,6 +6589,27 @@
(7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
+#define PXP_VF_ADDR_IGU_START 0
+#define PXP_VF_ADDR_IGU_SIZE 0x3000
+#define PXP_VF_ADDR_IGU_END\
+ ((PXP_VF_ADDR_IGU_START) + (PXP_VF_ADDR_IGU_SIZE) - 1)
+
+#define PXP_VF_ADDR_USDM_QUEUES_START 0x3000
+#define PXP_VF_ADDR_USDM_QUEUES_SIZE\
+ (PXP_VF_ADRR_NUM_QUEUES * PXP_ADDR_QUEUE_SIZE)
+#define PXP_VF_ADDR_USDM_QUEUES_END\
+ ((PXP_VF_ADDR_USDM_QUEUES_START) + (PXP_VF_ADDR_USDM_QUEUES_SIZE) - 1)
+
+#define PXP_VF_ADDR_CSDM_GLOBAL_START 0x7600
+#define PXP_VF_ADDR_CSDM_GLOBAL_SIZE (PXP_ADDR_REG_SIZE)
+#define PXP_VF_ADDR_CSDM_GLOBAL_END\
+ ((PXP_VF_ADDR_CSDM_GLOBAL_START) + (PXP_VF_ADDR_CSDM_GLOBAL_SIZE) - 1)
+
+#define PXP_VF_ADDR_DB_START 0x7c00
+#define PXP_VF_ADDR_DB_SIZE 0x200
+#define PXP_VF_ADDR_DB_END\
+ ((PXP_VF_ADDR_DB_START) + (PXP_VF_ADDR_DB_SIZE) - 1)
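+/* Example: with the values above, the VF doorbell window spans
+ * PXP_VF_ADDR_DB_START (0x7c00) through
+ * PXP_VF_ADDR_DB_END (0x7c00 + 0x200 - 1 = 0x7dff), and the IGU window
+ * spans 0x0 through 0x2fff.
+ */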
+
#define MDIO_REG_BANK_CL73_IEEEB0 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 09b625e0fda..7306416bc90 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
/* bnx2x_sp.c: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -325,7 +325,7 @@ static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
return 0;
}
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
if (bp->panic)
return -EIO;
@@ -707,7 +707,8 @@ static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
struct eth_classify_header *hdr, int rule_cnt)
{
- hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+ hdr->echo = cpu_to_le32((cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
hdr->rule_cnt = (u8)rule_cnt;
}
@@ -813,8 +814,9 @@ static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
hdr->length = 1;
hdr->offset = (u8)cam_offset;
- hdr->client_id = 0xff;
- hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+ hdr->client_id = cpu_to_le16(0xff);
+ hdr->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (type << BNX2X_SWCID_SHIFT));
}
static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
@@ -903,7 +905,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
@@ -953,7 +955,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
(struct eth_classify_rules_ramrod_data *)(raw->rdata);
int rule_cnt = rule_idx + 1;
union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
@@ -1407,7 +1409,7 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
/* Wait until there are no pending commands */
if (!bnx2x_exe_queue_empty(exeq))
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
else
return 0;
}
@@ -1442,7 +1444,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
if (cqe->message.error)
return -EINVAL;
- /* Run the next bulk of pending commands if requeted */
+ /* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
if (rc < 0)
@@ -1532,7 +1534,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
bool restore,
struct bnx2x_vlan_mac_registry_elem **re)
{
- int cmd = elem->cmd_data.vlan_mac.cmd;
+ enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
struct bnx2x_vlan_mac_registry_elem *reg_elem;
/* Allocate a new registry element if needed. */
@@ -1591,7 +1593,7 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
struct bnx2x_vlan_mac_registry_elem *reg_elem;
- int cmd;
+ enum bnx2x_vlan_mac_cmd cmd;
/*
* If DRIVER_ONLY execution is requested, cleanup a registry
@@ -2103,7 +2105,7 @@ static inline void __storm_memset_mac_filters(struct bnx2x *bp,
static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
struct bnx2x_rx_mode_ramrod_params *p)
{
- /* update the bp MAC filter structure */
+ /* update the bp MAC filter structure */
u32 mask = (1 << p->cl_id);
struct tstorm_eth_mac_filter_config *mac_filters =
@@ -2166,7 +2168,7 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
mac_filters->unmatched_unicast & ~mask;
DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
- "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
mac_filters->bcast_accept_all);
@@ -2186,12 +2188,12 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
struct eth_classify_header *hdr,
u8 rule_cnt)
{
- hdr->echo = cid;
+ hdr->echo = cpu_to_le32(cid);
hdr->rule_cnt = rule_cnt;
}
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
- unsigned long accept_flags,
+ unsigned long *accept_flags,
struct eth_filter_rules_cmd *cmd,
bool clear_accept_all)
{
@@ -2201,33 +2203,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (accept_flags) {
- if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
- }
+ if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
- state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
- state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
- }
- if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+ if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+ state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+ state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+ }
- if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
- state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
- state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
- }
- if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
- state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+ if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+ if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+ state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+ state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
}
+ if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+ state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+
/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
if (clear_accept_all) {
state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
@@ -2260,8 +2262,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
/* Rx */
@@ -2272,8 +2275,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]), false);
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx++]),
+ false);
}
@@ -2293,9 +2297,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_TX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
/* Rx */
@@ -2306,9 +2311,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->rules[rule_idx].cmd_general_data =
ETH_FILTER_RULES_CMD_RX_CMD;
- bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
- &(data->rules[rule_idx++]),
+ bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+ &(data->rules[rule_idx]),
true);
+ rule_idx++;
}
}
@@ -2429,7 +2435,7 @@ static int bnx2x_mcast_wait(struct bnx2x *bp,
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
struct bnx2x_mcast_obj *o,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int total_sz;
struct bnx2x_pending_mcast_cmd *new_cmd;
@@ -2561,7 +2567,7 @@ static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct eth_multicast_rules_ramrod_data *data =
@@ -2625,7 +2631,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e2(
int *rdata_idx)
{
int cur_bin, cnt = *rdata_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the bins from it */
for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
@@ -2657,7 +2663,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
{
struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
int cnt = *line_idx;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
link) {
@@ -2780,7 +2786,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
int *line_idx)
{
struct bnx2x_mcast_list_elem *mlist_pos;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = *line_idx;
list_for_each_entry(mlist_pos, &p->mcast_list, link) {
@@ -2790,7 +2796,7 @@ static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- mlist_pos->mac);
+ mlist_pos->mac);
}
*line_idx = cnt;
@@ -2827,7 +2833,8 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
* Returns number of lines filled in the ramrod data in total.
*/
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd,
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd,
int start_cnt)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -2861,7 +2868,7 @@ static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -2930,8 +2937,9 @@ static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
struct eth_multicast_rules_ramrod_data *data =
(struct eth_multicast_rules_ramrod_data *)(r->rdata);
- data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->header.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->header.rule_cnt = len;
}
@@ -2965,7 +2973,7 @@ static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3051,7 +3059,7 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
/* Mark that there is work to do */
if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
@@ -3085,7 +3093,7 @@ static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
- mlist_pos->mac, bit);
+ mlist_pos->mac, bit);
/* bookkeeping... */
BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
@@ -3113,7 +3121,7 @@ static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
*/
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
int i;
struct bnx2x_mcast_obj *o = p->mcast_obj;
@@ -3167,7 +3175,7 @@ static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
int reg_sz = o->get_registry_size(o);
@@ -3240,7 +3248,7 @@ static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
union bnx2x_mcast_config_data *cfg_data,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_raw_obj *r = &o->raw;
struct mac_configuration_cmd *data =
@@ -3284,9 +3292,10 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
BNX2X_MAX_MULTICAST*(1 + r->func_id));
data->hdr.offset = offset;
- data->hdr.client_id = 0xff;
- data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
- (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+ data->hdr.client_id = cpu_to_le16(0xff);
+ data->hdr.echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (BNX2X_FILTER_MCAST_PENDING <<
+ BNX2X_SWCID_SHIFT));
data->hdr.length = len;
}
@@ -3309,7 +3318,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
{
struct bnx2x_mcast_mac_elem *elem;
int i = 0;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
/* go through the registry and configure the MACs from it. */
list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
@@ -3319,7 +3328,7 @@ static inline int bnx2x_mcast_handle_restore_cmd_e1(
i++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- cfg_data.mac);
+ cfg_data.mac);
}
*rdata_idx = i;
@@ -3334,7 +3343,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
struct bnx2x_pending_mcast_cmd *cmd_pos;
struct bnx2x_mcast_mac_elem *pmac_pos;
struct bnx2x_mcast_obj *o = p->mcast_obj;
- union bnx2x_mcast_config_data cfg_data = {0};
+ union bnx2x_mcast_config_data cfg_data = {NULL};
int cnt = 0;
@@ -3355,7 +3364,7 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- pmac_pos->mac);
+ pmac_pos->mac);
}
break;
@@ -3458,7 +3467,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *raw = &o->raw;
@@ -3562,7 +3571,7 @@ static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
int bnx2x_config_mcast(struct bnx2x *bp,
struct bnx2x_mcast_ramrod_params *p,
- int cmd)
+ enum bnx2x_mcast_cmd cmd)
{
struct bnx2x_mcast_obj *o = p->mcast_obj;
struct bnx2x_raw_obj *r = &o->raw;
@@ -4085,8 +4094,8 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "Configuring RSS\n");
/* Set an echo field */
- data->echo = (r->cid & BNX2X_SWCID_MASK) |
- (r->state << BNX2X_SWCID_SHIFT);
+ data->echo = cpu_to_le32((r->cid & BNX2X_SWCID_MASK) |
+ (r->state << BNX2X_SWCID_SHIFT));
/* RSS mode */
if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
@@ -4237,11 +4246,16 @@ int bnx2x_queue_state_change(struct bnx2x *bp,
unsigned long *pending = &o->pending;
/* Check that the requested transition is legal */
- if (o->check_transition(bp, o, params))
+ rc = o->check_transition(bp, o, params);
+ if (rc) {
+ BNX2X_ERR("check transition returned an error. rc %d\n", rc);
return -EINVAL;
+ }
/* Set "pending" bit */
+ DP(BNX2X_MSG_SP, "pending bit was=%lx\n", o->pending);
pending_bit = o->set_pending(o, params);
+ DP(BNX2X_MSG_SP, "pending bit now=%lx\n", o->pending);
/* Don't send a command if only driver cleanup was requested */
if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
@@ -5025,8 +5039,11 @@ static int bnx2x_queue_chk_transition(struct bnx2x *bp,
* Don't allow a next state transition if we are in the middle of
* the previous one.
*/
- if (o->pending)
+ if (o->pending) {
+ BNX2X_ERR("Blocking transition since pending was %lx\n",
+ o->pending);
return -EBUSY;
+ }
switch (state) {
case BNX2X_Q_STATE_RESET:
@@ -5199,6 +5216,27 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
obj->set_pending = bnx2x_queue_set_pending;
}
+/* return a queue object's logical state */
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj)
+{
+ switch (obj->state) {
+ case BNX2X_Q_STATE_ACTIVE:
+ case BNX2X_Q_STATE_MULTI_COS:
+ return BNX2X_Q_LOGICAL_STATE_ACTIVE;
+ case BNX2X_Q_STATE_RESET:
+ case BNX2X_Q_STATE_INITIALIZED:
+ case BNX2X_Q_STATE_MCOS_TERMINATED:
+ case BNX2X_Q_STATE_INACTIVE:
+ case BNX2X_Q_STATE_STOPPED:
+ case BNX2X_Q_STATE_TERMINATED:
+ case BNX2X_Q_STATE_FLRED:
+ return BNX2X_Q_LOGICAL_STATE_STOPPED;
+ default:
+ return -EINVAL;
+ }
+}
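+/* Note: the VF code added in this series appears to use this helper to make
+ * queue construction/destruction idempotent - e.g. a constructor bails out
+ * early when the logical state is already ACTIVE, and a destructor when it
+ * is already STOPPED (see bnx2x_vfop_qctor/qdtor in bnx2x_sriov.c below).
+ */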
+
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
@@ -5631,9 +5669,9 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
/*
@@ -5716,21 +5754,20 @@ inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o = params->f_obj;
struct afex_vif_list_ramrod_data *rdata =
(struct afex_vif_list_ramrod_data *)o->afex_rdata;
- struct bnx2x_func_afex_viflists_params *afex_viflist_params =
+ struct bnx2x_func_afex_viflists_params *afex_vif_params =
&params->params.afex_viflists;
u64 *p_rdata = (u64 *)rdata;
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->vif_list_index = afex_viflist_params->vif_list_index;
- rdata->func_bit_map = afex_viflist_params->func_bit_map;
- rdata->afex_vif_list_command =
- afex_viflist_params->afex_vif_list_command;
- rdata->func_to_clear = afex_viflist_params->func_to_clear;
+ rdata->vif_list_index = cpu_to_le16(afex_vif_params->vif_list_index);
+ rdata->func_bit_map = afex_vif_params->func_bit_map;
+ rdata->afex_vif_list_command = afex_vif_params->afex_vif_list_command;
+ rdata->func_to_clear = afex_vif_params->func_to_clear;
/* send in echo type of sub command */
- rdata->echo = afex_viflist_params->afex_vif_list_command;
+ rdata->echo = afex_vif_params->afex_vif_list_command;
/* No need for an explicit memory barrier here as long we would
* need to ensure the ordering of writing to the SPQ element
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index adbd91b1bdf..ff907609b9f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
/* bnx2x_sp.h: Broadcom Everest network driver.
*
- * Copyright (c) 2011-2012 Broadcom Corporation
+ * Copyright (c) 2011-2013 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -54,7 +54,7 @@ typedef enum {
BNX2X_OBJ_TYPE_RX_TX,
} bnx2x_obj_type;
-/* Filtering states */
+/* Public slow path states */
enum {
BNX2X_FILTER_MAC_PENDING,
BNX2X_FILTER_VLAN_PENDING,
@@ -524,7 +524,7 @@ struct bnx2x_mcast_ramrod_params {
int mcast_list_len;
};
-enum {
+enum bnx2x_mcast_cmd {
BNX2X_MCAST_CMD_ADD,
BNX2X_MCAST_CMD_CONT,
BNX2X_MCAST_CMD_DEL,
@@ -573,7 +573,8 @@ struct bnx2x_mcast_obj {
* @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
*/
int (*config_mcast)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Fills the ramrod data during the RESTORE flow.
@@ -590,11 +591,13 @@ struct bnx2x_mcast_obj {
int start_bin, int *rdata_idx);
int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
void (*set_one_rule)(struct bnx2x *bp,
struct bnx2x_mcast_obj *o, int idx,
- union bnx2x_mcast_config_data *cfg_data, int cmd);
+ union bnx2x_mcast_config_data *cfg_data,
+ enum bnx2x_mcast_cmd cmd);
/** Checks if there are more mcast MACs to be set or a previous
* command is still pending.
@@ -617,7 +620,8 @@ struct bnx2x_mcast_obj {
* feasible.
*/
int (*validate)(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/**
* Restore the values of internal counters in case of a failure.
@@ -776,6 +780,12 @@ enum bnx2x_q_state {
BNX2X_Q_STATE_MAX,
};
+/* Allowed Queue states */
+enum bnx2x_q_logical_state {
+ BNX2X_Q_LOGICAL_STATE_ACTIVE,
+ BNX2X_Q_LOGICAL_STATE_STOPPED,
+};
+
/* Allowed commands */
enum bnx2x_queue_cmd {
BNX2X_Q_CMD_INIT,
@@ -1261,6 +1271,9 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
int bnx2x_queue_state_change(struct bnx2x *bp,
struct bnx2x_queue_state_params *params);
+int bnx2x_get_q_logical_state(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj);
+
/********************* VLAN-MAC ****************/
void bnx2x_init_mac_obj(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
@@ -1338,7 +1351,8 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* completions.
*/
int bnx2x_config_mcast(struct bnx2x *bp,
- struct bnx2x_mcast_ramrod_params *p, int cmd);
+ struct bnx2x_mcast_ramrod_params *p,
+ enum bnx2x_mcast_cmd cmd);
/****************** CREDIT POOL ****************/
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
new file mode 100644
index 00000000000..6adfa209358
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -0,0 +1,3198 @@
+/* bnx2x_sriov.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ *
+ */
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+/* General service functions */
+static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+ u16 pf_id)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+ pf_id);
+}
+
+static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+ u8 enable)
+{
+ REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+ REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+ enable);
+}
+
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ int idx;
+
+ for_each_vf(bp, idx)
+ if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
+ break;
+ return idx;
+}
+
+static
+struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
+{
+ u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
+ return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
+}
+
+static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ u8 igu_sb_id, u8 segment, u16 index, u8 op,
+ u8 update)
+{
+ /* acking a VF sb through the PF - use the GRC */
+ u32 ctl;
+ u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+ u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+ u32 func_encode = vf->abs_vfid;
+ u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
+ struct igu_regular cmd_data = {0};
+
+ cmd_data.sb_id_and_flags =
+ ((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+ (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+ (update << IGU_REGULAR_BUPDATE_SHIFT) |
+ (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+ ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
+ func_encode << IGU_CTRL_REG_FID_SHIFT |
+ IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ cmd_data.sb_id_and_flags, igu_addr_data);
+ REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
+ mmiowb();
+ barrier();
+
+ DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+ ctl, igu_addr_ctl);
+ REG_WR(bp, igu_addr_ctl, ctl);
+ mmiowb();
+ barrier();
+}
+/* VFOP - VF slow-path operation support */
+
+#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
+
+/* VFOP operations states */
+enum bnx2x_vfop_qctor_state {
+ BNX2X_VFOP_QCTOR_INIT,
+ BNX2X_VFOP_QCTOR_SETUP,
+ BNX2X_VFOP_QCTOR_INT_EN
+};
+
+enum bnx2x_vfop_qdtor_state {
+ BNX2X_VFOP_QDTOR_HALT,
+ BNX2X_VFOP_QDTOR_TERMINATE,
+ BNX2X_VFOP_QDTOR_CFCDEL,
+ BNX2X_VFOP_QDTOR_DONE
+};
+
+enum bnx2x_vfop_vlan_mac_state {
+ BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+ BNX2X_VFOP_VLAN_MAC_CLEAR,
+ BNX2X_VFOP_VLAN_MAC_CHK_DONE,
+ BNX2X_VFOP_MAC_CONFIG_LIST,
+ BNX2X_VFOP_VLAN_CONFIG_LIST,
+ BNX2X_VFOP_VLAN_CONFIG_LIST_0
+};
+
+enum bnx2x_vfop_qsetup_state {
+ BNX2X_VFOP_QSETUP_CTOR,
+ BNX2X_VFOP_QSETUP_VLAN0,
+ BNX2X_VFOP_QSETUP_DONE
+};
+
+enum bnx2x_vfop_mcast_state {
+ BNX2X_VFOP_MCAST_DEL,
+ BNX2X_VFOP_MCAST_ADD,
+ BNX2X_VFOP_MCAST_CHK_DONE
+};
+enum bnx2x_vfop_qflr_state {
+ BNX2X_VFOP_QFLR_CLR_VLAN,
+ BNX2X_VFOP_QFLR_CLR_MAC,
+ BNX2X_VFOP_QFLR_TERMINATE,
+ BNX2X_VFOP_QFLR_DONE
+};
+
+enum bnx2x_vfop_flr_state {
+ BNX2X_VFOP_FLR_QUEUES,
+ BNX2X_VFOP_FLR_HW
+};
+
+enum bnx2x_vfop_close_state {
+ BNX2X_VFOP_CLOSE_QUEUES,
+ BNX2X_VFOP_CLOSE_HW
+};
+
+enum bnx2x_vfop_rxmode_state {
+ BNX2X_VFOP_RXMODE_CONFIG,
+ BNX2X_VFOP_RXMODE_DONE
+};
+
+enum bnx2x_vfop_qteardown_state {
+ BNX2X_VFOP_QTEARDOWN_RXMODE,
+ BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
+ BNX2X_VFOP_QTEARDOWN_CLR_MAC,
+ BNX2X_VFOP_QTEARDOWN_QDTOR,
+ BNX2X_VFOP_QTEARDOWN_DONE
+};
+
+#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
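+
+/* A rough sketch of the VFOP life cycle as used in this file (names taken
+ * from the code below): a vfop is allocated with bnx2x_vfop_add(), its first
+ * state and completion callback are set with bnx2x_vfop_opset(), and it is
+ * kicked off with bnx2x_vfop_transition(). Each state handler then uses
+ * bnx2x_vfop_finalize() with VFOP_CONT or VFOP_DONE to either move on to the
+ * next state, wait for an outstanding ramrod completion, or complete via
+ * bnx2x_vfop_end().
+ */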
+
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->tx.sb_cq_index,
+ init_params->tx.hc_rate,
+ setup_params->flags,
+ setup_params->txq_params.traffic_type);
+}
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx)
+{
+ struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+ "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+ vf->abs_vfid,
+ q_idx,
+ sb_idx,
+ init_params->rx.sb_cq_index,
+ init_params->rx.hc_rate,
+ setup_params->gen_params.mtu,
+ rxq_params->buf_sz,
+ rxq_params->sge_buf_sz,
+ rxq_params->max_sges_pkt,
+ rxq_params->tpa_agg_sz,
+ setup_params->flags,
+ rxq_params->drop_flags,
+ rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vfop_qctor_params *p,
+ unsigned long q_type)
+{
+ struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+ struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+ /* INIT */
+
+ /* Enable host coalescing in the transition to INIT state */
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+ if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+ __set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+ /* FW SB ID */
+ init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+ /* context */
+ init_p->cxts[0] = q->cxt;
+
+ /* SETUP */
+
+ /* Setup-op general parameters */
+ setup_p->gen_params.spcl_id = vf->sp_cl_id;
+ setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+
+ /* Setup-op pause params:
+ * Nothing to do, the pause thresholds are set by default to 0 which
+ * effectively turns off the feature for this queue. We don't want
+ * one queue (VF) interfering with another queue (another VF).
+ */
+ if (vf->cfg_flags & VF_CFG_FW_FC)
+ BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
+ vf->abs_vfid);
+ /* Setup-op flags:
+ * collect statistics, zero statistics, local-switching, security,
+ * OV for Flex10, RSS and MCAST for leading
+ */
+ if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+ /* for VFs, enable tx switching, bd coherency, and mac address
+ * anti-spoofing
+ */
+ __set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+ if (vfq_is_leading(q)) {
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
+ __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+ }
+
+ /* Setup-op rx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+ struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+ rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+ rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+ rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+ }
+
+ /* Setup-op tx parameters */
+ if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+ setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+ setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+ }
+}
+
+/* VFOP queue construction */
+static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
+ struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+ enum bnx2x_vfop_qctor_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QCTOR_INIT:
+
+ /* has this queue already been opened? */
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ DP(BNX2X_MSG_IOV,
+ "Entered qctor but queue was already up. Aborting gracefully\n");
+ goto op_done;
+ }
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QCTOR_SETUP;
+
+ q_params->cmd = BNX2X_Q_CMD_INIT;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QCTOR_SETUP:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
+
+ /* copy pre-prepared setup params to the queue-state params */
+ vfop->op_p->qctor.qstate.params.setup =
+ vfop->op_p->qctor.prep_qsetup;
+
+ q_params->cmd = BNX2X_Q_CMD_SETUP;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QCTOR_INT_EN:
+
+ /* enable interrupts */
+ bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
+ USTORM_ID, 0, IGU_INT_ENABLE, 0);
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
+ vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
+op_done:
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+ vfop->args.qctor.qid = qid;
+ vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
+ bnx2x_vfop_qctor, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue destruction */
+static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
+ struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+ enum bnx2x_vfop_qdtor_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QDTOR_HALT:
+
+ /* has this queue already been stopped? */
+ if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED) {
+ DP(BNX2X_MSG_IOV,
+ "Entered qdtor but queue was already stopped. Aborting gracefully\n");
+ goto op_done;
+ }
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;
+
+ q_params->cmd = BNX2X_Q_CMD_HALT;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QDTOR_TERMINATE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;
+
+ q_params->cmd = BNX2X_Q_CMD_TERMINATE;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_QDTOR_CFCDEL:
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
+ vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
+ vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QDTOR_DONE:
+ /* invalidate the context */
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_queue_state_params *qstate =
+ &vf->op_params.qctor.qstate;
+
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+ vfop->args.qdtor.qid = qid;
+ vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
+ bnx2x_vfop_qdtor, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
+ cmd->block);
+ }
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ return -ENOMEM;
+}
+
+static void
+bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
+{
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ if (vf) {
+ if (!vf_sb_count(vf))
+ vf->igu_base_id = igu_sb_id;
+ ++vf_sb_count(vf);
+ }
+}
+
+/* VFOP MAC/VLAN helpers */
+static inline void bnx2x_vfop_credit(struct bnx2x *bp,
+ struct bnx2x_vfop *vfop,
+ struct bnx2x_vlan_mac_obj *obj)
+{
+ struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
+
+ /* update credit only if there is no error
+ * and a valid credit counter
+ */
+ if (!vfop->rc && args->credit) {
+ int cnt = 0;
+ struct list_head *pos;
+
+ list_for_each(pos, &obj->head)
+ cnt++;
+
+ atomic_set(args->credit, cnt);
+ }
+}
+
+static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
+ struct bnx2x_vfop_filter *pos,
+ struct bnx2x_vlan_mac_data *user_req)
+{
+ user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+
+ switch (pos->type) {
+ case BNX2X_VFOP_FILTER_MAC:
+ memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
+ break;
+ case BNX2X_VFOP_FILTER_VLAN:
+ user_req->u.vlan.vlan = pos->vid;
+ break;
+ default:
+ BNX2X_ERR("Invalid filter type, skipping\n");
+ return 1;
+ }
+ return 0;
+}
+
+static int
+bnx2x_vfop_config_vlan0(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
+ bool add)
+{
+ int rc;
+
+ vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
+ BNX2X_VLAN_MAC_DEL;
+ vlan_mac->user_req.u.vlan.vlan = 0;
+
+ rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (rc == -EEXIST)
+ rc = 0;
+ return rc;
+}
+
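+/* Apply each filter on the list via bnx2x_config_vlan_mac(). Successfully
+ * applied entries are moved to a local rollback list; if a command fails,
+ * or more rules than filters->add_cnt end up added, every applied entry is
+ * reversed and an error is returned. On return, filters->add_cnt holds the
+ * number of rules that were actually added.
+ */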
+static int bnx2x_vfop_config_list(struct bnx2x *bp,
+ struct bnx2x_vfop_filters *filters,
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
+{
+ struct bnx2x_vfop_filter *pos, *tmp;
+ struct list_head rollback_list, *filters_list = &filters->head;
+ struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
+ int rc = 0, cnt = 0;
+
+ INIT_LIST_HEAD(&rollback_list);
+
+ list_for_each_entry_safe(pos, tmp, filters_list, link) {
+ if (bnx2x_vfop_set_user_req(bp, pos, user_req))
+ continue;
+
+ rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (rc >= 0) {
+ cnt += pos->add ? 1 : -1;
+ list_del(&pos->link);
+ list_add(&pos->link, &rollback_list);
+ rc = 0;
+ } else if (rc == -EEXIST) {
+ rc = 0;
+ } else {
+ BNX2X_ERR("Failed to add a new vlan_mac command\n");
+ break;
+ }
+ }
+
+ /* rollback if error or too many rules added */
+ if (rc || cnt > filters->add_cnt) {
+ BNX2X_ERR("error or too many rules added. Performing rollback\n");
+ list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
+ pos->add = !pos->add; /* reverse op */
+ bnx2x_vfop_set_user_req(bp, pos, user_req);
+ bnx2x_config_vlan_mac(bp, vlan_mac);
+ list_del(&pos->link);
+ }
+ cnt = 0;
+ if (!rc)
+ rc = -EINVAL;
+ }
+ filters->add_cnt = cnt;
+ return rc;
+}
+
+/* VFOP set VLAN/MAC */
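+/* State machine: CLEAR deletes all of the object's rules, CONFIG_SINGLE
+ * issues a single add/del command, MAC_CONFIG_LIST and VLAN_CONFIG_LIST
+ * apply a filter list (the VLAN flavor also removes and later restores
+ * vlan 0 around the list), and CHK_DONE waits for pending ramrods.
+ */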
+static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
+ struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
+ struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
+
+ enum bnx2x_vfop_vlan_mac_state state = vfop->state;
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ bnx2x_vfop_reset_wq(vf);
+
+ switch (state) {
+ case BNX2X_VFOP_VLAN_MAC_CLEAR:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do delete */
+ vfop->rc = obj->delete_all(bp, obj,
+ &vlan_mac->user_req.vlan_mac_flags,
+ &vlan_mac->ramrod_flags);
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do config */
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ if (vfop->rc == -EEXIST)
+ vfop->rc = 0;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
+ vfop->rc = !!obj->raw.check_pending(&obj->raw);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_MAC_CONFIG_LIST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ /* do list config */
+ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+ if (vfop->rc)
+ goto op_err;
+
+ set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_VLAN_CONFIG_LIST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
+
+ /* remove vlan0 - could be no-op */
+ vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
+ if (vfop->rc)
+ goto op_err;
+
+ /* Do vlan list config. If this operation fails we try to
+ * restore vlan0 to keep the queue in working order
+ */
+ vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+ if (!vfop->rc) {
+ set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+ vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+ }
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
+
+ case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
+ /* next state */
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+ if (list_empty(&obj->head))
+ /* add vlan0 */
+ vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
+op_done:
+ kfree(filters);
+ bnx2x_vfop_credit(bp, vfop, obj);
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+struct bnx2x_vfop_vlan_mac_flags {
+ bool drv_only;
+ bool dont_consume;
+ bool single_cmd;
+ bool add;
+};
+
+static void
+bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+ struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+ struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
+
+ memset(ramrod, 0, sizeof(*ramrod));
+
+ /* ramrod flags */
+ if (flags->drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
+ if (flags->single_cmd)
+ set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+
+ /* mac_vlan flags */
+ if (flags->dont_consume)
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
+
+ /* cmd */
+ ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
+}
+
+static inline void
+bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+ struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
+ set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
+}
+
+static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single */
+ .credit = NULL, /* consume credit */
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = false /* don't care */,
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *macs,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = macs,
+ .credit = NULL, /* consume credit */
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = false,
+ .add = false, /* don't care since only the items in the
+ * filters list affect the sp operation,
+ * not the list itself
+ */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
+
+ /* set extra args */
+ filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single command */
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = false,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = add,
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+ ramrod->user_req.u.vlan.vlan = vid;
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = NULL, /* single command */
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = true,
+ .add = false, /* don't care */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *vlans,
+ int qid, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_vfop_args_filters filters = {
+ .multi_filter = vlans,
+ .credit = &bnx2x_vfq(vf, qid, vlan_count),
+ };
+ struct bnx2x_vfop_vlan_mac_flags flags = {
+ .drv_only = drv_only,
+ .dont_consume = (filters.credit != NULL),
+ .single_cmd = false,
+ .add = false, /* don't care */
+ };
+ struct bnx2x_vlan_mac_ramrod_params *ramrod =
+ &vf->op_params.vlan_mac;
+
+ /* set ramrod params */
+ bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+
+ /* set object */
+ ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+ /* set extra args */
+ filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
+ atomic_read(filters.credit);
+
+ vfop->args.filters = filters;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
+ bnx2x_vfop_vlan_mac, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue setup (queue constructor + set vlan 0) */
+static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qctor.qid;
+ enum bnx2x_vfop_qsetup_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_qsetup,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_QSETUP_CTOR:
+ /* init the queue ctor command */
+ vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
+ vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QSETUP_VLAN0:
+ /* skip if non-leading or FPGA/EMU */
+ if (qid)
+ goto op_done;
+
+ /* init the queue set-vlan command (for vlan 0) */
+ vfop->state = BNX2X_VFOP_QSETUP_DONE;
+ vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+op_err:
+ BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QSETUP_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qctor.qid = qid;
+
+ bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
+ bnx2x_vfop_qsetup, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
+static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qx.qid;
+ enum bnx2x_vfop_qflr_state state = vfop->state;
+ struct bnx2x_queue_state_params *qstate;
+ struct bnx2x_vfop_cmd cmd;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ cmd.done = bnx2x_vfop_qflr;
+ cmd.block = false;
+
+ switch (state) {
+ case BNX2X_VFOP_QFLR_CLR_VLAN:
+ /* vlan-clear-all: driver-only, don't consume credit */
+ vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QFLR_CLR_MAC:
+ /* mac-clear-all: driver-only, consume credit */
+ vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
+ vf->abs_vfid, vfop->rc);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QFLR_TERMINATE:
+ qstate = &vfop->op_p->qctor.qstate;
+ memset(qstate, 0, sizeof(*qstate));
+ qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+ vfop->state = BNX2X_VFOP_QFLR_DONE;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
+ vf->abs_vfid, qstate->q_obj->state);
+
+ if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
+ qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
+ qstate->cmd = BNX2X_Q_CMD_TERMINATE;
+ vfop->rc = bnx2x_queue_state_change(bp, qstate);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
+ } else {
+ goto op_done;
+ }
+
+op_err:
+ BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, vfop->rc);
+op_done:
+ case BNX2X_VFOP_QFLR_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = qid;
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+ bnx2x_vfop_qflr, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP multi-casts */
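+/* Multicast state machine: DEL clears the existing mcast configuration,
+ * ADD programs the new list (if one was supplied) and CHK_DONE waits for
+ * the ramrod to complete.
+ */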
+static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
+ struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
+ struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
+ enum bnx2x_vfop_mcast_state state = vfop->state;
+ int i;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_MCAST_DEL:
+ /* clear existing mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_ADD;
+ vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+ case BNX2X_VFOP_MCAST_ADD:
+ if (raw->check_pending(raw))
+ goto op_pending;
+
+ if (args->mc_num) {
+ /* update mcast list on the ramrod params */
+ INIT_LIST_HEAD(&mcast->mcast_list);
+ for (i = 0; i < args->mc_num; i++)
+ list_add_tail(&(args->mc[i].link),
+ &mcast->mcast_list);
+ /* add new mcasts */
+ vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
+ vfop->rc = bnx2x_config_mcast(bp, mcast,
+ BNX2X_MCAST_CMD_ADD);
+ }
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+ case BNX2X_VFOP_MCAST_CHK_DONE:
+ vfop->rc = raw->check_pending(raw) ? 1 : 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
+op_done:
+ kfree(args->mc);
+ bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ bnx2x_mac_addr_t *mcasts,
+ int mcast_num, bool drv_only)
+{
+ struct bnx2x_vfop *vfop = NULL;
+ size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
+ struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
+ NULL;
+
+ if (!mc_sz || mc) {
+ vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ int i;
+ struct bnx2x_mcast_ramrod_params *ramrod =
+ &vf->op_params.mcast;
+
+ /* set ramrod params */
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->mcast_obj = &vf->mcast_obj;
+ if (drv_only)
+ set_bit(RAMROD_DRV_CLR_ONLY,
+ &ramrod->ramrod_flags);
+
+ /* copy mcasts pointers */
+ vfop->args.mc_list.mc_num = mcast_num;
+ vfop->args.mc_list.mc = mc;
+ for (i = 0; i < mcast_num; i++)
+ mc[i].mac = mcasts[i];
+
+ bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
+ bnx2x_vfop_mcast, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
+ cmd->block);
+ } else {
+ kfree(mc);
+ }
+ }
+ return -ENOMEM;
+}
+
+/* VFOP rx-mode */
+static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
+ enum bnx2x_vfop_rxmode_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RXMODE_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RXMODE_DONE;
+
+ vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RXMODE_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ struct bnx2x_rx_mode_ramrod_params *ramrod =
+ &vf->op_params.rx_mode;
+
+ memset(ramrod, 0, sizeof(*ramrod));
+
+ /* Prepare ramrod parameters */
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata =
+ bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping =
+ bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+
+ bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
+ bnx2x_vfop_rxmode, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
+ * queue destructor)
+ */
+static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ int qid = vfop->args.qx.qid;
+ enum bnx2x_vfop_qteardown_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd;
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ cmd.done = bnx2x_vfop_qdown;
+ cmd.block = false;
+
+ switch (state) {
+ case BNX2X_VFOP_QTEARDOWN_RXMODE:
+ /* Drop all */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
+ /* vlan-clear-all: don't consume credit */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
+ /* mac-clear-all: consume credit */
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_QTEARDOWN_QDTOR:
+ /* run the queue destruction flow */
+ DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
+ vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
+ DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
+ vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
+ DP(BNX2X_MSG_IOV, "returned from cmd\n");
+ if (vfop->rc)
+ goto op_err;
+ return;
+op_err:
+ BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
+ vf->abs_vfid, qid, vfop->rc);
+
+ case BNX2X_VFOP_QTEARDOWN_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = qid;
+ bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
+ bnx2x_vfop_qdown, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
+ cmd->block);
+ }
+
+ return -ENOMEM;
+}
+
+/* VF enable primitives
+ * When pretend is required, the caller is responsible for calling
+ * bnx2x_pretend_func() prior to calling these routines.
+ */
+
+/* internal vf enable - until the vf is enabled internally all of its
+ * transactions are blocked. This routine should always be called last,
+ * while the pretend is still in effect.
+ */
+static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
+{
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
+}
+
+/* clears vf error in all semi blocks */
+static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
+ REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
+}
+
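+/* clear the VF's 'was error' indication in the PGLUE_B block. Each
+ * WAS_ERROR register covers 32 VFs, so select the register by the VF's
+ * group on this path and clear the VF's bit within it.
+ */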
+static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
+{
+ u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
+ u32 was_err_reg = 0;
+
+ switch (was_err_group) {
+ case 0:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
+ break;
+ case 1:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
+ break;
+ case 2:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
+ break;
+ case 3:
+ was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
+ break;
+ }
+ REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
+}
+
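+/* Re-initialize the VF's IGU state: under pretend, clear the SB masks and
+ * pending bits and enable the VF in the IGU (MSI/MSI-X, parent PF). Then,
+ * for each of the VF's status blocks, zero the producer/consumer memory,
+ * clear the SB state machine and leave the SB disabled.
+ */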
+static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+ u32 val;
+
+ /* Set VF masks and configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
+ if (vf->cfg_flags & VF_CFG_INT_SIMD)
+ val |= IGU_VF_CONF_SINGLE_ISR_EN;
+ val &= ~IGU_VF_CONF_PARENT_MASK;
+ val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+
+ DP(BNX2X_MSG_IOV,
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
+ vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf_sb_count(vf); i++) {
+ u8 igu_sb_id = vf_igu_sb(vf, i);
+
+ /* zero prod memory */
+ REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);
+
+ /* clear sb state machine */
+ bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
+ false /* VF */);
+
+ /* disable + update */
+ bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
+ IGU_INT_DISABLE, 1);
+ }
+}
+
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* set the VF-PF association in the FW */
+ storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
+ storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+
+ /* clear vf errors */
+ bnx2x_vf_semi_clear_err(bp, abs_vfid);
+ bnx2x_vf_pglue_clear_err(bp, abs_vfid);
+
+ /* internal vf-enable - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
+ DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
+ bnx2x_vf_enable_internal(bp, true);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled at this point */
+ bnx2x_vf_igu_reset(bp, vf);
+
+ /* pretend to enable the vf with the PBF */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ REG_WR(bp, PBF_REG_DISABLE_VF, 0);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
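+/* returns true if the PCI function backing this VF still has PCIe
+ * transactions pending (used when verifying FLR completion)
+ */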
+static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
+{
+ struct pci_dev *dev;
+ struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf)
+ goto unknown_dev;
+
+ dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
+ if (dev)
+ return bnx2x_is_pcie_pending(dev);
+
+unknown_dev:
+ BNX2X_ERR("Unknown device\n");
+ return false;
+}
+
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
+{
+ /* Wait 100ms */
+ msleep(100);
+
+ /* Verify no pending pci transactions */
+ if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
+ BNX2X_ERR("PCIE Transactions still pending\n");
+
+ return 0;
+}
+
+/* must be called after the number of PF queues and the number of VFs are
+ * both known
+ */
+static void
+bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+{
+ u16 vlan_count = 0;
+
+ /* will be set only during VF-ACQUIRE */
+ resc->num_rxqs = 0;
+ resc->num_txqs = 0;
+
+ /* no credit calculations for macs (just yet) */
+ resc->num_mac_filters = 1;
+
+ /* divvy up vlan rules */
+ vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
+ vlan_count = 1 << ilog2(vlan_count);
+ resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+
+ /* no real limitation */
+ resc->num_mc_filters = 0;
+
+ /* num_sbs already set */
+}
+
+/* FLR routines: */
+static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ /* reset the state variables */
+ bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ vf->state = VF_FREE;
+}
+
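+/* HW side of the per-VF FLR cleanup: wait for the VF's DORQ usage counter
+ * to drain (under pretend), issue the FW final cleanup command and poll
+ * for its completion, then verify the TX pipeline is flushed.
+ */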
+static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+ /* DQ usage counter */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
+ "DQ VF usage counter timed out",
+ poll_cnt);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* FW cleanup command - poll for the results */
+ if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
+ poll_cnt))
+ BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);
+
+ /* verify TX hw is flushed */
+ bnx2x_tx_hw_flushed(bp, poll_cnt);
+}
+
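+/* FLR state machine: QUEUES runs the queue-FLR flow on each of the VF's
+ * rx queues and then removes the VF's multicasts; HW performs the hardware
+ * cleanup, frees the VF's resources and re-opens its mailbox. On completion
+ * the VF is marked for FLR ACK and the VF-PF channel is released.
+ */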
+static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+ enum bnx2x_vfop_flr_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_flr,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_FLR_QUEUES:
+ /* the cleanup operations are valid if and only if the VF
+ * was first acquired.
+ */
+ if (++(qx->qid) < vf_rxq_count(vf)) {
+ vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
+ qx->qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ }
+ /* remove multicasts */
+ vfop->state = BNX2X_VFOP_FLR_HW;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
+ 0, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ case BNX2X_VFOP_FLR_HW:
+
+ /* dispatch final cleanup and wait for HW queues to flush */
+ bnx2x_vf_flr_clnup_hw(bp, vf);
+
+ /* release VF resources */
+ bnx2x_vf_free_resc(bp, vf);
+
+ /* re-open the mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ vf->flr_clnup_stage = VF_FLR_ACK;
+ bnx2x_vfop_end(bp, vf, vfop);
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+}
+
+static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ vfop_handler_t done)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ vfop->args.qx.qid = -1; /* loop */
+ bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
+ bnx2x_vfop_flr, done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
+ }
+ return -ENOMEM;
+}
+
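+/* Find the next VF in VF_RESET/VF_FLR_CLN state (starting after prev_vf)
+ * and run the FLR state machine on it; this function also serves as the
+ * 'done' callback, so the marked VFs are cleaned up one after the other.
+ * Once no such VF remains, the FLR'd VFs are acknowledged to the MCP.
+ */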
+static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
+{
+ int i = prev_vf ? prev_vf->index + 1 : 0;
+ struct bnx2x_virtf *vf;
+
+ /* find next VF to cleanup */
+next_vf_to_clean:
+ for (;
+ i < BNX2X_NR_VIRTFN(bp) &&
+ (bnx2x_vf(bp, i, state) != VF_RESET ||
+ bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
+ i++)
+ ;
+
+ DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. num of vfs: %d\n", i,
+ BNX2X_NR_VIRTFN(bp));
+
+ if (i < BNX2X_NR_VIRTFN(bp)) {
+ vf = BP_VF(bp, i);
+
+ /* lock the vf pf channel */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
+
+ /* invoke the VF FLR SM */
+ if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
+ BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
+ vf->abs_vfid);
+
+ /* mark the VF to be ACKED and continue */
+ vf->flr_clnup_stage = VF_FLR_ACK;
+ goto next_vf_to_clean;
+ }
+ return;
+ }
+
+ /* we are done, update vf records */
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+
+ if (vf->flr_clnup_stage != VF_FLR_ACK)
+ continue;
+
+ vf->flr_clnup_stage = VF_FLR_EPILOG;
+ }
+
+ /* Acknowledge the handled VFs.
+ * We acknowledge all the VFs for which an FLR was requested, even
+ * those we never opened, since the MCP will interrupt us immediately
+ * again if we only ack some of the bits, resulting in an endless
+ * loop. This can happen for example in KVM where an 'all ones' FLR
+ * request is sometimes given by the hypervisor.
+ */
+ DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
+ bp->vfdb->flrd_vfs[i]);
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);
+
+ /* clear the acked bits - better yet if the MCP implemented
+ * write to clear semantics
+ */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
+}
+
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
+{
+ int i;
+
+ /* Read FLR'd VFs */
+ for (i = 0; i < FLRD_VFS_DWORDS; i++)
+ bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);
+
+ DP(BNX2X_MSG_MCP,
+ "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
+ bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
+
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+ u32 reset = 0;
+
+ if (vf->abs_vfid < 32)
+ reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
+ else
+ reset = bp->vfdb->flrd_vfs[1] &
+ (1 << (vf->abs_vfid - 32));
+
+ if (reset) {
+ /* set as reset and ready for cleanup */
+ vf->state = VF_RESET;
+ vf->flr_clnup_stage = VF_FLR_CLN;
+
+ DP(BNX2X_MSG_IOV,
+ "Initiating Final cleanup for VF %d\n",
+ vf->abs_vfid);
+ }
+ }
+
+ /* do the FLR cleanup for all marked VFs */
+ bnx2x_vf_flr_clnup(bp, NULL);
+}
+
+/* IOV global initialization routines */
+void bnx2x_iov_init_dq(struct bnx2x *bp)
+{
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* Set the DQ such that the CID reflect the abs_vfid */
+ REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
+ REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));
+
+ /* Set the VFs' starting CID. If it's > 0, the preceding CIDs belong
+ * to the PF L2 queues.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);
+
+ /* The VF window size is the log2 of the max number of CIDs per VF */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);
+
+ /* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
+ * the PF doorbell size although the two are independent.
+ */
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
+ BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+
+ /* No security checks for now -
+ * configure single rule (out of 16) mask = 0x1, value = 0x0,
+ * CID range 0 - 0x1ffff
+ */
+ REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
+ REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
+ REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
+
+ /* set the number of VF allowed doorbells to the full DQ range */
+ REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
+
+ /* set the VF doorbell threshold */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+}
+
+void bnx2x_iov_init_dmae(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
+ if (!IS_SRIOV(bp))
+ return;
+
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+}
+
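+/* derive the VF's PCI bus number (and, below, its devfn) from the PF's BDF
+ * and the SR-IOV offset/stride, following the SR-IOV routing-ID arithmetic
+ */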
+static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return dev->bus->number + ((dev->devfn + iov->offset +
+ iov->stride * vfid) >> 8);
+}
+
+static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
+{
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
+}
+
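+/* carve this VF's slice out of each SR-IOV BAR pair: the PF's IOV resource
+ * is divided equally among iov->total VFs and offset by the VF's abs_vfid
+ */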
+static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i, n;
+ struct pci_dev *dev = bp->pdev;
+ struct bnx2x_sriov *iov = &bp->vfdb->sriov;
+
+ for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
+ u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
+ u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);
+
+ size /= iov->total;
+ vf->bars[n].bar = start + size * vf->abs_vfid;
+ vf->bars[n].size = size;
+ }
+}
+
+static int bnx2x_ari_enabled(struct pci_dev *dev)
+{
+ return dev->bus->self && dev->bus->self->ari_enabled;
+}
+
+static void
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+ int sb_id;
+ u32 val;
+ u8 fid;
+
+ /* IGU in normal mode - read CAM */
+ for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+ continue;
+ fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
+ if (!(fid & IGU_FID_ENCODE_IS_PF))
+ bnx2x_vf_set_igu_info(bp, sb_id,
+ (fid & IGU_FID_VF_NUM_MASK));
+
+ DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+ ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+ ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+ (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+ GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+ }
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+ if (bp->vfdb) {
+ kfree(bp->vfdb->vfqs);
+ kfree(bp->vfdb->vfs);
+ kfree(bp->vfdb);
+ }
+ bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ int pos;
+ struct pci_dev *dev = bp->pdev;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ BNX2X_ERR("failed to find SRIOV capability in device\n");
+ return -ENODEV;
+ }
+
+ iov->pos = pos;
+ DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+ pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+ u32 val;
+
+ /* read the SRIOV capability structure
+ * The fields can be read via configuration read or
+ * directly from the device (starting at offset PCICFG_OFFSET)
+ */
+ if (bnx2x_sriov_pci_cfg_info(bp, iov))
+ return -ENODEV;
+
+ /* get the number of SRIOV bars */
+ iov->nres = 0;
+
+ /* read the first_vfid */
+ val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
+ iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+ * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
+
+ DP(BNX2X_MSG_IOV,
+ "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ BP_FUNC(bp),
+ iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+ iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ return 0;
+}
+
+static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
+{
+ int i;
+ u8 queue_count = 0;
+
+ if (IS_SRIOV(bp))
+ for_each_vf(bp, i)
+ queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+
+ return queue_count;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param)
+{
+ int err, i, qcount;
+ struct bnx2x_sriov *iov;
+ struct pci_dev *dev = bp->pdev;
+
+ bp->vfdb = NULL;
+
+ /* verify this is a PF */
+ if (IS_VF(bp))
+ return 0;
+
+ /* verify sriov capability is present in configuration space */
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ /* verify chip revision */
+ if (CHIP_IS_E1x(bp))
+ return 0;
+
+ /* check if SRIOV support is turned off */
+ if (!num_vfs_param)
+ return 0;
+
+ /* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+ if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+ BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
+ BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+ return 0;
+ }
+
+ /* SRIOV can be enabled only with MSIX */
+ if (int_mode_param == BNX2X_INT_MODE_MSI ||
+ int_mode_param == BNX2X_INT_MODE_INTX)
+ BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+
+ err = -EIO;
+ /* verify ari is enabled */
+ if (!bnx2x_ari_enabled(bp->pdev)) {
+ BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
+ return err;
+ }
+
+ /* verify igu is in normal mode */
+ if (CHIP_INT_MODE_IS_BC(bp)) {
+ BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
+ return err;
+ }
+
+ /* allocate the vfs database */
+ bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+ if (!bp->vfdb) {
+ BNX2X_ERR("failed to allocate vf database\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* get the sriov info - Linux already collected all the pertinent
+ * information, however the sriov structure is for the private use
+ * of the pci module. Also we want this information regardless
+ * of the hypervisor.
+ */
+ iov = &(bp->vfdb->sriov);
+ err = bnx2x_sriov_info(bp, iov);
+ if (err)
+ goto failed;
+
+ /* SR-IOV capability was enabled but there are no VFs */
+ if (iov->total == 0)
+ goto failed;
+
+ /* calculate the actual number of VFs */
+ iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+
+ /* allocate the vf array */
+ bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
+ BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+ if (!bp->vfdb->vfs) {
+ BNX2X_ERR("failed to allocate vf array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ /* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+ for_each_vf(bp, i) {
+ bnx2x_vf(bp, i, index) = i;
+ bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+ bnx2x_vf(bp, i, state) = VF_FREE;
+ INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
+ mutex_init(&bnx2x_vf(bp, i, op_mutex));
+ bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+ }
+
+ /* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ /* get the total queue count and allocate the global queue arrays */
+ qcount = bnx2x_iov_get_max_queue_count(bp);
+
+ /* allocate the queue arrays for all VFs */
+ bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+ if (!bp->vfdb->vfqs) {
+ BNX2X_ERR("failed to allocate vf queue array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ return 0;
+failed:
+ DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+ __bnx2x_iov_free_vfdb(bp);
+ return err;
+}
+
+void bnx2x_iov_remove_one(struct bnx2x *bp)
+{
+ /* if SRIOV is not enabled there's nothing to do */
+ if (!IS_SRIOV(bp))
+ return;
+
+ DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* free vf database */
+ __bnx2x_iov_free_vfdb(bp);
+}
+
+void bnx2x_iov_free_mem(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ /* free vfs hw contexts */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = &bp->vfdb->context[i];
+ BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
+ }
+
+ BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
+ BP_VFDB(bp)->sp_dma.mapping,
+ BP_VFDB(bp)->sp_dma.size);
+
+ BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
+ BP_VF_MBX_DMA(bp)->mapping,
+ BP_VF_MBX_DMA(bp)->size);
+
+ BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
+ BP_VF_BULLETIN_DMA(bp)->mapping,
+ BP_VF_BULLETIN_DMA(bp)->size);
+}
+
+int bnx2x_iov_alloc_mem(struct bnx2x *bp)
+{
+ size_t tot_size;
+ int i, rc = 0;
+
+ if (!IS_SRIOV(bp))
+ return rc;
+
+ /* allocate vfs hw contexts */
+ tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
+ BNX2X_CIDS_PER_VF * sizeof(union cdu_context);
+
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
+ cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
+
+ if (cxt->size) {
+ BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ } else {
+ cxt->addr = NULL;
+ cxt->mapping = 0;
+ }
+ tot_size -= cxt->size;
+ }
+
+ /* allocate vfs ramrods dma memory - client_init and set_mac */
+ tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
+ BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ BP_VFDB(bp)->sp_dma.size = tot_size;
+
+ /* allocate mailboxes */
+ tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
+ BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ BP_VF_MBX_DMA(bp)->size = tot_size;
+
+ /* allocate local bulletin boards */
+ tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
+ BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
+ &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->size = tot_size;
+
+ return 0;
+
+alloc_mem_err:
+ return -ENOMEM;
+}
+
+static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+ unsigned long q_type = 0;
+
+ set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+ set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Queue State object */
+ bnx2x_init_queue_obj(bp, &q->sp_obj,
+ cl_id, &q->cid, 1, func_id,
+ bnx2x_vf_sp(bp, vf, q_data),
+ bnx2x_vf_sp_map(bp, vf, q_data),
+ q_type);
+
+ DP(BNX2X_MSG_IOV,
+ "initialized vf %d's queue object. func id set to %d\n",
+ vf->abs_vfid, q->sp_obj.func_id);
+
+ /* mac/vlan objects are per queue, but only those
+ * that belong to the leading queue are initialized
+ */
+ if (vfq_is_leading(q)) {
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ }
+}
+
+/* called by bnx2x_nic_load */
+int bnx2x_iov_nic_init(struct bnx2x *bp)
+{
+ int vfid, qcount, i;
+
+ if (!IS_SRIOV(bp)) {
+ DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
+ return 0;
+ }
+
+ DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);
+
+ /* initialize vf database */
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
+
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
+ vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
+ BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
+
+ /* init statically provisioned resources */
+ bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+
+ /* queues are initialized during VF-ACQUIRE */
+
+ /* reserve the vf vlan credit */
+ bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
+
+ vf->filter_state = 0;
+ vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+ /* init mcast object - This object will be re-initialized
+ * during VF-ACQUIRE with the proper cl_id and cid.
+ * It needs to be initialized here so that it can be safely
+ * handled by a subsequent FLR flow.
+ */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
+ 0xFF, 0xFF, 0xFF,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* set the mailbox message addresses */
+ BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
+ (((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
+ MBX_MSG_ALIGNED_SIZE);
+
+ BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
+ vfid * MBX_MSG_ALIGNED_SIZE;
+
+ /* Enable vf mailbox */
+ bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
+ }
+
+ /* Final VF init */
+ qcount = 0;
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ /* fill in the BDF and bars */
+ vf->bus = bnx2x_vf_bus(bp, i);
+ vf->devfn = bnx2x_vf_devfn(bp, i);
+ bnx2x_vf_set_bars(bp, vf);
+
+ DP(BNX2X_MSG_IOV,
+ "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
+ vf->abs_vfid, vf->bus, vf->devfn,
+ (unsigned)vf->bars[0].bar, vf->bars[0].size,
+ (unsigned)vf->bars[1].bar, vf->bars[1].size,
+ (unsigned)vf->bars[2].bar, vf->bars[2].size);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+ }
+
+ return 0;
+}
+
+/* called by bnx2x_chip_cleanup */
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return 0;
+
+ /* release all the VFs */
+ for_each_vf(bp, i)
+ bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */
+
+ return 0;
+}
+
+/* called by bnx2x_init_hw_func, returns the next ilt line */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
+{
+ int i;
+ struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+ if (!IS_SRIOV(bp))
+ return line;
+
+ /* set vfs ilt lines */
+ for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
+ struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);
+
+ ilt->lines[line+i].page = hw_cxt->addr;
+ ilt->lines[line+i].page_mapping = hw_cxt->mapping;
+ ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
+ }
+ return line + i;
+}
+
+static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
+{
+ return ((cid >= BNX2X_FIRST_VF_CID) &&
+ ((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
+}
+
+static
+void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
+ struct bnx2x_vf_queue *vfq,
+ union event_ring_elem *elem)
+{
+ unsigned long ramrod_flags = 0;
+ int rc = 0;
+
+ /* Always push next commands out, don't wait here */
+ set_bit(RAMROD_CONT, &ramrod_flags);
+
+ switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+ case BNX2X_FILTER_MAC_PENDING:
+ rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
+ &ramrod_flags);
+ break;
+ case BNX2X_FILTER_VLAN_PENDING:
+ rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
+ &ramrod_flags);
+ break;
+ default:
+ BNX2X_ERR("Unsupported classification command: %d\n",
+ elem->message.data.eth_event.echo);
+ return;
+ }
+ if (rc < 0)
+ BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+ else if (rc > 0)
+ DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
+}
+
+static
+void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ int rc;
+
+ rparam.mcast_obj = &vf->mcast_obj;
+ vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);
+
+ /* If there are pending mcast commands - send them */
+ if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
+ rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+ if (rc < 0)
+ BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+ rc);
+ }
+}
+
+static
+void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ smp_mb__after_clear_bit();
+}
+
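+/* Called from the PF's event queue handler. Returns 1 if the element is not
+ * VF related (so the caller should process it) and 0 once it has been
+ * handled here (cfc-del, classification, mcast, filters, FLR and
+ * malicious-VF completions).
+ */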
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
+{
+ struct bnx2x_virtf *vf;
+ int qidx = 0, abs_vfid;
+ u8 opcode;
+ u16 cid = 0xffff;
+
+ if (!IS_SRIOV(bp))
+ return 1;
+
+ /* first get the cid - the only events we handle here are cfc-delete
+ * and set-mac completion
+ */
+ opcode = elem->message.opcode;
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ cid = SW_CID((__force __le32)
+ elem->message.data.cfc_del_event.cid);
+ DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ cid = (elem->message.data.eth_event.echo &
+ BNX2X_SWCID_MASK);
+ DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ abs_vfid = elem->message.data.vf_flr_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ abs_vfid = elem->message.data.malicious_vf_event.vf_id;
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
+ abs_vfid);
+ goto get_vf;
+ default:
+ return 1;
+ }
+
+ /* check if the cid is the VF range */
+ if (!bnx2x_iov_is_vf_cid(bp, cid)) {
+ DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
+ return 1;
+ }
+
+ /* extract vf and rxq index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
+ abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+get_vf:
+ vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+
+ if (!vf) {
+ BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
+ cid, abs_vfid);
+ return 0;
+ }
+
+ switch (opcode) {
+ case EVENT_RING_OPCODE_CFC_DEL:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
+ vf->abs_vfid, qidx);
+ vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
+ &vfq_get(vf,
+ qidx)->sp_obj,
+ BNX2X_Q_CMD_CFC_DEL);
+ break;
+ case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
+ break;
+ case EVENT_RING_OPCODE_MULTICAST_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_mcast_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_FILTERS_RULES:
+ DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
+ vf->abs_vfid, qidx);
+ bnx2x_vf_handle_filters_eqe(bp, vf);
+ break;
+ case EVENT_RING_OPCODE_VF_FLR:
+ DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
+ vf->abs_vfid);
+ /* Do nothing for now */
+ break;
+ case EVENT_RING_OPCODE_MALICIOUS_VF:
+ DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
+ vf->abs_vfid);
+ /* Do nothing for now */
+ break;
+ }
+ /* SRIOV: reschedule any 'in_progress' operations */
+ bnx2x_iov_sp_event(bp, cid, false);
+
+ return 0;
+}
+
+static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
+{
+ /* extract the vf from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
+ return bnx2x_vf_by_abs_fid(bp, abs_vfid);
+}
+
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj)
+{
+ struct bnx2x_virtf *vf;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+
+ if (vf) {
+ /* extract queue index from vf_cid - relies on the following:
+ * 1. vfid on cid reflects the true abs_vfid
+ * 2. the max number of VFs (per path) is 64
+ */
+ int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
+ *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
+ } else {
+ BNX2X_ERR("No vf matching cid %d\n", vf_cid);
+ }
+}
+
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
+{
+ struct bnx2x_virtf *vf;
+
+ /* check if the cid is the VF range */
+ if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
+ return;
+
+ vf = bnx2x_vf_by_cid(bp, vf_cid);
+ if (vf) {
+ /* set in_progress flag */
+ atomic_set(&vf->op_in_progress, 1);
+ if (queue_work)
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+ }
+}
+
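+/* Append a stats query entry for every active rx queue of every enabled VF
+ * after the PF's own queue queries, so that the single FW statistics ramrod
+ * also collects VF statistics.
+ */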
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
+{
+ int i;
+ int first_queue_query_index, num_queues_req;
+ dma_addr_t cur_data_offset;
+ struct stats_query_entry *cur_query_entry;
+ u8 stats_count = 0;
+ bool is_fcoe = false;
+
+ if (!IS_SRIOV(bp))
+ return;
+
+ if (!NO_FCOE(bp))
+ is_fcoe = true;
+
+ /* fcoe adds one global request and one queue request */
+ num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
+ (is_fcoe ? 0 : 1);
+
+ DP(BNX2X_MSG_IOV,
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
+
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats) +
+ num_queues_req * sizeof(struct per_queue_stats);
+
+ cur_query_entry = &bp->fw_stats_req->
+ query[first_queue_query_index + num_queues_req];
+
+ for_each_vf(bp, i) {
+ int j;
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (vf->state != VF_ENABLED) {
+ DP(BNX2X_MSG_IOV,
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
+ continue;
+ }
+
+ DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ for_each_vfq(vf, j) {
+ struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+
+ /* collect stats for active queues only */
+ if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
+ BNX2X_Q_LOGICAL_STATE_STOPPED)
+ continue;
+
+ /* create stats query entry for this queue */
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->funcID =
+ cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(vf->fw_stat_map));
+ DP(BNX2X_MSG_IOV,
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo, cur_query_entry->funcID,
+ j, cur_query_entry->index);
+ cur_query_entry++;
+ cur_data_offset += sizeof(struct per_queue_stats);
+ stats_count++;
+ }
+ }
+ bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
+}
+
+void bnx2x_iov_sp_task(struct bnx2x *bp)
+{
+ int i;
+
+ if (!IS_SRIOV(bp))
+ return;
+ /* Iterate over all VFs and invoke state transition for VFs with
+ * 'in-progress' slow-path operations
+ */
+ DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+ for_each_vf(bp, i) {
+ struct bnx2x_virtf *vf = BP_VF(bp, i);
+
+ if (!list_empty(&vf->op_list_head) &&
+ atomic_read(&vf->op_in_progress)) {
+ DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
+ bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
+ }
+ }
+}
+
+static inline
+struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
+{
+ int i;
+ struct bnx2x_virtf *vf = NULL;
+
+ for_each_vf(bp, i) {
+ vf = BP_VF(bp, i);
+ if (stat_id >= vf->igu_base_id &&
+ stat_id < vf->igu_base_id + vf_sb_count(vf))
+ break;
+ }
+ return vf;
+}
+
+/* VF API helpers */
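+/* program a single entry of the PXP host-zone permission table: when
+ * enabling, the entry holds the VF id with bit 6 set (presumably the
+ * 'valid' indication); when disabling, the entry is cleared
+ */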
+static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
+ u8 enable)
+{
+ u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
+ u32 val = enable ? (abs_vfid | (1 << 6)) : 0;
+
+ REG_WR(bp, reg, val);
+}
+
+static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int i;
+
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), false);
+}
+
+static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ u32 val;
+
+ /* clear the VF configuration - pretend */
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
+ val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
+ val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
+ IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
+ REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+}
+
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
+ BNX2X_VF_MAX_QUEUES);
+}
+
+static
+int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *req_resc)
+{
+ u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
+
+ return ((req_resc->num_rxqs <= rxq_cnt) &&
+ (req_resc->num_txqs <= txq_cnt) &&
+ (req_resc->num_sbs <= vf_sb_count(vf)) &&
+ (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
+ (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+}
+
+/* CORE VF API */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc)
+{
+ int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
+ BNX2X_CIDS_PER_VF;
+
+ union cdu_context *base_cxt = (union cdu_context *)
+ BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
+ (base_vf_cid & (ILT_PAGE_CIDS-1));
+ int i;
+
+ /* if state is 'acquired' the VF was not released or FLR'd, in
+ * this case the returned resources match the already acquired
+ * resources. Verify that the requested numbers do not exceed
+ * the already acquired numbers.
+ */
+ if (vf->state == VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
+ vf->abs_vfid);
+
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= then previously acquired numbers\n",
+ vf->abs_vfid);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ /* Otherwise vf state must be 'free' or 'reset' */
+ if (vf->state != VF_FREE && vf->state != VF_RESET) {
+ BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+
+ /* static allocation:
+ * the global maximum numbers are fixed per VF. Fail the request if
+ * the requested numbers exceed these globals
+ */
+ if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
+ DP(BNX2X_MSG_IOV,
+ "cannot fulfill vf resource request. Placing maximal available values in response\n");
+ /* set the max resource in the vf */
+ return -ENOMEM;
+ }
+
+ /* Set resources counters - 0 request means max available */
+ vf_sb_count(vf) = resc->num_sbs;
+ vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
+ if (resc->num_mac_filters)
+ vf_mac_rules_cnt(vf) = resc->num_mac_filters;
+ if (resc->num_vlan_filters)
+ vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+
+ DP(BNX2X_MSG_IOV,
+ "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
+ vf_sb_count(vf), vf_rxq_count(vf),
+ vf_txq_count(vf), vf_mac_rules_cnt(vf),
+ vf_vlan_rules_cnt(vf));
+
+ /* Initialize the queues */
+ if (!vf->vfqs) {
+ DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
+ return -EINVAL;
+ }
+
+ for_each_vfq(vf, i) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, i);
+
+ if (!q) {
+ DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ return -EINVAL;
+ }
+
+ q->index = i;
+ q->cxt = &((base_cxt + i)->eth);
+ q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
+
+ DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
+ vf->abs_vfid, i, q->index, q->cid, q->cxt);
+
+ /* init SP objects */
+ bnx2x_vfq_init(bp, vf, q);
+ }
+ vf->state = VF_ACQUIRED;
+ return 0;
+}
+
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
+{
+ struct bnx2x_func_init_params func_init = {0};
+ u16 flags = 0;
+ int i;
+
+ /* the sb resources are initialized at this point, do the
+ * FW/HW initializations
+ */
+ for_each_vf_sb(vf, i)
+ bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
+ vf_igu_sb(vf, i), vf_igu_sb(vf, i));
+
+ /* Sanity checks */
+ if (vf->state != VF_ACQUIRED) {
+ DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
+ vf->abs_vfid, vf->state);
+ return -EINVAL;
+ }
+ /* FLR cleanup epilogue */
+ if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
+ return -EBUSY;
+
+ /* reset IGU VF statistics: MSIX */
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);
+
+ /* vf init */
+ if (vf->cfg_flags & VF_CFG_STATS)
+ flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
+
+ if (vf->cfg_flags & VF_CFG_TPA)
+ flags |= FUNC_FLG_TPA;
+
+ if (is_vf_multi(vf))
+ flags |= FUNC_FLG_RSS;
+
+ /* function setup */
+ func_init.func_flgs = flags;
+ func_init.pf_id = BP_FUNC(bp);
+ func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
+ func_init.fw_stat_map = vf->fw_stat_map;
+ func_init.spq_map = vf->spq_map;
+ func_init.spq_prod = 0;
+ bnx2x_func_init(bp, &func_init);
+
+ /* Enable the vf */
+ bnx2x_vf_enable_access(bp, vf->abs_vfid);
+ bnx2x_vf_enable_traffic(bp, vf);
+
+ /* queue protection table */
+ for_each_vfq(vf, i)
+ bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
+ vfq_qzone_id(vf, vfq_get(vf, i)), true);
+
+ vf->state = VF_ENABLED;
+
+ /* update vf bulletin board */
+ bnx2x_post_vf_bulletin(bp, vf->index);
+
+ return 0;
+}
+
+/* VFOP close (teardown the queues, delete mcasts and close HW) */
+static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
+ enum bnx2x_vfop_close_state state = vfop->state;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_close,
+ .block = false,
+ };
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_CLOSE_QUEUES:
+
+ if (++(qx->qid) < vf_rxq_count(vf)) {
+ vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
+ if (vfop->rc)
+ goto op_err;
+ return;
+ }
+
+ /* remove multicasts */
+ vfop->state = BNX2X_VFOP_CLOSE_HW;
+ vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case BNX2X_VFOP_CLOSE_HW:
+
+ /* disable the interrupts */
+ DP(BNX2X_MSG_IOV, "disabling igu\n");
+ bnx2x_vf_igu_disable(bp, vf);
+
+ /* disable the VF */
+ DP(BNX2X_MSG_IOV, "clearing qtbl\n");
+ bnx2x_vf_clr_qtbl(bp, vf);
+
+ goto op_done;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ vf->state = VF_ACQUIRED;
+ DP(BNX2X_MSG_IOV, "set state to acquired\n");
+ bnx2x_vfop_end(bp, vf, vfop);
+}
+
+int bnx2x_vfop_close_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ vfop->args.qx.qid = -1; /* loop */
+ bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
+ bnx2x_vfop_close, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VF release can be called either when: 1. the VF was acquired but
+ * not enabled, or 2. the VF was enabled or was in the process of
+ * being enabled
+ */
+static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_release,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
+ vf->state == VF_FREE ? "Free" :
+ vf->state == VF_ACQUIRED ? "Acquired" :
+ vf->state == VF_ENABLED ? "Enabled" :
+ vf->state == VF_RESET ? "Reset" :
+ "Unknown");
+
+ switch (vf->state) {
+ case VF_ENABLED:
+ vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
+ if (vfop->rc)
+ goto op_err;
+ return;
+
+ case VF_ACQUIRED:
+ DP(BNX2X_MSG_IOV, "about to free resources\n");
+ bnx2x_vf_free_resc(bp, vf);
+ DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
+ goto op_done;
+
+ case VF_FREE:
+ case VF_RESET:
+ /* do nothing */
+ goto op_done;
+ default:
+ bnx2x_vfop_default(vf->state);
+ }
+op_err:
+ BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
+op_done:
+ bnx2x_vfop_end(bp, vf, vfop);
+}
+
+int bnx2x_vfop_release_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ bnx2x_vfop_opset(-1, /* use vf->state */
+ bnx2x_vfop_release, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+/* VF release ~ VF close + VF release-resources
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = NULL,
+ .block = block,
+ };
+ int rc;
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+
+ rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ if (rc)
+ WARN(rc,
+ "VF[%d] Failed to allocate resources for release op- rc=%d\n",
+ vf->abs_vfid, rc);
+}
+
+static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
+ struct bnx2x_virtf *vf, u32 *sbdf)
+{
+ *sbdf = vf->devfn | (vf->bus << 8);
+}
+
+static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_bar_info *bar_info)
+{
+ int n;
+
+ bar_info->nr_bars = bp->vfdb->sriov.nres;
+ for (n = 0; n < bar_info->nr_bars; n++)
+ bar_info->bars[n] = vf->bars[n];
+}
+
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv)
+{
+ /* lock the channel */
+ mutex_lock(&vf->op_mutex);
+
+ /* record the locking op */
+ vf->op_current = tlv;
+
+ /* log the lock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
+ vf->abs_vfid, tlv);
+}
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv)
+{
+ WARN(expected_tlv != vf->op_current,
+ "lock mismatch: expected %d found %d", expected_tlv,
+ vf->op_current);
+
+ /* lock the channel */
+ mutex_unlock(&vf->op_mutex);
+
+ /* log the unlock */
+ DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
+ vf->abs_vfid, vf->op_current);
+
+ /* record the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+}
+
+void bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0;
+
+ /* disable sriov in case it is still enabled */
+ pci_disable_sriov(bp->pdev);
+ DP(BNX2X_MSG_IOV, "sriov disabled\n");
+
+ /* enable sriov */
+ DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
+ rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ if (rc)
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ else
+ DP(BNX2X_MSG_IOV, "sriov enabled\n");
+}
+
+/* New mac for VF. Consider these cases:
+ * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
+ * supply at acquire.
+ * 2. VF has already been acquired but has not yet initialized - store in local
+ * bulletin board. mac will be posted on VF bulletin board after VF init. VF
+ * will configure this mac when it is ready.
+ * 3. VF has already initialized but has not yet set up a queue - post the new
+ * mac on VF's bulletin board right now. VF will configure this mac when it
+ * is ready.
+ * 4. VF has already set a queue - delete any macs already configured for this
+ * queue and manually configure the new mac.
+ * In any event, once this function has been called refuse any attempts by the
+ * VF to configure any mac for itself except for this mac. In case of a race
+ * where the VF fails to see the new post on its bulletin board before sending a
+ * mac configuration request, the PF will simply fail the request and the VF
+ * can try again after consulting its bulletin board.
+ */
+int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state, vfidx = queue;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* if SRIOV is disabled there is nothing to do (and somewhere, someone
+ * has erred).
+ */
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ BNX2X_ERR("mac address invalid\n");
+ return -EINVAL;
+ }
+
+ /* update PF's copy of the VF's bulletin. will no longer accept mac
+ * configuration requests from the vf unless they match this mac
+ */
+ bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
+ memcpy(bulletin->mac, mac, ETH_ALEN);
+
+ /* Post update on VF's bulletin board */
+ rc = bnx2x_post_vf_bulletin(bp, vfidx);
+ if (rc) {
+ BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
+ return rc;
+ }
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the mac in device on this vf's queue */
+ unsigned long flags = 0;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+
+ /* remove existing eth macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete eth macs\n");
+ return -EINVAL;
+ }
+
+ /* remove existing uc list macs */
+ rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
+ if (rc) {
+ BNX2X_ERR("failed to delete uc_list macs\n");
+ return -EINVAL;
+ }
+
+ /* configure the new mac to device */
+ __set_bit(RAMROD_COMP_WAIT, &flags);
+ bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
+ BNX2X_ETH_MAC, &flags);
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
+ }
+
+ return rc;
+}
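+
+/* Illustrative wiring (sketch): this handler is intended to back the
+ * .ndo_set_vf_mac callback in the driver's net_device_ops; the actual
+ * hookup is done elsewhere in this series, roughly:
+ *
+ *	static const struct net_device_ops bnx2x_netdev_ops = {
+ *		...
+ *		.ndo_set_vf_mac	= bnx2x_set_vf_mac,
+ *	};
+ */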
+
+/* crc is the first field in the bulletin board. compute the crc over the
+ * entire bulletin board excluding the crc field itself
+ */
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin)
+{
+ return crc32(BULLETIN_CRC_SEED,
+ ((u8 *)bulletin) + sizeof(bulletin->crc),
+ bulletin->length - sizeof(bulletin->crc));
+}
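+
+/* PF-side counterpart (sketch): before copying the bulletin into VF-visible
+ * memory the PF is expected to stamp the crc over everything but the crc
+ * field itself, roughly:
+ *
+ *	bulletin->version++;
+ *	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+ *
+ * The actual posting is done by bnx2x_post_vf_bulletin(), declared in
+ * bnx2x_sriov.h.
+ */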
+
+/* Check for new posts on the bulletin board */
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+ int attempts;
+
+ /* bulletin board hasn't changed since last sample */
+ if (bp->old_bulletin.version == bulletin.version)
+ return PFVF_BULLETIN_UNCHANGED;
+
+ /* validate crc of new bulletin board */
+ if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
+ /* sampling the structure in mid post may result in corrupted data;
+ * validate the crc to ensure coherency.
+ */
+ for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+ bulletin = bp->pf2vf_bulletin->content;
+ if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
+ &bulletin))
+ break;
+ BNX2X_ERR("bad crc on bulletin board. contained %x computed %x\n",
+ bulletin.crc,
+ bnx2x_crc_vf_bulletin(bp, &bulletin));
+ }
+ if (attempts >= BULLETIN_ATTEMPTS) {
+ BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+ attempts);
+ return PFVF_BULLETIN_CRC_ERR;
+ }
+ }
+
+ /* the mac address in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
+ memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
+ /* update new mac to net device */
+ memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ }
+
+ /* copy new bulletin board to bp */
+ bp->old_bulletin = bulletin;
+
+ return PFVF_BULLETIN_UPDATED;
+}
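+
+/* Typical VF-side usage (sketch; the concrete call site lives elsewhere in
+ * this series, for example a periodic context such as the driver timer):
+ *
+ *	if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED)
+ *		re-program the mac now held in bp->dev->dev_addr;
+ *
+ * PFVF_BULLETIN_CRC_ERR means the sample should be treated as stale.
+ */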
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+{
+ /* vf doorbells are embedded within the regview */
+ bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+}
+
+int bnx2x_vf_pci_alloc(struct bnx2x *bp)
+{
+ /* allocate vf2pf mailbox for vf to pf channel */
+ BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* allocate pf 2 vf bulletin board */
+ BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+
+ return 0;
+
+alloc_mem_err:
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ return -ENOMEM;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
new file mode 100644
index 00000000000..b4050173add
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -0,0 +1,809 @@
+/* bnx2x_sriov.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ */
+#ifndef BNX2X_SRIOV_H
+#define BNX2X_SRIOV_H
+
+#include "bnx2x_vfpf.h"
+#include "bnx2x.h"
+
+enum sample_bulletin_result {
+ PFVF_BULLETIN_UNCHANGED,
+ PFVF_BULLETIN_UPDATED,
+ PFVF_BULLETIN_CRC_ERR
+};
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* The bnx2x device structure holds vfdb structure described below.
+ * The VF array is indexed by the relative vfid.
+ */
+#define BNX2X_VF_MAX_QUEUES 16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES 8
+
+struct bnx2x_sriov {
+ u32 first_vf_in_pf;
+
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total; /* total VFs associated with the PF */
+ u16 initial; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+};
+
+/* bars */
+struct bnx2x_vf_bar {
+ u64 bar;
+ u32 size;
+};
+
+struct bnx2x_vf_bar_info {
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+ u8 nr_bars;
+};
+
+/* vf queue (used both for rx or tx) */
+struct bnx2x_vf_queue {
+ struct eth_context *cxt;
+
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* VLANs object */
+ struct bnx2x_vlan_mac_obj vlan_obj;
+ atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+
+ /* Queue Slow-path State object */
+ struct bnx2x_queue_sp_obj sp_obj;
+
+ u32 cid;
+ u16 index;
+ u16 sb_idx;
+};
+
+/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
+ * q-init, q-setup and SB index
+ */
+struct bnx2x_vfop_qctor_params {
+ struct bnx2x_queue_state_params qstate;
+ struct bnx2x_queue_setup_params prep_qsetup;
+};
+
+/* VFOP parameters (one copy per VF) */
+union bnx2x_vfop_params {
+ struct bnx2x_vlan_mac_ramrod_params vlan_mac;
+ struct bnx2x_rx_mode_ramrod_params rx_mode;
+ struct bnx2x_mcast_ramrod_params mcast;
+ struct bnx2x_config_rss_params rss;
+ struct bnx2x_vfop_qctor_params qctor;
+};
+
+/* forward */
+struct bnx2x_virtf;
+
+/* VFOP definitions */
+typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+struct bnx2x_vfop_cmd {
+ vfop_handler_t done;
+ bool block;
+};
+
+/* VFOP queue filters command additional arguments */
+struct bnx2x_vfop_filter {
+ struct list_head link;
+ int type;
+#define BNX2X_VFOP_FILTER_MAC 1
+#define BNX2X_VFOP_FILTER_VLAN 2
+
+ bool add;
+ u8 *mac;
+ u16 vid;
+};
+
+struct bnx2x_vfop_filters {
+ int add_cnt;
+ struct list_head head;
+ struct bnx2x_vfop_filter filters[];
+};
+
+/* transient list allocated, built and saved until its
+ * passed to the SP-VERBs layer.
+ */
+struct bnx2x_vfop_args_mcast {
+ int mc_num;
+ struct bnx2x_mcast_list_elem *mc;
+};
+
+struct bnx2x_vfop_args_qctor {
+ int qid;
+ u16 sb_idx;
+};
+
+struct bnx2x_vfop_args_qdtor {
+ int qid;
+ struct eth_context *cxt;
+};
+
+struct bnx2x_vfop_args_defvlan {
+ int qid;
+ bool enable;
+ u16 vid;
+ u8 prio;
+};
+
+struct bnx2x_vfop_args_qx {
+ int qid;
+ bool en_add;
+};
+
+struct bnx2x_vfop_args_filters {
+ struct bnx2x_vfop_filters *multi_filter;
+ atomic_t *credit; /* non NULL means 'don't consume credit' */
+};
+
+union bnx2x_vfop_args {
+ struct bnx2x_vfop_args_mcast mc_list;
+ struct bnx2x_vfop_args_qctor qctor;
+ struct bnx2x_vfop_args_qdtor qdtor;
+ struct bnx2x_vfop_args_defvlan defvlan;
+ struct bnx2x_vfop_args_qx qx;
+ struct bnx2x_vfop_args_filters filters;
+};
+
+struct bnx2x_vfop {
+ struct list_head link;
+ int rc; /* return code */
+ int state; /* next state */
+ union bnx2x_vfop_args args; /* extra arguments */
+ union bnx2x_vfop_params *op_p; /* ramrod params */
+
+ /* state machine callbacks */
+ vfop_handler_t transition;
+ vfop_handler_t done;
+};
+
+/* vf context */
+struct bnx2x_virtf {
+ u16 cfg_flags;
+#define VF_CFG_STATS 0x0001
+#define VF_CFG_FW_FC 0x0002
+#define VF_CFG_TPA 0x0004
+#define VF_CFG_INT_SIMD 0x0008
+#define VF_CACHE_LINE 0x0010
+
+ u8 state;
+#define VF_FREE 0 /* VF ready to be acquired, holds no resc */
+#define VF_ACQUIRED 1 /* VF acquired, but not initialized */
+#define VF_ENABLED 2 /* VF Enabled */
+#define VF_RESET 3 /* VF FLR'd, pending cleanup */
+
+ /* non 0 during flr cleanup */
+ u8 flr_clnup_stage;
+#define VF_FLR_CLN 1 /* reclaim resources and do 'final cleanup'
+ * sans the end-wait
+ */
+#define VF_FLR_ACK 2 /* ACK flr notification */
+#define VF_FLR_EPILOG 3 /* wait for VF remnants to dissipate in the HW
+ * ~ 'final cleanup' end-wait
+ */
+
+ /* dma */
+ dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ dma_addr_t spq_map;
+ dma_addr_t bulletin_map;
+
+ /* Allocated resources counters. Before the VF is acquired, the
+ * counters hold the following values:
+ *
+ * - xxq_count = 0 as the queues memory is not allocated yet.
+ *
+ * - sb_count = The number of status blocks configured for this VF in
+ * the IGU CAM. Initially read during probe.
+ *
+ * - xx_rules_count = The number of rules statically and equally
+ * allocated for each VF, during PF load.
+ */
+ struct vf_pf_resc_request alloc_resc;
+#define vf_rxq_count(vf) ((vf)->alloc_resc.num_rxqs)
+#define vf_txq_count(vf) ((vf)->alloc_resc.num_txqs)
+#define vf_sb_count(vf) ((vf)->alloc_resc.num_sbs)
+#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
+#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
+#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
+
+ u8 sb_count; /* actual number of SBs */
+ u8 igu_base_id; /* base igu status block id */
+
+ struct bnx2x_vf_queue *vfqs;
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+
+ u8 index; /* index in the vf array */
+ u8 abs_vfid;
+ u8 sp_cl_id;
+ u32 error; /* 0 means all's-well */
+
+ /* BDF */
+ unsigned int bus;
+ unsigned int devfn;
+
+ /* bars */
+ struct bnx2x_vf_bar bars[PCI_SRIOV_NUM_BARS];
+
+ /* set-mac ramrod state 1-pending, 0-done */
+ unsigned long filter_state;
+
+ /* leading rss client id ~~ the client id of the first rxq, must be
+ * set for each txq.
+ */
+ int leading_rss;
+
+ /* MCAST object */
+ struct bnx2x_mcast_obj mcast_obj;
+
+ /* RSS configuration object */
+ struct bnx2x_rss_config_obj rss_conf_obj;
+
+ /* slow-path operations */
+ atomic_t op_in_progress;
+ int op_rc;
+ bool op_wait_blocking;
+ struct list_head op_list_head;
+ union bnx2x_vfop_params op_params;
+ struct mutex op_mutex; /* one vfop at a time mutex */
+ enum channel_tlvs op_current;
+};
+
+#define BNX2X_NR_VIRTFN(bp) ((bp)->vfdb->sriov.nr_virtfn)
+
+#define for_each_vf(bp, var) \
+ for ((var) = 0; (var) < BNX2X_NR_VIRTFN(bp); (var)++)
+
+#define for_each_vfq(vf, var) \
+ for ((var) = 0; (var) < vf_rxq_count(vf); (var)++)
+
+#define for_each_vf_sb(vf, var) \
+ for ((var) = 0; (var) < vf_sb_count(vf); (var)++)
+
+#define is_vf_multi(vf) (vf_rxq_count(vf) > 1)
+
+#define HW_VF_HANDLE(bp, abs_vfid) \
+ (u16)(BP_ABS_FUNC((bp)) | (1<<3) | ((u16)(abs_vfid) << 4))
+
+#define FW_PF_MAX_HANDLE 8
+
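+/* FW VF handles start right above the PF handles: e.g. abs_vfid 0 maps to FW
+ * handle 8 (handles below FW_PF_MAX_HANDLE presumably belong to the PFs)
+ */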
+#define FW_VF_HANDLE(abs_vfid) \
+ (abs_vfid + FW_PF_MAX_HANDLE)
+
+/* locking and unlocking the channel mutex */
+void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs tlv);
+
+void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ enum channel_tlvs expected_tlv);
+
+/* VF mail box (aka vf-pf channel) */
+
+/* a container for the bi-directional vf<-->pf messages.
+ * The actual response will be placed according to the offset parameter
+ * provided in the request
+ */
+
+#define MBX_MSG_ALIGN 8
+#define MBX_MSG_ALIGNED_SIZE (roundup(sizeof(struct bnx2x_vf_mbx_msg), \
+ MBX_MSG_ALIGN))
+
+struct bnx2x_vf_mbx_msg {
+ union vfpf_tlvs req;
+ union pfvf_tlvs resp;
+};
+
+struct bnx2x_vf_mbx {
+ struct bnx2x_vf_mbx_msg *msg;
+ dma_addr_t msg_mapping;
+
+ /* VF GPA address */
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
+
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
+
+ u8 flags;
+#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
+ * more than one pending msg
+ */
+};
+
+struct bnx2x_vf_sp {
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } mac_rdata;
+
+ union {
+ struct eth_classify_rules_ramrod_data e2;
+ } vlan_rdata;
+
+ union {
+ struct eth_filter_rules_ramrod_data e2;
+ } rx_mode_rdata;
+
+ union {
+ struct eth_multicast_rules_ramrod_data e2;
+ } mcast_rdata;
+
+ union {
+ struct client_init_ramrod_data init_data;
+ struct client_update_ramrod_data update_data;
+ } q_data;
+};
+
+struct hw_dma {
+ void *addr;
+ dma_addr_t mapping;
+ size_t size;
+};
+
+struct bnx2x_vfdb {
+#define BP_VFDB(bp) ((bp)->vfdb)
+ /* vf array */
+ struct bnx2x_virtf *vfs;
+#define BP_VF(bp, idx) (&((bp)->vfdb->vfs[(idx)]))
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var)
+
+ /* queue array - for all vfs */
+ struct bnx2x_vf_queue *vfqs;
+
+ /* vf HW contexts */
+ struct hw_dma context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
+#define BP_VF_CXT_PAGE(bp, i) (&(bp)->vfdb->context[(i)])
+
+ /* SR-IOV information */
+ struct bnx2x_sriov sriov;
+ struct hw_dma mbx_dma;
+#define BP_VF_MBX_DMA(bp) (&((bp)->vfdb->mbx_dma))
+ struct bnx2x_vf_mbx mbxs[BNX2X_MAX_NUM_OF_VFS];
+#define BP_VF_MBX(bp, vfid) (&((bp)->vfdb->mbxs[(vfid)]))
+
+ struct hw_dma bulletin_dma;
+#define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
+#define BP_VF_BULLETIN(bp, vf) \
+ (((struct pf_vf_bulletin_content *)(BP_VF_BULLETIN_DMA(bp)->addr)) \
+ + (vf))
+
+ struct hw_dma sp_dma;
+#define bnx2x_vf_sp(bp, vf, field) ((bp)->vfdb->sp_dma.addr + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+#define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \
+ (vf)->index * sizeof(struct bnx2x_vf_sp) + \
+ offsetof(struct bnx2x_vf_sp, field))
+
+#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
+ u32 flrd_vfs[FLRD_VFS_DWORDS];
+};
+
+/* queue access */
+static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
+{
+ return &(vf->vfqs[index]);
+}
+
+static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
+{
+ return (vfq->index == 0);
+}
+
+/* FW ids */
+static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf->igu_base_id + sb_idx;
+}
+
+static inline u8 vf_hc_qzone(struct bnx2x_virtf *vf, u16 sb_idx)
+{
+ return vf_igu_sb(vf, sb_idx);
+}
+
+static inline u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vf->igu_base_id + q->index;
+}
+
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+ return vfq_cl_id(vf, q);
+}
+
+/* global iov routines */
+int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line);
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, int num_vfs_param);
+void bnx2x_iov_remove_one(struct bnx2x *bp);
+void bnx2x_iov_free_mem(struct bnx2x *bp);
+int bnx2x_iov_alloc_mem(struct bnx2x *bp);
+int bnx2x_iov_nic_init(struct bnx2x *bp);
+int bnx2x_iov_chip_cleanup(struct bnx2x *bp);
+void bnx2x_iov_init_dq(struct bnx2x *bp);
+void bnx2x_iov_init_dmae(struct bnx2x *bp);
+void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj);
+void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work);
+int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem);
+void bnx2x_iov_adjust_stats_req(struct bnx2x *bp);
+void bnx2x_iov_storm_stats_update(struct bnx2x *bp);
+void bnx2x_iov_sp_task(struct bnx2x *bp);
+/* global vf mailbox routines */
+void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event);
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid);
+
+/* CORE VF API */
+typedef u8 bnx2x_mac_addr_t[ETH_ALEN];
+
+/* acquire */
+int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct vf_pf_resc_request *resc);
+/* init */
+int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ dma_addr_t *sb_map);
+
+/* VFOP generic helpers */
+#define bnx2x_vfop_default(state) do { \
+ BNX2X_ERR("Bad state %d\n", (state)); \
+ vfop->rc = -EINVAL; \
+ goto op_err; \
+ } while (0)
+
+enum {
+ VFOP_DONE,
+ VFOP_CONT,
+ VFOP_VERIFY_PEND,
+};
+
+#define bnx2x_vfop_finalize(vf, rc, next) do { \
+ if ((rc) < 0) \
+ goto op_err; \
+ else if ((rc) > 0) \
+ goto op_pending; \
+ else if ((next) == VFOP_DONE) \
+ goto op_done; \
+ else if ((next) == VFOP_VERIFY_PEND) \
+ BNX2X_ERR("expected pending\n"); \
+ else { \
+ DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n"); \
+ atomic_set(&vf->op_in_progress, 1); \
+ queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); \
+ return; \
+ } \
+ } while (0)
+
+#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr) \
+ do { \
+ vfop->state = first_state; \
+ vfop->op_p = &vf->op_params; \
+ vfop->transition = trans_hndlr; \
+ vfop->done = done_hndlr; \
+ } while (0)
+
+static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+ WARN_ON(list_empty(&vf->op_list_head));
+ return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
+}
+
+static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
+
+ WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+ if (vfop) {
+ INIT_LIST_HEAD(&vfop->link);
+ list_add(&vfop->link, &vf->op_list_head);
+ }
+ return vfop;
+}
+
+static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vfop *vfop)
+{
+ /* rc < 0 - error, otherwise set to 0 */
+ DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
+ if (vfop->rc >= 0)
+ vfop->rc = 0;
+ DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
+
+ /* unlink the current op context and propagate error code
+ * must be done before invoking the 'done()' handler
+ */
+ WARN(!mutex_is_locked(&vf->op_mutex),
+ "about to access vf op linked list but mutex was not locked!");
+ list_del(&vfop->link);
+
+ if (list_empty(&vf->op_list_head)) {
+ DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
+ vf->op_rc = vfop->rc;
+ DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+ } else {
+ struct bnx2x_vfop *cur_vfop;
+
+ DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
+ cur_vfop = bnx2x_vfop_cur(bp, vf);
+ cur_vfop->rc = vfop->rc;
+ DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+ }
+
+ /* invoke done handler */
+ if (vfop->done) {
+ DP(BNX2X_MSG_IOV, "calling done handler\n");
+ vfop->done(bp, vf);
+ } else {
+ /* there is no done handler for the operation to unlock
+ * the mutex. Must have gotten here from PF initiated VF RELEASE
+ */
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
+ }
+
+ DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
+ vf->op_rc, vfop->rc);
+
+ /* if this is the last nested op reset the wait_blocking flag
+ * to release any blocking wrappers, only after 'done()' is invoked
+ */
+ if (list_empty(&vf->op_list_head)) {
+ DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
+ vf->op_wait_blocking = false;
+ }
+
+ kfree(vfop);
+}
+
+static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
+ struct bnx2x_virtf *vf)
+{
+ /* can take a while if any port is running */
+ int cnt = 5000;
+
+ might_sleep();
+ while (cnt--) {
+ if (!vf->op_wait_blocking) {
+#ifdef BNX2X_STOP_ON_ERROR
+ DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+ return 0;
+ }
+ usleep_range(1000, 2000);
+
+ if (bp->panic)
+ return -EIO;
+ }
+
+ /* timeout! */
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+
+ return -EBUSY;
+}
+
+static inline int bnx2x_vfop_transition(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ vfop_handler_t transition,
+ bool block)
+{
+ if (block)
+ vf->op_wait_blocking = true;
+ transition(bp, vf);
+ if (block)
+ return bnx2x_vfop_wait_blocking(bp, vf);
+ return 0;
+}
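+
+/* Canonical shape of a vfop command constructor built from the helpers above
+ * (sketch; FIRST_STATE and my_transition are placeholders - see
+ * bnx2x_vfop_close_cmd() and bnx2x_vfop_release_cmd() in bnx2x_sriov.c for
+ * concrete instances):
+ *
+ *	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ *
+ *	if (vfop) {
+ *		bnx2x_vfop_opset(FIRST_STATE, my_transition, cmd->done);
+ *		return bnx2x_vfop_transition(bp, vf, my_transition, cmd->block);
+ *	}
+ *	return -ENOMEM;
+ */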
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_queue_init_params *init_params,
+ struct bnx2x_queue_setup_params *setup_params,
+ u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q,
+ struct bnx2x_vfop_qctor_params *p,
+ unsigned long q_type);
+int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *macs,
+ int qid, bool drv_only);
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, u16 vid, bool add);
+
+int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct bnx2x_vfop_filters *vlans,
+ int qid, bool drv_only);
+
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid);
+
+int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid);
+
+int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ bnx2x_mac_addr_t *mcasts,
+ int mcast_num, bool drv_only);
+
+int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ int qid, unsigned long accept_flags);
+
+int bnx2x_vfop_close_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
+int bnx2x_vfop_release_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
+/* VF release ~ VF close + VF release-resources
+ *
+ * Release is the ultimate SW shutdown and is called whenever an
+ * irrecoverable error is encountered.
+ */
+void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block);
+int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
+u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
+
+/* FLR routines */
+
+/* VF FLR helpers */
+int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid);
+void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid);
+
+/* Handles an FLR (or VF_DISABLE) notification from the MCP */
+void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
+
+void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
+ u16 length);
+void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length);
+void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
+
+bool bnx2x_tlv_supported(u16 tlvtype);
+
+u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
+ struct pf_vf_bulletin_content *bulletin);
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+
+enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
+
+/* VF side vfpf channel functions */
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
+int bnx2x_vfpf_release(struct bnx2x *bp);
+int bnx2x_vfpf_init(struct bnx2x *bp);
+void bnx2x_vfpf_close_vf(struct bnx2x *bp);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
+int bnx2x_vfpf_set_mac(struct bnx2x *bp);
+int bnx2x_vfpf_set_mcast(struct net_device *dev);
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
+
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len)
+{
+ strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+}
+
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp)
+{
+ return PXP_VF_ADDR_USDM_QUEUES_START +
+ bp->acquire_resp.resc.hw_qid[fp->index] *
+ sizeof(struct ustorm_queue_zone_data);
+}
+
+void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+int bnx2x_vf_pci_alloc(struct bnx2x *bp);
+void bnx2x_enable_sriov(struct bnx2x *bp);
+static inline int bnx2x_vf_headroom(struct bnx2x *bp)
+{
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+}
+
+#else /* CONFIG_BNX2X_SRIOV */
+
+static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
+ struct bnx2x_queue_sp_obj **q_obj) {}
+static inline void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid,
+ bool queue_work) {}
+static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
+static inline int bnx2x_iov_eq_sp_event(struct bnx2x *bp,
+ union event_ring_elem *elem) {return 1; }
+static inline void bnx2x_iov_sp_task(struct bnx2x *bp) {}
+static inline void bnx2x_vf_mbx(struct bnx2x *bp,
+ struct vf_pf_event_data *vfpf_event) {}
+static inline int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line) {return line; }
+static inline void bnx2x_iov_init_dq(struct bnx2x *bp) {}
+static inline int bnx2x_iov_alloc_mem(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_free_mem(struct bnx2x *bp) {}
+static inline int bnx2x_iov_chip_cleanup(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
+static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+ int num_vfs_param) {return 0; }
+static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
+static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
+ u8 tx_count, u8 rx_count) {return 0; }
+static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
+static inline int bnx2x_vfpf_set_mac(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
+static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
+static inline int bnx2x_vf_headroom(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) {}
+static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
+ size_t buf_len) {}
+static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp) {return 0; }
+static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
+{
+ return PFVF_BULLETIN_UNCHANGED;
+}
+
+static inline void bnx2x_vf_map_doorbells(struct bnx2x *bp) {}
+static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 89ec0667140..4397f8b76f2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +19,7 @@
#include "bnx2x_stats.h"
#include "bnx2x_cmn.h"
-
+#include "bnx2x_sriov.h"
/* Statistics */
@@ -79,6 +79,42 @@ static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
* Init service functions
*/
+static void bnx2x_dp_stats(struct bnx2x *bp)
+{
+ int i;
+
+ DP(BNX2X_MSG_STATS, "dumping stats:\n"
+ "fw_stats_req\n"
+ " hdr\n"
+ " cmd_num %d\n"
+ " reserved0 %d\n"
+ " drv_stats_counter %d\n"
+ " reserved1 %d\n"
+ " stats_counters_addrs %x %x\n",
+ bp->fw_stats_req->hdr.cmd_num,
+ bp->fw_stats_req->hdr.reserved0,
+ bp->fw_stats_req->hdr.drv_stats_counter,
+ bp->fw_stats_req->hdr.reserved1,
+ bp->fw_stats_req->hdr.stats_counters_addrs.hi,
+ bp->fw_stats_req->hdr.stats_counters_addrs.lo);
+
+ for (i = 0; i < bp->fw_stats_req->hdr.cmd_num; i++) {
+ DP(BNX2X_MSG_STATS,
+ "query[%d]\n"
+ " kind %d\n"
+ " index %d\n"
+ " funcID %d\n"
+ " reserved %d\n"
+ " address %x %x\n",
+ i, bp->fw_stats_req->query[i].kind,
+ bp->fw_stats_req->query[i].index,
+ bp->fw_stats_req->query[i].funcID,
+ bp->fw_stats_req->query[i].reserved,
+ bp->fw_stats_req->query[i].address.hi,
+ bp->fw_stats_req->query[i].address.lo);
+ }
+}
+
/* Post the next statistics ramrod. Protect it with the spin in
* order to ensure the strict order between statistics ramrods
* (each ramrod has a sequence number passed in a
@@ -103,7 +139,9 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
bp->fw_stats_req->hdr.drv_stats_counter);
-
+ /* adjust the ramrod to include VF queues statistics */
+ bnx2x_iov_adjust_stats_req(bp);
+ bnx2x_dp_stats(bp);
/* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
@@ -174,7 +212,7 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
break;
}
cnt--;
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
}
return 1;
}
@@ -482,6 +520,12 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
static void bnx2x_stats_start(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
+
if (bp->port.pmf)
bnx2x_port_stats_init(bp);
@@ -501,6 +545,11 @@ static void bnx2x_stats_pmf_start(struct bnx2x *bp)
static void bnx2x_stats_restart(struct bnx2x *bp)
{
+ /* vfs travel through here as part of the statistics FSM, but no action
+ * is required
+ */
+ if (IS_VF(bp))
+ return;
bnx2x_stats_comp(bp);
bnx2x_stats_start(bp);
}
@@ -832,19 +881,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
return 0;
}
-static int bnx2x_storm_stats_update(struct bnx2x *bp)
+static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
{
- struct tstorm_per_port_stats *tport =
- &bp->fw_stats_data->port.tstorm_port_statistics;
- struct tstorm_per_pf_stats *tfunc =
- &bp->fw_stats_data->pf.tstorm_pf_statistics;
- struct host_func_stats *fstats = &bp->func_stats;
- struct bnx2x_eth_stats *estats = &bp->eth_stats;
- struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
- int i;
u16 cur_stats_counter;
-
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
@@ -880,6 +920,23 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
le16_to_cpu(counters->tstats_counter), bp->stats_counter);
return -EAGAIN;
}
+ return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+ struct tstorm_per_port_stats *tport =
+ &bp->fw_stats_data->port.tstorm_port_statistics;
+ struct tstorm_per_pf_stats *tfunc =
+ &bp->fw_stats_data->pf.tstorm_pf_statistics;
+ struct host_func_stats *fstats = &bp->func_stats;
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
+ int i;
+
+ /* vfs stat counter is managed by pf */
+ if (IS_PF(bp) && bnx2x_storm_stats_validate_counters(bp))
+ return -EAGAIN;
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
@@ -953,8 +1010,8 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
- etherstatsoverrsizepkts);
- UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
+ etherstatsoverrsizepkts, 32);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard, 16);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
total_unicast_packets_received);
@@ -1033,15 +1090,15 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
- ADD_64(estats->total_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->total_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->total_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->total_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
- ADD_64(estats->error_bytes_received_hi,
- le32_to_cpu(tfunc->rcv_error_bytes.hi),
- estats->error_bytes_received_lo,
- le32_to_cpu(tfunc->rcv_error_bytes.lo));
+ ADD_64_LE(estats->error_bytes_received_hi,
+ tfunc->rcv_error_bytes.hi,
+ estats->error_bytes_received_lo,
+ tfunc->rcv_error_bytes.lo);
UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
@@ -1174,23 +1231,34 @@ static void bnx2x_stats_update(struct bnx2x *bp)
if (bnx2x_edebug_stats_stopped(bp))
return;
- if (*stats_comp != DMAE_COMP_VAL)
- return;
+ if (IS_PF(bp)) {
+ if (*stats_comp != DMAE_COMP_VAL)
+ return;
- if (bp->port.pmf)
- bnx2x_hw_stats_update(bp);
+ if (bp->port.pmf)
+ bnx2x_hw_stats_update(bp);
- if (bnx2x_storm_stats_update(bp)) {
- if (bp->stats_pending++ == 3) {
- BNX2X_ERR("storm stats were not updated for 3 times\n");
- bnx2x_panic();
+ if (bnx2x_storm_stats_update(bp)) {
+ if (bp->stats_pending++ == 3) {
+ BNX2X_ERR("storm stats were not updated for 3 times\n");
+ bnx2x_panic();
+ }
+ return;
}
- return;
+ } else {
+ /* vf doesn't collect HW statistics and doesn't get completions;
+ * perform only the update
+ */
+ bnx2x_storm_stats_update(bp);
}
bnx2x_net_stats_update(bp);
bnx2x_drv_stats_update(bp);
+ /* vf is done */
+ if (IS_VF(bp))
+ return;
+
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index b4d7b26c7fe..364e37ecbc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2012 Broadcom Corporation
+ * Copyright (c) 2007-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -421,16 +421,19 @@ struct bnx2x_fw_port_stats_old {
new->s); \
} while (0)
-#define UPDATE_EXTEND_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT_X(s, t, size) \
do { \
- diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+ diff = le##size##_to_cpu(tclient->s) - \
+ le##size##_to_cpu(old_tclient->s); \
old_tclient->s = tclient->s; \
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
-#define UPDATE_EXTEND_E_TSTAT(s, t) \
+#define UPDATE_EXTEND_TSTAT(s, t) UPDATE_EXTEND_TSTAT_X(s, t, 32)
+
+#define UPDATE_EXTEND_E_TSTAT(s, t, size) \
do { \
- UPDATE_EXTEND_TSTAT(s, t); \
+ UPDATE_EXTEND_TSTAT_X(s, t, size); \
ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
} while (0)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
new file mode 100644
index 00000000000..36246129864
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -0,0 +1,1651 @@
+/* bnx2x_vfpf.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Shmulik Ravid <shmulikr@broadcom.com>
+ * Ariel Elior <ariele@broadcom.com>
+ */
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include <linux/crc32.h>
+
+/* place a given tlv on the tlv buffer at a given offset */
+void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
+ u16 length)
+{
+ struct channel_tlv *tl =
+ (struct channel_tlv *)(tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
+
+/* Clear the mailbox and init the header of the first tlv */
+void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
+ u16 type, u16 length)
+{
+ DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
+ type);
+
+ /* Clear mailbox */
+ memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
+
+ /* init type and length */
+ bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);
+
+ /* init first tlv header */
+ first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
+{
+ int i = 1;
+ struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+ while (tlv->type != CHANNEL_TLV_LIST_END) {
+ /* output tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+
+ /* advance to next tlv */
+ tlvs_list += tlv->length;
+
+ /* cast general tlv list pointer to channel tlv header*/
+ tlv = (struct channel_tlv *)tlvs_list;
+
+ i++;
+
+ /* break condition for this loop */
+ if (i > MAX_TLVS_IN_LIST) {
+ WARN(true, "corrupt tlvs");
+ return;
+ }
+ }
+
+ /* output last tlv */
+ DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
+ tlv->type, tlv->length);
+}
+
+/* test whether we support a tlv type */
+bool bnx2x_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+static inline int bnx2x_pfvf_status_codes(int rc)
+{
+ switch (rc) {
+ case 0:
+ return PFVF_STATUS_SUCCESS;
+ case -ENOMEM:
+ return PFVF_STATUS_NO_RESOURCE;
+ default:
+ return PFVF_STATUS_FAILURE;
+ }
+}
+
+int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
+{
+ struct cstorm_vf_zone_data __iomem *zone_data =
+ REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
+ int tout = 600, interval = 100; /* wait for 60 seconds */
+
+ if (*done) {
+ BNX2X_ERR("done was non zero before message to pf was sent\n");
+ WARN_ON(true);
+ return -EINVAL;
+ }
+
+ /* Write message address */
+ writel(U64_LO(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
+ writel(U64_HI(msg_mapping),
+ &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);
+
+ /* make sure the address is written before FW accesses it */
+ wmb();
+
+ /* Trigger the PF FW */
+ writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ /* Wait for PF to complete */
+ while ((tout >= 0) && (!*done)) {
+ msleep(interval);
+ tout -= 1;
+
+ /* progress indicator - HV can take its own sweet time in
+ * answering VFs...
+ */
+ DP_CONT(BNX2X_MSG_IOV, ".");
+ }
+
+ if (!*done) {
+ BNX2X_ERR("PF response has timed out\n");
+ return -EAGAIN;
+ }
+ DP(BNX2X_MSG_SP, "Got a response from PF\n");
+ return 0;
+}
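+
+/* Every VF->PF request below follows the same sequence (sketch;
+ * CHANNEL_TLV_<TYPE> stands for the request-specific TLV type - see e.g.
+ * bnx2x_vfpf_release() for a concrete instance):
+ *
+ *	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_<TYPE>, sizeof(*req));
+ *	... fill request-specific fields ...
+ *	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ *		      sizeof(struct channel_list_end_tlv));
+ *	bnx2x_dp_tlv_list(bp, req);
+ *	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ */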
+
+int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
+{
+ u32 me_reg;
+ int tout = 10, interval = 100; /* Wait for 1 sec */
+
+ do {
+ /* pxp traps vf read of doorbells and returns me reg value */
+ me_reg = readl(bp->doorbells);
+ if (GOOD_ME_REG(me_reg))
+ break;
+
+ msleep(interval);
+
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?",
+ me_reg);
+ } while (tout-- > 0);
+
+ if (!GOOD_ME_REG(me_reg)) {
+ BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
+ return -EINVAL;
+ }
+
+ BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+
+ *vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+
+ return 0;
+}
+
+int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
+{
+ int rc = 0, attempts = 0;
+ struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
+ struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+ u32 vf_id;
+ bool resources_acquired = false;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vfdev_info.vf_id = vf_id;
+ req->vfdev_info.vf_os = 0;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = bp->igu_sb_cnt;
+ req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
+ req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = bp->pf2vf_bulletin_mapping;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ while (!resources_acquired) {
+ DP(BNX2X_MSG_SP, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = bnx2x_send_msg2pf(bp,
+ &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+
+ /* PF timeout */
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to bp */
+ memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
+
+ attempts++;
+
+ /* test whether the PF accepted our request. If not, humble
+ * the request and try again.
+ */
+ if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
+ DP(BNX2X_MSG_SP, "resources acquired\n");
+ resources_acquired = true;
+ } else if (bp->acquire_resp.hdr.status ==
+ PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP(BNX2X_MSG_SP,
+ "PF unwilling to fulfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs =
+ bp->acquire_resp.resc.num_txqs;
+ req->resc_request.num_rxqs =
+ bp->acquire_resp.resc.num_rxqs;
+ req->resc_request.num_sbs =
+ bp->acquire_resp.resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ bp->acquire_resp.resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ bp->acquire_resp.resc.num_vlan_filters;
+ req->resc_request.num_mc_filters =
+ bp->acquire_resp.resc.num_mc_filters;
+
+ /* Clear response buffer */
+ memset(&bp->vf2pf_mbox->resp, 0,
+ sizeof(union pfvf_tlvs));
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
+ bp->acquire_resp.hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* get HW info */
+ bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
+ bp->link_params.chip_id = bp->common.chip_id;
+ bp->db_size = bp->acquire_resp.pfdev_info.db_size;
+ bp->common.int_block = INT_BLOCK_IGU;
+ bp->common.chip_port_mode = CHIP_2_PORT_MODE;
+ bp->igu_dsb_id = -1;
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->common.flash_size = 0;
+ bp->flags |=
+ NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
+ bp->igu_sb_cnt = 1;
+ bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+ strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ sizeof(bp->fw_ver));
+
+ if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
+ memcpy(bp->dev->dev_addr,
+ bp->acquire_resp.resc.current_mac_addr,
+ ETH_ALEN);
+
+ return 0;
+}
+
+int bnx2x_vfpf_release(struct bnx2x *bp)
+{
+ struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ u32 rc = 0, vf_id;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ return -EAGAIN;
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send release request */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ /* PF timeout */
+ return rc;
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF released us */
+ DP(BNX2X_MSG_SP, "vf released\n");
+ } else {
+ /* PF reports error */
+ BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/* Tell PF about SB addresses */
+int bnx2x_vfpf_init(struct bnx2x *bp)
+{
+ struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));
+
+ /* status blocks */
+ for_each_eth_queue(bp, i)
+ req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
+ status_blk_mapping);
+
+ /* statistics - the request only supports a single queue for now */
+ req->stats_addr = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+
+ DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
+ return 0;
+}
+
+/* CLOSE VF - opposite to INIT_VF */
+void bnx2x_vfpf_close_vf(struct bnx2x *bp)
+{
+ struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int i, rc;
+ u32 vf_id;
+
+ /* If we haven't got a valid VF id, there is no point in
+ * sending any further messages
+ */
+ if (bnx2x_get_vf_id(bp, &vf_id))
+ goto free_irq;
+
+ /* Close the queues */
+ for_each_queue(bp, i)
+ bnx2x_vfpf_teardown_queue(bp, i);
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ req->vf_id = vf_id;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc)
+ BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
+
+ else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
+ resp->hdr.status);
+
+free_irq:
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
+
+ /* Release IRQs */
+ bnx2x_free_irq(bp);
+}
+
+/* ask the pf to open a queue for the vf */
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+{
+ struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u16 tpa_agg_size = 0, flags = 0;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
+
+ /* select tpa mode to request */
+ if (!fp->disable_tpa) {
+ flags |= VFPF_QUEUE_FLG_TPA;
+ flags |= VFPF_QUEUE_FLG_TPA_IPV6;
+ if (fp->mode == TPA_MODE_GRO)
+ flags |= VFPF_QUEUE_FLG_TPA_GRO;
+ tpa_agg_size = TPA_AGG_SIZE;
+ }
+
+ /* calculate queue flags */
+ flags |= VFPF_QUEUE_FLG_STATS;
+ flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
+ flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
+ flags |= VFPF_QUEUE_FLG_VLAN;
+ DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
+
+ /* Common */
+ req->vf_qid = fp_idx;
+ req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;
+
+ /* Rx */
+ req->rxq.rcq_addr = fp->rx_comp_mapping;
+ req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+ req->rxq.rxq_addr = fp->rx_desc_mapping;
+ req->rxq.sge_addr = fp->rx_sge_mapping;
+ req->rxq.vf_sb = fp_idx;
+ req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
+ req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
+ req->rxq.mtu = bp->dev->mtu;
+ req->rxq.buf_sz = fp->rx_buf_size;
+ req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
+ req->rxq.tpa_agg_sz = tpa_agg_size;
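+ /* max SGEs per packet: the MTU rounded up to SGE pages, then up to a
+ * whole number of SGE entries
+ */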
+ req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
+ req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
+ (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+ req->rxq.flags = flags;
+ req->rxq.drop_flags = 0;
+ req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+ req->rxq.stat_id = -1; /* No stats at the moment */
+
+ /* Tx */
+ req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
+ req->txq.vf_sb = fp_idx;
+ req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
+ req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
+ req->txq.flags = flags;
+ req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
+ fp_idx);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
+ fp_idx, resp->hdr.status);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
+{
+ struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
+ sizeof(*req));
+
+ req->vf_qid = qidx;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+
+ if (rc) {
+ BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
+ rc);
+ return rc;
+ }
+
+ /* PF failed the transaction */
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* request pf to add a mac for the vf */
+int bnx2x_vfpf_set_mac(struct bnx2x *bp)
+{
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+ req->vf_qid = 0;
+ req->n_mac_vlan_filters = 1;
+ req->filters[0].flags =
+ VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;
+
+ /* sample bulletin board for new mac */
+ bnx2x_sample_bulletin(bp);
+
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ return rc;
+ }
+
+ /* failure may mean PF was configured with a new mac for us */
+ while (resp->hdr.status == PFVF_STATUS_FAILURE) {
+ DP(BNX2X_MSG_IOV,
+ "vfpf SET MAC failed. Check bulletin board for new posts\n");
+
+ /* check if bulletin board was updated */
+ if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
+ /* copy mac from device to request */
+ memcpy(req->filters[0].mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
+ bp->vf2pf_mbox_mapping);
+ } else {
+ /* no new info in bulletin */
+ break;
+ }
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_vfpf_set_mcast(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc, i = 0;
+ struct netdev_hw_addr *ha;
+
+ if (bp->state != BNX2X_STATE_OPEN) {
+ DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+ return -EINVAL;
+ }
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ /* Get Rx mode requested */
+ DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ bnx2x_mc_addr(ha));
+ memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
+ i++;
+ }
+
+ /* We support PFVF_MAX_MULTICAST_PER_VF multicast
+ * addresses at most
+ */
+ if (i >= PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports not more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ return -EINVAL;
+ }
+
+ req->n_multicast = i;
+ req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+ return rc;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
+ resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
+{
+ int mode = bp->rx_mode;
+ struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+ sizeof(*req));
+
+ DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);
+
+ switch (mode) {
+ case BNX2X_RX_MODE_NONE: /* no Rx */
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ default:
+ BNX2X_ERR("BAD rx mode (%d)\n", mode);
+ return -EINVAL;
+ }
+
+ req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
+ req->vf_qid = 0;
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc)
+ BNX2X_ERR("Sending a message failed: %d\n", rc);
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+/* General service functions */
+static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
+}
+
+static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
+{
+ u32 addr = BAR_CSTRORM_INTMEM +
+ CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);
+
+ REG_WR8(bp, addr, 1);
+}
+
+static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_vf(bp, i)
+ storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
+}
+
+/* enable vf_pf mailbox (aka vf-pf-channel) */
+void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
+{
+ bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);
+
+ /* enable the mailbox in the FW */
+ storm_memset_vf_mbx_ack(bp, abs_vfid);
+ storm_memset_vf_mbx_valid(bp, abs_vfid);
+
+ /* enable the VF access to the mailbox */
+ bnx2x_vf_enable_access(bp, abs_vfid);
+}
+
+/* this works only on !E1h */
+static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
+ dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
+ u32 vf_addr_lo, u32 len32)
+{
+ struct dmae_command dmae;
+
+ if (CHIP_IS_E1x(bp)) {
+ BNX2X_ERR("Chip revision does not support VFs\n");
+ return DMAE_NOT_RDY;
+ }
+
+ if (!bp->dmae_ready) {
+ BNX2X_ERR("DMAE is not ready, can not copy\n");
+ return DMAE_NOT_RDY;
+ }
+
+ /* set opcode and fixed command fields */
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);
+
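+ /* from_vf: copy the VF's request from VF memory into the PF buffer;
+ * otherwise copy the PF's response from the PF buffer into VF memory
+ */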
+ if (from_vf) {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
+ (DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
+ (DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = vf_addr_lo;
+ dmae.src_addr_hi = vf_addr_hi;
+ dmae.dst_addr_lo = U64_LO(pf_addr);
+ dmae.dst_addr_hi = U64_HI(pf_addr);
+ } else {
+ dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
+ (DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
+ (DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);
+
+ dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);
+
+ dmae.src_addr_lo = U64_LO(pf_addr);
+ dmae.src_addr_hi = U64_HI(pf_addr);
+ dmae.dst_addr_lo = vf_addr_lo;
+ dmae.dst_addr_hi = vf_addr_hi;
+ }
+ dmae.len = len32;
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_DMAE);
+
+ /* issue the command and wait for completion */
+ return bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+ u64 vf_addr;
+ dma_addr_t pf_addr;
+ u16 length, type;
+ int rc;
+ struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+
+ /* prepare response */
+ type = mbx->first_tlv.tl.type;
+ length = type == CHANNEL_TLV_ACQUIRE ?
+ sizeof(struct pfvf_acquire_resp_tlv) :
+ sizeof(struct pfvf_general_resp_tlv);
+ bnx2x_add_tlv(bp, resp, 0, type, length);
+ resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+ bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ bnx2x_dp_tlv_list(bp, resp);
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ /* send response */
+ vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
+ mbx->first_tlv.resp_msg_offset;
+ pf_addr = mbx->msg_mapping +
+ offsetof(struct bnx2x_vf_mbx_msg, resp);
+
+ /* copy the response body, if there is one, before the header, as the vf
+ * is sensitive to the header being written
+ */
+ if (resp->hdr.tl.length > sizeof(u64)) {
+ length = resp->hdr.tl.length - sizeof(u64);
+ vf_addr += sizeof(u64);
+ pf_addr += sizeof(u64);
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ length/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy response body to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ vf_addr -= sizeof(u64);
+ pf_addr -= sizeof(u64);
+ }
+
+ /* ack the FW */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ mmiowb();
+
+ /* initiate dmae to send the response */
+ mbx->flags &= ~VF_MSG_INPROCESS;
+
+ /* copy the response header including status-done field,
+ * must be last dmae, must be after FW is acked
+ */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+ U64_HI(vf_addr),
+ U64_LO(vf_addr),
+ sizeof(u64)/4);
+
+ /* unlock channel mutex */
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ if (rc) {
+ BNX2X_ERR("Failed to copy response status to VF %d\n",
+ vf->abs_vfid);
+ goto mbx_error;
+ }
+ return;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf, false); /* non blocking */
+}
+
+static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx, int vfop_status)
+{
+ int i;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
+ struct pf_vf_resc *resc = &resp->resc;
+ u8 status = bnx2x_pfvf_status_codes(vfop_status);
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* fill in pfdev info */
+ resp->pfdev_info.chip_num = bp->common.chip_id;
+ resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
+ resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
+ /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
+ sizeof(resp->pfdev_info.fw_ver));
+
+ if (status == PFVF_STATUS_NO_RESOURCE ||
+ status == PFVF_STATUS_SUCCESS) {
+ /* set resource numbers; if status equals NO_RESOURCE these
+ * are the max possible numbers
+ */
+ resc->num_rxqs = vf_rxq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_txqs = vf_txq_count(vf) ? :
+ bnx2x_vf_max_queue_cnt(bp, vf);
+ resc->num_sbs = vf_sb_count(vf);
+ resc->num_mac_filters = vf_mac_rules_cnt(vf);
+ resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+ resc->num_mc_filters = 0;
+
+ if (status == PFVF_STATUS_SUCCESS) {
+ /* fill in the allocated resources */
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
+
+ for_each_vfq(vf, i)
+ resc->hw_qid[i] =
+ vfq_qzone_id(vf, vfq_get(vf, i));
+
+ for_each_vf_sb(vf, i) {
+ resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
+ resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
+ }
+
+ /* if a mac has been set for this vf, supply it */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ memcpy(resc->current_mac_addr, bulletin->mac,
+ ETH_ALEN);
+ }
+ }
+ }
+
+ DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
+ vf->abs_vfid,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.pf_cap,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters,
+ resc->num_mc_filters,
+ resp->pfdev_info.fw_ver);
+
+ DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
+ for (i = 0; i < vf_rxq_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
+ DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
+ for (i = 0; i < vf_sb_count(vf); i++)
+ DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
+ resc->hw_sbs[i].hw_sb_id,
+ resc->hw_sbs[i].sb_qid);
+ DP_CONT(BNX2X_MSG_IOV, "]\n");
+
+ /* send the response */
+ vf->op_rc = vfop_status;
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int rc;
+ struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;
+
+ /* log vfdef info */
+ DP(BNX2X_MSG_IOV,
+ "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
+ vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
+ acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
+ acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
+ acquire->resc_request.num_vlan_filters,
+ acquire->resc_request.num_mc_filters);
+
+ /* acquire the resources */
+ rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
+
+ /* store address of vf's bulletin board */
+ vf->bulletin_map = acquire->bulletin_addr;
+
+ /* response */
+ bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
+}
+
+static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_init_tlv *init = &mbx->msg->req.init;
+
+ /* record ghost addresses from vf message */
+ vf->spq_map = init->spq_addr;
+ vf->fw_stat_map = init->stats_addr;
+ vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+
+ /* response */
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+ unsigned long *sp_q_flags)
+{
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+ __set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+ __set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+ __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+ __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+ __set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+ __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+ __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+}
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ /* verify vf_qid */
+ if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+ BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+ setup_q->vf_qid, vf_rxq_count(vf));
+ vf->op_rc = -EINVAL;
+ goto response;
+ }
+
+ /* tx queues must be set up alongside rx queues, so if the rx queue
+ * is not marked as valid there's nothing to do.
+ */
+ if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+ struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+ unsigned long q_type = 0;
+
+ struct bnx2x_queue_init_params *init_p;
+ struct bnx2x_queue_setup_params *setup_p;
+
+ /* reinit the VF operation context */
+ memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
+ setup_p = &vf->op_params.qctor.prep_qsetup;
+ init_p = &vf->op_params.qctor.qstate.params.init;
+
+ /* activate immediately */
+ __set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+ if (setup_q->param_valid & VFPF_TXQ_VALID) {
+ struct bnx2x_txq_setup_params *txq_params =
+ &setup_p->txq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+ /* save sb resource index */
+ q->sb_idx = setup_q->txq.vf_sb;
+
+ /* tx init */
+ init_p->tx.hc_rate = setup_q->txq.hc_rate;
+ init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+ bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ &init_p->tx.flags);
+
+ /* tx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ &setup_p->flags);
+
+ /* tx setup - general, nothing */
+
+ /* tx setup - tx */
+ txq_params->dscr_map = setup_q->txq.txq_addr;
+ txq_params->sb_cq_index = setup_q->txq.sb_index;
+ txq_params->traffic_type = setup_q->txq.traffic_type;
+
+ bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+
+ if (setup_q->param_valid & VFPF_RXQ_VALID) {
+ struct bnx2x_rxq_setup_params *rxq_params =
+ &setup_p->rxq_params;
+
+ __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+ /* Note: there is no support for different SBs
+ * for TX and RX
+ */
+ q->sb_idx = setup_q->rxq.vf_sb;
+
+ /* rx init */
+ init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+ init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+ bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ &init_p->rx.flags);
+
+ /* rx setup - flags */
+ bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ &setup_p->flags);
+
+ /* rx setup - general */
+ setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+ /* rx setup - rx */
+ rxq_params->drop_flags = setup_q->rxq.drop_flags;
+ rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+ rxq_params->sge_map = setup_q->rxq.sge_addr;
+ rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+ rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+ rxq_params->buf_sz = setup_q->rxq.buf_sz;
+ rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+ rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+ rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+ rxq_params->cache_line_log =
+ setup_q->rxq.cache_line_log;
+ rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+ bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+ q->index, q->sb_idx);
+ }
+ /* complete the preparations */
+ bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+
+ vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
+ if (vf->op_rc)
+ goto response;
+ return;
+ }
+response:
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+enum bnx2x_vfop_filters_state {
+ BNX2X_VFOP_MBX_Q_FILTERS_MACS,
+ BNX2X_VFOP_MBX_Q_FILTERS_VLANS,
+ BNX2X_VFOP_MBX_Q_FILTERS_RXMODE,
+ BNX2X_VFOP_MBX_Q_FILTERS_MCAST,
+ BNX2X_VFOP_MBX_Q_FILTERS_DONE
+};
+
+static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *tlv,
+ struct bnx2x_vfop_filters **pfl,
+ u32 type_flag)
+{
+ int i, j;
+ struct bnx2x_vfop_filters *fl = NULL;
+ size_t fsz;
+
+ fsz = tlv->n_mac_vlan_filters * sizeof(struct bnx2x_vfop_filter) +
+ sizeof(struct bnx2x_vfop_filters);
+
+ fl = kzalloc(fsz, GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&fl->head);
+
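+ /* walk the TLV filter array, keep only entries matching the requested
+ * type flag (MAC or VLAN) and convert each into a vfop filter entry
+ */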
+ for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
+ struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];
+
+ if ((msg_filter->flags & type_flag) != type_flag)
+ continue;
+ if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+ fl->filters[j].mac = msg_filter->mac;
+ fl->filters[j].type = BNX2X_VFOP_FILTER_MAC;
+ } else {
+ fl->filters[j].vid = msg_filter->vlan_tag;
+ fl->filters[j].type = BNX2X_VFOP_FILTER_VLAN;
+ }
+ fl->filters[j].add =
+ (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
+ true : false;
+ list_add_tail(&fl->filters[j++].link, &fl->head);
+ }
+ if (list_empty(&fl->head))
+ kfree(fl);
+ else
+ *pfl = fl;
+
+ return 0;
+}
+
+static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
+ struct vfpf_q_mac_vlan_filter *filter)
+{
+ DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
+ if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
+ DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
+ if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
+ DP_CONT(msglvl, ", MAC=%pM", filter->mac);
+ DP_CONT(msglvl, "\n");
+}
+
+static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ int i;
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
+ for (i = 0; i < filters->n_mac_vlan_filters; i++)
+ bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
+ &filters->filters[i]);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
+ DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);
+
+ if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
+ for (i = 0; i < filters->n_multicast; i++)
+ DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
+}
+
+#define VFPF_MAC_FILTER VFPF_Q_FILTER_DEST_MAC_VALID
+#define VFPF_VLAN_FILTER VFPF_Q_FILTER_VLAN_TAG_VALID
+
+static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ int rc;
+
+ struct vfpf_set_q_filters_tlv *msg =
+ &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;
+
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_filters_state state = vfop->state;
+
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vfop_mbx_qfilters,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "STATE: %d\n", state);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
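+ /* state machine: MACS -> VLANS -> RXMODE -> MCAST -> DONE. Each
+ * asynchronous command completion re-enters this function via
+ * cmd.done until all requested filter changes are applied.
+ */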
+ switch (state) {
+ case BNX2X_VFOP_MBX_Q_FILTERS_MACS:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_VLANS;
+
+ /* check for any vlan/mac changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build mac list */
+ struct bnx2x_vfop_filters *fl = NULL;
+
+ vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_MAC_FILTER);
+ if (vfop->rc)
+ goto op_err;
+
+ if (fl) {
+ /* set mac list */
+ rc = bnx2x_vfop_mac_list_cmd(bp, vf, &cmd, fl,
+ msg->vf_qid,
+ false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_VLANS:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_RXMODE;
+
+ /* check for any vlan/mac changes */
+ if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
+ /* build vlan list */
+ struct bnx2x_vfop_filters *fl = NULL;
+
+ vfop->rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (vfop->rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vfop_vlan_list_cmd(bp, vf, &cmd, fl,
+ msg->vf_qid,
+ false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_RXMODE:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_MCAST;
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
+ unsigned long accept = 0;
+
+ /* convert the VF-PF rx mask to bnx2x accept flags */
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
+ __set_bit(BNX2X_ACCEPT_UNICAST, &accept);
+
+ if (msg->rx_mask &
+ VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST)
+ __set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_UNICAST)
+ __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ALL_MULTICAST)
+ __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept);
+
+ if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_BROADCAST)
+ __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
+
+ /* A packet arriving at the vf's mac should be accepted
+ * with any vlan
+ */
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+
+ /* set rx-mode */
+ rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
+ msg->vf_qid, accept);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ /* fall through */
+
+ case BNX2X_VFOP_MBX_Q_FILTERS_MCAST:
+ /* next state */
+ vfop->state = BNX2X_VFOP_MBX_Q_FILTERS_DONE;
+
+ if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
+ /* set mcasts */
+ rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, msg->multicast,
+ msg->n_multicast, false);
+ if (rc) {
+ vfop->rc = rc;
+ goto op_err;
+ }
+ return;
+ }
+ /* fall through */
+op_done:
+ case BNX2X_VFOP_MBX_Q_FILTERS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+op_err:
+ BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
+ vf->abs_vfid, msg->vf_qid, vfop->rc);
+ goto op_done;
+
+ default:
+ bnx2x_vfop_default(state);
+ }
+}
+
+static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_MBX_Q_FILTERS_MACS,
+ bnx2x_vfop_mbx_qfilters, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mbx_qfilters,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ /* if a mac was already set for this VF via the set vf mac ndo, we only
+ * accept mac configurations of that mac. Why accept them at all?
+ * Because the PF may have been unable to configure the mac at the
+ * time, since the queue was not yet set up.
+ */
+ if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
+ /* once a mac was set by ndo can only accept a single mac... */
+ if (filters->n_mac_vlan_filters > 1) {
+ BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+
+ /* ...and only the mac set by the ndo */
+ if (filters->n_mac_vlan_filters == 1 &&
+ memcmp(filters->filters->mac, bulletin->mac, ETH_ALEN)) {
+ BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
+ vf->abs_vfid);
+
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+
+ /* verify vf_qid */
+ if (filters->vf_qid > vf_rxq_count(vf))
+ goto response;
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
+ vf->abs_vfid,
+ filters->vf_qid);
+
+ /* print q_filter message */
+ bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);
+
+ vf->op_rc = bnx2x_vfop_mbx_qfilters_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ goto response;
+ return;
+
+response:
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int qid = mbx->msg->req.q_op.vf_qid;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
+ vf->abs_vfid, qid);
+
+ vf->op_rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qid);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);
+
+ vf->op_rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);
+
+ vf->op_rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
+/* dispatch request */
+static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ int i;
+
+ /* check if tlv type is known */
+ if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
+
+ /* switch on the opcode */
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ bnx2x_vf_mbx_acquire(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_INIT:
+ bnx2x_vf_mbx_init_vf(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_SETUP_Q:
+ bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_SET_Q_FILTERS:
+ bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_TEARDOWN_Q:
+ bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ bnx2x_vf_mbx_close_vf(bp, vf, mbx);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ bnx2x_vf_mbx_release_vf(bp, vf, mbx);
+ break;
+ }
+
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ BNX2X_ERR("unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+ for (i = 0; i < 20; i++)
+ DP_CONT(BNX2X_MSG_IOV, "%x ",
+ mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+
+ /* test whether we can respond to the VF (do we have an address
+ * for it?)
+ */
+ if (vf->state == VF_ACQUIRED) {
+ /* mbx_resp uses the op_rc of the VF */
+ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf);
+ } else {
+ /* can't send a response since this VF is unknown to us -
+ * just unlock the channel and be done with it.
+ */
+ bnx2x_unlock_vf_pf_channel(bp, vf,
+ mbx->first_tlv.tl.type);
+ }
+ }
+}
+
+/* handle new vf-pf message */
+void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
+{
+ struct bnx2x_virtf *vf;
+ struct bnx2x_vf_mbx *mbx;
+ u8 vf_idx;
+ int rc;
+
+ DP(BNX2X_MSG_IOV,
+ "vf pf event received: vfid %d, address_hi %x, address lo %x",
+ vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
+ /* Sanity checks consider removing later */
+
+ /* check if the vf_id is valid */
+ if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
+ BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
+ vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
+ goto mbx_done;
+ }
+ vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);
+ mbx = BP_VF_MBX(bp, vf_idx);
+
+ /* verify an event is not currently being processed -
+ * debug failsafe only
+ */
+ if (mbx->flags & VF_MSG_INPROCESS) {
+ BNX2X_ERR("Previous message is still being processed, vf_id %d\n",
+ vfpf_event->vf_id);
+ goto mbx_done;
+ }
+ vf = BP_VF(bp, vf_idx);
+
+ /* save the VF message address */
+ mbx->vf_addr_hi = vfpf_event->msg_addr_hi;
+ mbx->vf_addr_lo = vfpf_event->msg_addr_lo;
+ DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
+ mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
+
+ /* dmae to get the VF request */
+ rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping, vf->abs_vfid,
+ mbx->vf_addr_hi, mbx->vf_addr_lo,
+ sizeof(union vfpf_tlvs)/4);
+ if (rc) {
+ BNX2X_ERR("Failed to copy request VF %d\n", vf->abs_vfid);
+ goto mbx_error;
+ }
+
+ /* process the VF message header */
+ mbx->first_tlv = mbx->msg->req.first_tlv;
+
+ /* dispatch the request (will prepare the response) */
+ bnx2x_vf_mbx_request(bp, vf, mbx);
+ goto mbx_done;
+
+mbx_error:
+ bnx2x_vf_release(bp, vf, false); /* non blocking */
+mbx_done:
+ return;
+}
+
+/* propagate local bulletin board to vf */
+int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
+ dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
+ vf * BULLETIN_CONTENT_SIZE;
+ dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
+ int rc;
+
+ /* can only update vf after init took place */
+ if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
+ bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
+ return 0;
+
+ /* increment bulletin board version and compute crc */
+ bulletin->version++;
+ bulletin->length = BULLETIN_CONTENT_SIZE;
+ bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+
+ /* propagate bulletin board via dmae to vm memory */
+ rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
+ bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
+ U64_LO(vf_addr), bulletin->length / 4);
+ return rc;
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
new file mode 100644
index 00000000000..bfc80baec00
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -0,0 +1,360 @@
+/* bnx2x_vfpf.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011-2013 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Ariel Elior <ariele@broadcom.com>
+ */
+#ifndef VF_PF_IF_H
+#define VF_PF_IF_H
+
+#ifdef CONFIG_BNX2X_SRIOV
+
+/* Common definitions for all HVs */
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+};
+
+struct hw_sb_info {
+ u8 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
+};
+
+/* HW VF-PF channel definitions
+ * A.K.A VF-PF mailbox
+ */
+#define TLV_BUFFER_SIZE 1024
+#define PF_VF_BULLETIN_SIZE 512
+
+#define VFPF_QUEUE_FLG_TPA 0x0001
+#define VFPF_QUEUE_FLG_TPA_IPV6 0x0002
+#define VFPF_QUEUE_FLG_TPA_GRO 0x0004
+#define VFPF_QUEUE_FLG_CACHE_ALIGN 0x0008
+#define VFPF_QUEUE_FLG_STATS 0x0010
+#define VFPF_QUEUE_FLG_OV 0x0020
+#define VFPF_QUEUE_FLG_VLAN 0x0040
+#define VFPF_QUEUE_FLG_COS 0x0080
+#define VFPF_QUEUE_FLG_HC 0x0100
+#define VFPF_QUEUE_FLG_DHC 0x0200
+
+#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
+#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
+#define VFPF_QUEUE_DROP_TTL0 (1 << 2)
+#define VFPF_QUEUE_DROP_UDP_CS_ERR (1 << 3)
+
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing in the towel */
+#define BULLETIN_CRC_SEED 0
+
+enum {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 resp_msg_offset;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_general_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+/* Acquire */
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+ /* the following fields are for debug purposes */
+ u8 vf_id; /* ME register value */
+ u8 vf_os; /* e.g. Linux, W2K8 */
+ u8 padding[2];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ aligned_u64 bulletin_addr;
+};
+
+/* simple operation request on queue */
+struct vfpf_q_op_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 vf_qid;
+ u8 padding[3];
+};
+
+/* acquire response tlv - carries the allocated resources */
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 pf_cap;
+#define PFVF_CAP_RSS 0x00000001
+#define PFVF_CAP_DHC 0x00000002
+#define PFVF_CAP_TPA 0x00000004
+ char fw_ver[32];
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 padding;
+ } pfdev_info;
+ struct pf_vf_resc {
+ /* in case of status NO_RESOURCE in message hdr, pf will fill
+ * this struct with suggested amount of resources for next
+ * acquire request
+ */
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 permanent_mac_addr[ETH_ALEN];
+ u8 current_mac_addr[ETH_ALEN];
+ u8 padding[2];
+ } resc;
+};
+
+/* Init VF */
+struct vfpf_init_tlv {
+ struct vfpf_first_tlv first_tlv;
+ aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
+ aligned_u64 spq_addr;
+ aligned_u64 stats_addr;
+};
+
+/* Setup Queue */
+struct vfpf_setup_q_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_rxq_params {
+ /* physical addresses */
+ aligned_u64 rcq_addr;
+ aligned_u64 rcq_np_addr;
+ aligned_u64 rxq_addr;
+ aligned_u64 sge_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ /* rx buffer info */
+ u16 mtu;
+ u16 buf_sz;
+ u16 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+
+ /* valid iff VFPF_QUEUE_FLG_TPA */
+ u16 sge_buf_sz;
+ u16 tpa_agg_sz;
+ u8 max_sge_pkt;
+
+ u8 drop_flags; /* VFPF_QUEUE_DROP_X, for Linux VMs
+ * all the flags are turned off
+ */
+
+ u8 cache_line_log; /* VFPF_QUEUE_FLG_CACHE_ALIGN */
+ u8 padding;
+ } rxq;
+
+ struct vf_pf_txq_params {
+ /* physical addresses */
+ aligned_u64 txq_addr;
+
+ /* sb + hc info */
+ u8 vf_sb; /* index in hw_sbs[] */
+ u8 sb_index; /* Index in the SB */
+ u16 hc_rate; /* desired interrupts per sec. */
+ /* valid iff VFPF_QUEUE_FLG_HC */
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 stat_id; /* valid iff VFPF_QUEUE_FLG_STATS */
+ u8 traffic_type; /* see in setup_context() */
+ u8 padding;
+ } txq;
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 param_valid;
+#define VFPF_RXQ_VALID 0x01
+#define VFPF_TXQ_VALID 0x02
+ u8 padding[2];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+};
+
+/* configure queue filters */
+struct vfpf_set_q_filters_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u32 flags;
+#define VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED 0x01
+#define VFPF_SET_Q_FILTERS_MULTICAST_CHANGED 0x02
+#define VFPF_SET_Q_FILTERS_RX_MASK_CHANGED 0x04
+
+ u8 vf_qid; /* index in hw_qid[] */
+ u8 n_mac_vlan_filters;
+ u8 n_multicast;
+ u8 padding;
+
+#define PFVF_MAX_MAC_FILTERS 16
+#define PFVF_MAX_VLAN_FILTERS 16
+#define PFVF_MAX_FILTERS (PFVF_MAX_MAC_FILTERS +\
+ PFVF_MAX_VLAN_FILTERS)
+ struct vfpf_q_mac_vlan_filter filters[PFVF_MAX_FILTERS];
+
+#define PFVF_MAX_MULTICAST_PER_VF 32
+ u8 multicast[PFVF_MAX_MULTICAST_PER_VF][ETH_ALEN];
+
+ u32 rx_mask; /* see mask constants at the top of the file */
+};
+
+/* close VF (disable VF) */
+struct vfpf_close_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id; /* for debug */
+ u8 padding[2];
+};
+
+/* release the VF's acquired resources */
+struct vfpf_release_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 vf_id;
+ u8 padding[2];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_init_tlv init;
+ struct vfpf_close_tlv close;
+ struct vfpf_q_op_tlv q_op;
+ struct vfpf_setup_q_tlv setup_q;
+ struct vfpf_set_q_filters_tlv set_q_filters;
+ struct vfpf_release_tlv release;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+/* This is a structure which is allocated in the VF, which the PF may update
+ * when it deems it necessary to do so. The bulletin board is sampled
+ * periodically by the VF. A copy per VF is maintained in the PF (to prevent
+ * loss of data upon multiple updates (or the need for read modify write)).
+ */
+struct pf_vf_bulletin_size {
+ u8 size[PF_VF_BULLETIN_SIZE];
+};
+
+struct pf_vf_bulletin_content {
+ u32 crc; /* crc of structure to ensure is not in
+ * mid-update
+ */
+ u16 version;
+ u16 length;
+
+ aligned_u64 valid_bitmap; /* bitmap indicating which fields
+ * hold valid values
+ */
+
+#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
+ * is available for it
+ */
+
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+union pf_vf_bulletin {
+ struct pf_vf_bulletin_content content;
+ struct pf_vf_bulletin_size size;
+};
+
+#define MAX_TLVS_IN_LIST 50
+
+enum channel_tlvs {
+ CHANNEL_TLV_NONE,
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_INIT,
+ CHANNEL_TLV_SETUP_Q,
+ CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_TEARDOWN_Q,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_PF_RELEASE_VF,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_FLR,
+ CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_MAX
+};
+
+#endif /* CONFIG_BNX2X_SRIOV */
+#endif /* VF_PF_IF_H */
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index df8c30d1a52..149a3a03849 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4816,6 +4816,8 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
return err;
}
+ ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
+
return 0;
}
@@ -5136,6 +5138,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return ret;
+ ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
return 0;
}
@@ -5387,6 +5390,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
cp->stop_cm(dev);
+ cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ;
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5421,11 +5425,9 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
- cdev = kzalloc(alloc_size , GFP_KERNEL);
- if (cdev == NULL) {
- netdev_err(dev, "allocate dev struct failure\n");
+ cdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (cdev == NULL)
return NULL;
- }
cdev->netdev = dev;
cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 2a35436f909..0c9367a0f57 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -179,6 +179,7 @@ struct cnic_eth_dev {
#define CNIC_DRV_STATE_NO_ISCSI_OOO 0x00000004
#define CNIC_DRV_STATE_NO_ISCSI 0x00000008
#define CNIC_DRV_STATE_NO_FCOE 0x00000010
+#define CNIC_DRV_STATE_HANDLES_IRQ 0x00000020
u32 chip_id;
u32 max_kwqe_pending;
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 3a1c8a3cf7c..e9b35da375c 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2385,7 +2385,7 @@ static int sbmac_mii_probe(struct net_device *dev)
return -ENXIO;
}
- phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
+ phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll,
PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bdb086934cd..fdb9b565541 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2012 Broadcom Corporation.
+ * Copyright (C) 2005-2013 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -44,6 +44,7 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
@@ -93,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 128
+#define TG3_MIN_NUM 130
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "December 03, 2012"
+#define DRV_MODULE_RELDATE "February 14, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -263,6 +264,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
TG3_DRV_DATA_FLAG_5705_10_100},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
@@ -330,6 +332,10 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -570,7 +576,9 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
tp->write32_mbox(tp, off, val);
- if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
+ (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
+ !tg3_flag(tp, ICH_WORKAROUND)))
tp->read32_mbox(tp, off);
}
@@ -580,7 +588,8 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
writel(val, mbox);
if (tg3_flag(tp, TXD_MBOX_HWBUG))
writel(val, mbox);
- if (tg3_flag(tp, MBOX_WRITE_REORDER))
+ if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
+ tg3_flag(tp, FLUSH_POSTED_WRITES))
readl(mbox);
}
@@ -609,7 +618,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
return;
@@ -634,7 +643,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
unsigned long flags;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
(off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
*val = 0;
return;
@@ -662,7 +671,7 @@ static void tg3_ape_lock_init(struct tg3 *tp)
int i;
u32 regbase, bit;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
regbase = TG3_APE_LOCK_GRANT;
else
regbase = TG3_APE_PER_LOCK_GRANT;
@@ -698,7 +707,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return 0;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -717,7 +726,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
return -EINVAL;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5761) {
req = TG3_APE_LOCK_REQ;
gnt = TG3_APE_LOCK_GRANT;
} else {
@@ -755,7 +764,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
switch (locknum) {
case TG3_APE_LOCK_GPIO:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
return;
case TG3_APE_LOCK_GRC:
case TG3_APE_LOCK_MEM:
@@ -774,7 +783,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
gnt = TG3_APE_LOCK_GRANT;
else
gnt = TG3_APE_PER_LOCK_GRANT;
@@ -1088,7 +1097,8 @@ static void tg3_switch_clocks(struct tg3 *tp)
#define PHY_BUSY_LOOPS 5000
-static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 *val)
{
u32 frame_val;
unsigned int loops;
@@ -1104,7 +1114,7 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
*val = 0x0;
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1141,7 +1151,13 @@ static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
return ret;
}
-static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+ return __tg3_readphy(tp, tp->phy_addr, reg, val);
+}
+
+static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
+ u32 val)
{
u32 frame_val;
unsigned int loops;
@@ -1159,7 +1175,7 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
tg3_ape_lock(tp, tp->phy_ape_lock);
- frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+ frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
MI_COM_PHY_ADDR_MASK);
frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
MI_COM_REG_ADDR_MASK);
@@ -1194,6 +1210,11 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+ return __tg3_writephy(tp, tp->phy_addr, reg, val);
+}
+
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
int err;
@@ -1458,7 +1479,7 @@ static void tg3_mdio_start(struct tg3 *tp)
udelay(80);
if (tg3_flag(tp, MDIOBUS_INITED) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
}
@@ -1473,7 +1494,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tp->phy_addr = tp->pci_fn + 1;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
else
is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
@@ -1561,7 +1582,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tg3_flag_set(tp, MDIOBUS_INITED);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
return 0;
@@ -1778,7 +1799,12 @@ static int tg3_poll_fw(struct tg3 *tp)
int i;
u32 val;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* We don't use firmware. */
+ return 0;
+ }
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* Wait up to 20ms for init done. */
for (i = 0; i < 200; i++) {
if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
@@ -1807,7 +1833,7 @@ static int tg3_poll_fw(struct tg3 *tp)
netdev_info(tp->dev, "No firmware running\n");
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
/* The 57765 A0 needs a little more
* time to do some important work.
*/
@@ -1937,7 +1963,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else if (phydev->speed == SPEED_1000 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+ tg3_asic_rev(tp) != ASIC_REV_5785)
mac_mode |= MAC_MODE_PORT_MODE_GMII;
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
@@ -1964,7 +1990,7 @@ static void tg3_adjust_link(struct net_device *dev)
udelay(40);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5785) {
if (phydev->speed == SPEED_10)
tw32(MAC_MI_STAT,
MAC_MI_STAT_10MBPS_MODE |
@@ -2013,8 +2039,8 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Attach the MAC to the PHY. */
- phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
- phydev->dev_flags, phydev->interface);
+ phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
+ tg3_adjust_link, phydev->interface);
if (IS_ERR(phydev)) {
dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
@@ -2156,7 +2182,7 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
MII_TG3_MISC_SHDW_SCR5_DLPTLM |
MII_TG3_MISC_SHDW_SCR5_SDTL |
MII_TG3_MISC_SHDW_SCR5_C125OE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+ if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
@@ -2311,8 +2337,8 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
u32 val;
if (tp->link_config.active_speed == SPEED_1000 &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_flag(tp, 57765_CLASS)) &&
!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
@@ -2516,7 +2542,7 @@ static int tg3_phy_reset(struct tg3 *tp)
u32 val, cpmuctrl;
int err;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
udelay(40);
@@ -2531,9 +2557,9 @@ static int tg3_phy_reset(struct tg3 *tp)
tg3_link_report(tp);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
err = tg3_phy_reset_5703_4_5(tp);
if (err)
return err;
@@ -2541,8 +2567,8 @@ static int tg3_phy_reset(struct tg3 *tp)
}
cpmuctrl = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
cpmuctrl = tr32(TG3_CPMU_CTRL);
if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
tw32(TG3_CPMU_CTRL,
@@ -2560,8 +2586,8 @@ static int tg3_phy_reset(struct tg3 *tp)
tw32(TG3_CPMU_CTRL, cpmuctrl);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
CPMU_LSPD_1000MB_MACCLK_12_5) {
@@ -2639,11 +2665,14 @@ out:
val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
}
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
+ tg3_phydsp_write(tp, 0xffb, 0x4000);
+
tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
return 0;
@@ -2669,8 +2698,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
u32 status, shift;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
else
status = tr32(TG3_CPMU_DRV_STATUS);
@@ -2679,8 +2708,8 @@ static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
status &= ~(TG3_GPIO_MSG_MASK << shift);
status |= (newstat << shift);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
else
tw32(TG3_CPMU_DRV_STATUS, status);
@@ -2693,9 +2722,9 @@ static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
return -EIO;
@@ -2718,8 +2747,8 @@ static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
u32 grc_local_ctrl;
if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+ tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
return;
grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
@@ -2742,8 +2771,8 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
if (!tg3_flag(tp, IS_NIC))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
(GRC_LCLCTRL_GPIO_OE0 |
GRC_LCLCTRL_GPIO_OE1 |
@@ -2775,7 +2804,7 @@ static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
u32 grc_local_ctrl = 0;
/* Workaround to prevent overdrawing Amps. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
grc_local_ctrl,
@@ -2847,9 +2876,9 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : 0);
return;
@@ -2901,7 +2930,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
u32 val;
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
u32 serdes_cfg = tr32(MAC_SERDES_CFG);
@@ -2913,7 +2942,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
return;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_bmcr_reset(tp);
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
@@ -2952,16 +2981,16 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
/* The PHY should not be powered down on some chips because
* of bugs.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5780 &&
(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
!tp->pci_fn))
return;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5761_AX) {
val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
val |= CPMU_LSPD_1000MB_MACCLK_12_5;
@@ -3344,7 +3373,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
!tg3_flag(tp, 57765_PLUS))
tw32(NVRAM_ADDR, phy_addr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
!tg3_flag(tp, 5755_PLUS) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -3429,7 +3458,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -3447,6 +3476,13 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
udelay(10);
} else {
+ /*
+ * There is only an Rx CPU for the 5750 derivative in the
+ * BCM4785.
+ */
+ if (tg3_flag(tp, IS_SSB_CORE))
+ return 0;
+
for (i = 0; i < 10000; i++) {
tw32(offset + CPU_STATE, 0xffffffff);
tw32(offset + CPU_MODE, CPU_MODE_HALT);
@@ -3600,7 +3636,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
info.fw_len = tp->fw->size - 12;
info.fw_data = &fw_data[3];
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
} else {
@@ -3658,8 +3694,8 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
for (i = 0; i < 12; i++) {
tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
@@ -3778,7 +3814,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_setup_phy(tp, 0);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val;
val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3820,8 +3856,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
mac_mode = MAC_MODE_PORT_MODE_MII;
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
SPEED_100 : SPEED_10;
if (tg3_5700_link_polarity(tp, speed))
@@ -3854,8 +3889,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
}
if (!tg3_flag(tp, WOL_SPEED_100MB) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 base_val;
base_val = tp->pci_clock_ctrl;
@@ -3866,13 +3901,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
CLOCK_CTRL_PWRDOWN_PLL133, 40);
} else if (tg3_flag(tp, 5780_CLASS) ||
tg3_flag(tp, CPMU_PRESENT) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
/* do nothing */
} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
u32 newbits1, newbits2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_ALTCLK);
@@ -3894,8 +3929,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
if (!tg3_flag(tp, 5705_PLUS)) {
u32 newbits3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_44MHZ_CORE);
@@ -3914,8 +3949,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_frob_aux_power(tp, true);
/* Workaround for unstable PLL clock */
- if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
- (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+ if ((!tg3_flag(tp, IS_SSB_CORE)) &&
+ ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
+ (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
u32 val = tr32(0x7d00);
val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
@@ -4006,8 +4042,8 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
err = tg3_writephy(tp, MII_CTRL1000, new_adv);
@@ -4036,7 +4072,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
if (err)
val = 0;
- switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+ switch (tg3_asic_rev(tp)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
case ASIC_REV_57766:
@@ -4049,6 +4085,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
/* Fall through */
case ASIC_REV_5720:
+ case ASIC_REV_5762:
if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
MII_TG3_DSP_CH34TP2_HIBW01);
@@ -4183,8 +4220,8 @@ static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
return false;
if (tgtadv &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
@@ -4268,9 +4305,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
/* Some third-party PHYs need to be reset on link going
* down.
*/
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5705) &&
tp->link_up) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -4312,8 +4349,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
return err;
}
}
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
/* 5701 {A0,B0} CRC bug workaround */
tg3_writephy(tp, 0x15, 0x0a75);
tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
@@ -4330,8 +4367,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
tg3_writephy(tp, MII_TG3_IMASK, ~0);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -4435,6 +4472,15 @@ relink:
if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
tg3_phy_copper_begin(tp);
+ if (tg3_flag(tp, ROBOSWITCH)) {
+ current_link_up = 1;
+ /* FIXME: when a BCM5325 switch is used, use 100 MBit/s */
+ current_speed = SPEED_1000;
+ current_duplex = DUPLEX_FULL;
+ tp->link_config.active_speed = current_speed;
+ tp->link_config.active_duplex = current_duplex;
+ }
+
tg3_readphy(tp, MII_BMSR, &bmsr);
if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
@@ -4453,11 +4499,31 @@ relink:
else
tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+ /* In order for the 5750 core in the BCM4785 chip to work properly
+ * in RGMII mode, the LED Control Register must be set up.
+ */
+ if (tg3_flag(tp, RGMII_MODE)) {
+ u32 led_ctrl = tr32(MAC_LED_CTRL);
+ led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
+
+ if (tp->link_config.active_speed == SPEED_10)
+ led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
+ else if (tp->link_config.active_speed == SPEED_100)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_100MBPS_ON);
+ else if (tp->link_config.active_speed == SPEED_1000)
+ led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON);
+
+ tw32(MAC_LED_CTRL, led_ctrl);
+ udelay(40);
+ }
+
tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
if (current_link_up == 1 &&
tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -4469,7 +4535,7 @@ relink:
* ??? send/receive packets...
*/
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
- tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
@@ -4488,7 +4554,7 @@ relink:
}
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
current_link_up == 1 &&
tp->link_config.active_speed == SPEED_1000 &&
(tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
@@ -4943,8 +5009,8 @@ static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
port_a = 1;
current_link_up = 0;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
workaround = 1;
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
port_a = 0;
@@ -5273,7 +5339,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5342,8 +5408,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
bmcr = new_bmcr;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5714) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5714) {
if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
bmsr |= BMSR_LSTATUS;
else
@@ -5478,7 +5543,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
else
err = tg3_setup_copper_phy(tp, force_reset);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
u32 scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
@@ -5496,7 +5561,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
(6 << TX_LENGTHS_IPG_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -5785,10 +5851,8 @@ static void tg3_dump_state(struct tg3 *tp)
u32 *regs;
regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
- if (!regs) {
- netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+ if (!regs)
return;
- }
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Read up to but not including private PCI registers */
@@ -7122,7 +7186,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
dma_addr_t new_addr = 0;
int ret = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5701)
new_skb = skb_copy(skb, GFP_ATOMIC);
else {
int more_headroom = 4 - ((unsigned long)skb->data & 3);
@@ -7296,7 +7360,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
@@ -7452,7 +7516,7 @@ static void tg3_mac_loopback(struct tg3 *tp, bool enable)
if (tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ tg3_asic_rev(tp) == ASIC_REV_5700)
tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
@@ -7511,7 +7575,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
udelay(40);
if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ tg3_asic_rev(tp) == ASIC_REV_5785) {
tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
MII_TG3_FET_PTEST_FRC_TX_LINK |
MII_TG3_FET_PTEST_FRC_TX_LOCK);
@@ -7535,7 +7599,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
else
mac_mode |= MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700) {
u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
if (masked_phy_id == TG3_PHY_ID_BCM5401)
@@ -8213,7 +8277,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
/* Set MAX PCI retry to zero. */
val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE))
val |= PCISTATE_RETRY_SAME_DMA;
/* Allow reads and writes to the APE register and memory space. */
@@ -8285,7 +8349,7 @@ static int tg3_chip_reset(struct tg3 *tp)
*/
tg3_save_pci_state(tp);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 5755_PLUS))
tw32(GRC_FASTBOOT_PC, 0);
@@ -8320,7 +8384,7 @@ static int tg3_chip_reset(struct tg3 *tp)
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
@@ -8330,19 +8394,19 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS)) {
/* Force PCIe 1.0a mode */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS) &&
tr32(TG3_PCIE_PHY_TSTCTL) ==
(TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
tw32(GRC_MISC_CFG, (1 << 29));
val |= (1 << 29);
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
tw32(GRC_VCPU_EXT_CTRL,
tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
@@ -8385,7 +8449,7 @@ static int tg3_chip_reset(struct tg3 *tp)
if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
u16 val16;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
int j;
u32 cfg_val;
@@ -8426,23 +8490,33 @@ static int tg3_chip_reset(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
tw32(0x5000, 0x400);
}
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /*
+ * BCM4785: In order to avoid repercussions from using the
+ * potentially defective internal ROM, stop the Rx RISC CPU,
+ * which is not required.
+ */
+ tg3_stop_fw(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ }
+
tw32(GRC_MODE, tp->grc_mode);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
val = tr32(0xc4);
tw32(0xc4, val | (1 << 15));
}
if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
@@ -8468,15 +8542,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_mdio_start(tp);
if (tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720) {
val = tr32(TG3_CPMU_CLCK_ORIDE);
tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
}
@@ -8687,7 +8761,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (tg3_flag(tp, 57765_CLASS))
+ else if (tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8703,7 +8778,8 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
@@ -8809,12 +8885,12 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787)
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
else
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
@@ -8994,9 +9070,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Enable MAC control of LPI */
if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
- tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
- TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
- TG3_CPMU_EEE_LNKIDL_UART_IDL);
+ val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+ TG3_CPMU_EEE_LNKIDL_UART_IDL;
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
+ val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
+
+ tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
tw32_f(TG3_CPMU_EEE_CTRL,
TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
@@ -9006,7 +9085,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_CPMU_EEEMD_LPI_IN_RX |
TG3_CPMU_EEEMD_EEE_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ if (tg3_asic_rev(tp) != ASIC_REV_5717)
val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
if (tg3_flag(tp, ENABLE_APE))
@@ -9032,7 +9111,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
val = tr32(TG3_CPMU_CTRL);
val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
tw32(TG3_CPMU_CTRL, val);
@@ -9053,7 +9132,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_CPMU_HST_ACC, val);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
PCIE_PWR_MGMT_L1_THRESH_4MS;
@@ -9083,7 +9162,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_CLASS)) {
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of PL PCIE block registers. */
@@ -9098,8 +9177,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
- u32 grc_mode = tr32(GRC_MODE);
+ if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
+ u32 grc_mode;
+
+ /* Fix transmit hangs */
+ val = tr32(TG3_CPMU_PADRNG_CTL);
+ val |= TG3_CPMU_PADRNG_CTL_RDIV2;
+ tw32(TG3_CPMU_PADRNG_CTL, val);
+
+ grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of DL PCIE block registers. */
val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
@@ -9131,7 +9217,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
tg3_flag(tp, PCIX_MODE)) {
val = tr32(TG3PCI_PCISTATE);
val |= PCISTATE_RETRY_SAME_DMA;
@@ -9149,7 +9235,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3PCI_PCISTATE, val);
}
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
/* Enable some hw fixes. */
val = tr32(TG3PCI_MSI_DATA);
val |= (1 << 26) | (1 << 28) | (1 << 29);
@@ -9168,14 +9254,15 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
- if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
if (!tg3_flag(tp, 57765_CLASS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5762)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
+ tg3_asic_rev(tp) != ASIC_REV_5761) {
/* This value is determined during the probe time DMA
* engine test, tg3_test_dma.
*/
@@ -9215,9 +9302,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Initialize MBUF/DESC pool. */
if (tg3_flag(tp, 5750_PLUS)) {
/* Do nothing. */
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
else
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
@@ -9255,11 +9342,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->bufmgr_config.dma_high_water);
val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
tw32(BUFMGR_MODE, val);
for (i = 0; i < 2000; i++) {
@@ -9272,7 +9359,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return -ENODEV;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
tg3_setup_rxbd_thresholds(tp);
@@ -9310,7 +9397,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -9323,7 +9410,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- tg3_flag(tp, 57765_CLASS))
+ tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -9365,7 +9453,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
(6 << TX_LENGTHS_IPG_SHIFT) |
(32 << TX_LENGTHS_SLOT_TIME_SHIFT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val |= tr32(MAC_TX_LENGTHS) &
(TX_LENGTHS_JMB_FRM_LEN_MSK |
TX_LENGTHS_CNT_DWN_VAL_MSK);
@@ -9385,20 +9474,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9409,26 +9498,43 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ tp->dma_limit = 0;
+ if (tp->dev->mtu <= ETH_DATA_LEN) {
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
+ }
+ }
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
if (tg3_flag(tp, 57765_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS)) {
- val = tr32(TG3_RDMA_RSRVCTRL_REG);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ u32 tgtreg;
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tgtreg = TG3_RDMA_RSRVCTRL_REG2;
+ else
+ tgtreg = TG3_RDMA_RSRVCTRL_REG;
+
+ val = tr32(tgtreg);
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -9436,14 +9542,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
}
- tw32(TG3_RDMA_RSRVCTRL_REG,
- val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+ tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
- val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
- tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
+ u32 tgtreg;
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
+ else
+ tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
+
+ val = tr32(tgtreg);
+ tw32(tgtreg, val |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
}
@@ -9520,7 +9633,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
if (!tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
@@ -9538,11 +9651,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
GRC_LCLCTRL_GPIO_OUTPUT3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5755)
gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
tp->grc_local_ctrl &= ~gpio_mask;
@@ -9577,11 +9690,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
WDMAC_MODE_LNGREAD_ENAB);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
if (tg3_flag(tp, TSO_CAPABLE) &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
/* nothing */
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -9593,7 +9706,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, 5755_PLUS))
val |= WDMAC_MODE_STATUS_TAG_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
val |= WDMAC_MODE_BURST_ALL_DATA;
tw32_f(WDMAC_MODE, val);
@@ -9604,10 +9717,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
&pcix_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703) {
pcix_cmd &= ~PCI_X_CMD_MAX_READ;
pcix_cmd |= PCI_X_CMD_READ_2K;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
pcix_cmd |= PCI_X_CMD_READ_2K;
}
@@ -9618,7 +9731,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(RDMAC_MODE, rdmac_mode);
udelay(40);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5719) {
for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
break;
@@ -9635,7 +9748,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (!tg3_flag(tp, 5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
tw32(SNDDATAC_MODE,
SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
else
@@ -9658,7 +9771,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
err = tg3_load_5701_a0_firmware_fix(tp);
if (err)
return err;
@@ -9673,10 +9786,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->tx_mode = TX_MODE_ENABLE;
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762) {
val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
tp->tx_mode &= ~val;
tp->tx_mode |= tr32(MAC_TX_MODE) & val;
@@ -9727,8 +9841,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
- !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+ if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
+ !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
/* Set drive transmission level to 1.2V */
/* only if the signal pre-emphasis bit is not set */
val = tr32(MAC_SERDES_CFG);
@@ -9736,7 +9850,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val |= 0x880;
tw32(MAC_SERDES_CFG, val);
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
tw32(MAC_SERDES_CFG, 0x616000);
}
@@ -9749,14 +9863,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val = 2;
tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
/* Use hardware link auto-negotiation */
tg3_flag_set(tp, HW_AUTONEG);
}
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ tg3_asic_rev(tp) == ASIC_REV_5714) {
u32 tmp;
tmp = tr32(SERDES_RX_CTRL);
@@ -10010,9 +10124,9 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
} else {
u32 val = tr32(HOSTCC_FLOW_ATTN);
@@ -10060,10 +10174,15 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ /* BCM4785: Flush posted writes from GbE to host memory. */
+ tr32(HOSTCC_MODE);
+ }
+
if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip
@@ -10181,7 +10300,7 @@ restart_timer:
static void tg3_timer_init(struct tg3 *tp)
{
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ tg3_asic_rev(tp) != ASIC_REV_5717 &&
!tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
@@ -10762,7 +10881,7 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -10832,8 +10951,8 @@ static u64 tg3_calc_crc_errors(struct tg3 *tp)
struct tg3_hw_stats *hw_stats = tp->hw_stats;
if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)) {
u32 val;
if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
@@ -12357,11 +12476,12 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (tg3_flag(tp, 57765_CLASS))
+ else if (tg3_flag(tp, 57765_CLASS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
else if (tg3_flag(tp, 5705_PLUS))
mem_tbl = mem_tbl_5705;
@@ -12473,7 +12593,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
} else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
else if (tg3_flag(tp, HW_TSO_1) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ tg3_asic_rev(tp) == ASIC_REV_5705) {
mss |= (TG3_TSO_TCP_OPT_LEN << 9);
} else {
base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
@@ -12659,7 +12779,7 @@ static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
* errata. Also, the MAC loopback test is deprecated for
* all newer ASIC revisions.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT)) {
tg3_mac_loopback(tp, true);
@@ -12937,7 +13057,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+ err = __tg3_readphy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &mii_regval);
spin_unlock_bh(&tp->lock);
data->val_out = mii_regval;
@@ -12953,7 +13074,8 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EAGAIN;
spin_lock_bh(&tp->lock);
- err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+ err = __tg3_writephy(tp, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
spin_unlock_bh(&tp->lock);
return err;
@@ -13144,7 +13266,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
reset_phy = 1;
err = tg3_restart_hw(tp, reset_phy);
@@ -13257,7 +13379,7 @@ static void tg3_get_nvram_info(struct tg3 *tp)
tw32(NVRAM_CFG1, nvcfg1);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
tg3_flag(tp, 5780_CLASS)) {
switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
@@ -13698,6 +13820,22 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ switch (nvmpinstrp) {
+ case FLASH_5762_EEPROM_HD:
+ nvmpinstrp = FLASH_5720_EEPROM_HD;
+ break;
+ case FLASH_5762_EEPROM_LD:
+ nvmpinstrp = FLASH_5720_EEPROM_LD;
+ break;
+ }
+ }
+
switch (nvmpinstrp) {
case FLASH_5720_EEPROM_HD:
case FLASH_5720_EEPROM_LD:
@@ -13743,7 +13881,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13789,7 +13928,8 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tp->nvram_size = TG3_NVRAM_SIZE_1MB;
break;
default:
- tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
@@ -13801,11 +13941,30 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp)
tg3_nvram_get_pagesize(tp, nvcfg1);
if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ u32 val;
+
+ if (tg3_nvram_read(tp, 0, &val))
+ return;
+
+ if (val != TG3_EEPROM_MAGIC &&
+ (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
+ tg3_flag_set(tp, NO_NVRAM);
+ }
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
(EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -13818,8 +13977,8 @@ static void tg3_nvram_init(struct tg3 *tp)
tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
udelay(100);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701) {
tg3_flag_set(tp, NVRAM);
if (tg3_nvram_lock(tp)) {
@@ -13832,25 +13991,26 @@ static void tg3_nvram_init(struct tg3 *tp)
tp->nvram_size = 0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ if (tg3_asic_rev(tp) == ASIC_REV_5752)
tg3_get_5752_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5755)
tg3_get_5755_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_get_5787_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5761)
tg3_get_5761_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719)
tg3_get_5717_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_get_5720_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -13963,7 +14123,7 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_flag_set(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, WOL_CAP);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
tg3_flag_clear(tp, EEPROM_WRITE_PROT);
tg3_flag_set(tp, IS_NIC);
@@ -13990,13 +14150,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
ver >>= NIC_SRAM_DATA_VER_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_asic_rev(tp) != ASIC_REV_5703 &&
(ver > 0) && (ver < 0x100))
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785)
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
@@ -14044,18 +14204,16 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
/* Default to PHY_1_MODE if 0 (MAC_MODE) is
* read on some older 5700/5701 bootcode.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5701)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
break;
case SHASTA_EXT_LED_SHARED:
tp->led_ctrl = LED_CTRL_MODE_SHARED;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
@@ -14066,19 +14224,19 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
case SHASTA_EXT_LED_COMBO:
tp->led_ctrl = LED_CTRL_MODE_COMBO;
- if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
LED_CTRL_MODE_PHY_2);
break;
}
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+ if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) &&
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
@@ -14122,13 +14280,13 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
if ((tg3_flag(tp, 57765_PLUS) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
if (tg3_flag(tp, PCI_EXPRESS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
!tg3_flag(tp, 57765_PLUS)) {
u32 cfg3;
@@ -14152,6 +14310,39 @@ done:
device_set_wakeup_capable(&tp->pdev->dev, false);
}
+static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+ int i, err;
+ u32 val2, off = offset * 8;
+
+ err = tg3_nvram_lock(tp);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
+ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
+ APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
+ tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
+ udelay(10);
+
+ for (i = 0; i < 100; i++) {
+ val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
+ if (val2 & APE_OTP_STATUS_CMD_DONE) {
+ *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
+ break;
+ }
+ udelay(10);
+ }
+
+ tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
+
+ tg3_nvram_unlock(tp);
+ if (val2 & APE_OTP_STATUS_CMD_DONE)
+ return 0;
+
+ return -EBUSY;
+}
+
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
int i;
@@ -14298,10 +14489,19 @@ static int tg3_phy_probe(struct tg3 *tp)
* subsys device table.
*/
p = tg3_lookup_by_subsys(tp);
- if (!p)
+ if (p) {
+ tp->phy_id = p->phy_id;
+ } else if (!tg3_flag(tp, IS_SSB_CORE)) {
+ /* So far we have seen the IDs 0xbc050cd0,
+ * 0xbc050f80 and 0xbc050c30 on devices
+ * connected to a BCM4785, and there are
+ * probably more. For now, just assume that
+ * the phy is supported when it is connected
+ * to an SSB core.
+ */
return -ENODEV;
+ }
- tp->phy_id = p->phy_id;
if (!tp->phy_id ||
tp->phy_id == TG3_PHY_ID_BCM8002)
tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
@@ -14309,12 +14509,13 @@ static int tg3_phy_probe(struct tg3 *tp)
}
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
- (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
- tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+ (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5717 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
+ (tg3_asic_rev(tp) == ASIC_REV_57765 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
tg3_phy_init_link_config(tp);
@@ -14424,7 +14625,7 @@ out_not_found:
return;
out_no_vpd:
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
strcpy(tp->board_part_number, "BCM5717");
@@ -14432,7 +14633,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM5718");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
strcpy(tp->board_part_number, "BCM57780");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
@@ -14443,7 +14644,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57788");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
strcpy(tp->board_part_number, "BCM57761");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
@@ -14458,7 +14659,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
strcpy(tp->board_part_number, "BCM57762");
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
@@ -14469,7 +14670,7 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57786");
else
goto nomatch;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
nomatch:
@@ -14691,6 +14892,8 @@ static void tg3_read_dash_ver(struct tg3 *tp)
if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
+ fwtype = "SMASH";
else
fwtype = "DASH";
@@ -14704,6 +14907,31 @@ static void tg3_read_dash_ver(struct tg3 *tp)
(apedata & APE_FW_VERSION_BLDMSK));
}
+static void tg3_read_otp_ver(struct tg3 *tp)
+{
+ u32 val, val2;
+
+ if (tg3_asic_rev(tp) != ASIC_REV_5762)
+ return;
+
+ if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
+ !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
+ TG3_OTP_MAGIC0_VALID(val)) {
+ u64 val64 = (u64) val << 32 | val2;
+ u32 ver = 0;
+ int i, vlen;
+
+ for (i = 0; i < 7; i++) {
+ if ((val64 & 0xff) == 0)
+ break;
+ ver = val64 & 0xff;
+ val64 >>= 8;
+ }
+ vlen = strlen(tp->fw_ver);
+ snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
+ }
+}
+
static void tg3_read_fw_ver(struct tg3 *tp)
{
u32 val;
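The version-extraction loop in tg3_read_otp_ver() above walks the combined 64-bit OTP word one byte at a time and keeps the last non-zero byte it sees before reaching a zero byte. Below is a minimal standalone sketch of that byte scan, with a made-up input value purely for illustration (in the driver the value comes from two tg3_ape_otp_read() calls combined into one 64-bit word):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical OTP contents; in the driver this is
		 * (u64)val << 32 | val2 from the two APE OTP reads.
		 */
		uint64_t val64 = 0xa000000000000201ULL;
		unsigned int ver = 0;
		int i;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;	/* remember the last non-zero byte */
			val64 >>= 8;
		}

		printf("extracted version byte: %02u\n", ver);	/* prints 02 */
		return 0;
	}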
@@ -14714,6 +14942,7 @@ static void tg3_read_fw_ver(struct tg3 *tp)
if (tg3_flag(tp, NO_NVRAM)) {
strcat(tp->fw_ver, "sb");
+ tg3_read_otp_ver(tp);
return;
}
@@ -14788,7 +15017,7 @@ static struct pci_dev *tg3_find_peer(struct tg3 *tp)
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
u32 reg;
/* All devices that use the alternate
@@ -14800,7 +15029,10 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
reg = TG3PCI_GEN2_PRODID_ASICREV;
else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -14822,46 +15054,47 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
/* Wrong chip ID in 5752 A0. This code can be removed later
* as A0 is not in production.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tg3_flag_set(tp, 5717_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766)
tg3_flag_set(tp, 57765_CLASS);
- if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, 5755_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tg3_flag_set(tp, 5780_CLASS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5906 ||
tg3_flag(tp, 5755_PLUS) ||
tg3_flag(tp, 5780_CLASS))
tg3_flag_set(tp, 5750_PLUS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
tg3_flag(tp, 5750_PLUS))
tg3_flag_set(tp, 5705_PLUS);
}
@@ -14871,13 +15104,13 @@ static bool tg3_10_100_only_device(struct tg3 *tp,
{
u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
- (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+ if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
+ (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET))
return true;
if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5705) {
if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
return true;
} else {
@@ -14938,8 +15171,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* enable this workaround if the 5703 is on the secondary
* bus of these ICH bridges.
*/
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
- (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
+ (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -14979,7 +15212,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
}
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5701) {
static struct tg3_dev_id {
u32 vendor;
u32 device;
@@ -15039,29 +15272,29 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} while (bridge);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
+ tg3_asic_rev(tp) == ASIC_REV_5714)
tp->pdev_peer = tg3_find_peer(tp);
/* Determine TSO capabilities */
- if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
else if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, HW_TSO_2);
else if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, HW_TSO_1);
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
- tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
+ tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
tg3_flag_clear(tp, TSO_BUG);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+ } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
tg3_flag_set(tp, TSO_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
tp->fw_needed = FIRMWARE_TG3TSO;
@@ -15083,22 +15316,22 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->fw_needed = NULL;
}
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSI);
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
- tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5750_BX ||
+ (tg3_asic_rev(tp) == ASIC_REV_5714 &&
+ tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
tp->pdev_peer == tp->pdev))
tg3_flag_clear(tp, SUPPORT_MSI);
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_set(tp, 1SHOT_MSI);
}
@@ -15114,25 +15347,26 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rxq_max = TG3_RSS_MAX_NUM_QS;
tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
tp->txq_max = tp->irq_max - 1;
}
if (tg3_flag(tp, 5755_PLUS) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ tg3_asic_rev(tp) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719)
tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+ tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
@@ -15150,20 +15384,19 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
- ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tg3_flag_clear(tp, HW_TSO_2);
tg3_flag_clear(tp, TSO_CAPABLE);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
tg3_flag_set(tp, CLKREQ_BUG);
- } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
tg3_flag_set(tp, L1PLLPD_EN);
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
/* BCM5785 devices are effectively PCIe devices, and should
* follow PCIe codepaths, but do not have a PCIe capabilities
* section.
@@ -15196,7 +15429,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&tp->pci_cacheline_sz);
pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
&tp->pci_lat_timer);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
tp->pci_lat_timer < 64) {
tp->pci_lat_timer = 64;
pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
@@ -15206,7 +15439,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Important! -- It is critical that the PCI-X hw workaround
* situation is decided before the first MMIO register access.
*/
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+ if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
/* 5700 BX chips need to have their TX producer index
* mailboxes written twice to work around a bug.
*/
@@ -15248,7 +15481,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, PCI_32BIT);
/* Chip-specific fixup from Broadcom driver */
- if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+ if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
(!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
@@ -15265,9 +15498,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Various workaround register access methods */
if (tg3_flag(tp, PCIX_TARGET_HWBUG))
tp->write32 = tg3_write_indirect_reg32;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
(tg3_flag(tp, PCI_EXPRESS) &&
- tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
/*
* Back to back register writes can cause problems on these
* chips, the workaround is to read back all reg writes
@@ -15299,7 +15532,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pci_cmd &= ~PCI_COMMAND_MEMORY;
pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->read32_mbox = tg3_read32_mbox_5906;
tp->write32_mbox = tg3_write32_mbox_5906;
tp->write32_tx_mbox = tg3_write32_mbox_5906;
@@ -15308,8 +15541,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->write32 == tg3_write_indirect_reg32 ||
(tg3_flag(tp, PCIX_MODE) &&
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+ (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701)))
tg3_flag_set(tp, SRAM_USE_CONFIG);
/* The memory arbiter has to be enabled in order for SRAM accesses
@@ -15321,7 +15554,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tg3_flag(tp, PCIX_MODE)) {
pci_read_config_dword(tp->pdev,
@@ -15329,21 +15562,23 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&val);
tp->pci_fn = val & 0x7;
}
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
- if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
- NIC_SRAM_CPMUSTAT_SIG) {
- tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
- tp->pci_fn = tp->pci_fn ? 1 : 0;
- }
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
- tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
- if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
- NIC_SRAM_CPMUSTAT_SIG) {
+ if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
+ val = tr32(TG3_CPMU_STATUS);
+
+ if (tg3_asic_rev(tp) == ASIC_REV_5717)
+ tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
+ else
tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
TG3_CPMU_STATUS_FSHFT_5719;
- }
+ }
+
+ if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
+ tp->write32_tx_mbox = tg3_write_flush_reg32;
+ tp->write32_rx_mbox = tg3_write_flush_reg32;
}
/* Get eeprom hw config before calling tg3_set_power_state().
@@ -15381,18 +15616,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* It is also used as eeprom write protect on LOMs.
*/
tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
tg3_flag(tp, EEPROM_WRITE_PROT))
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1);
/* Unused GPIO3 must be driven as output on 5752 because there
* are no pull-up resistors on unused GPIO pins.
*/
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ else if (tg3_asic_rev(tp) == ASIC_REV_5752)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780 ||
tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
@@ -15406,6 +15641,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
GRC_LCLCTRL_GPIO_OUTPUT0;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->grc_local_ctrl |=
+ tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
+
/* Switch out of Vaux if it is a NIC */
tg3_pwrsrc_switch_to_vmain(tp);
@@ -15416,42 +15655,42 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tg3_flag_set(tp, JUMBO_RING_ENABLE);
/* Determine WakeOnLan speed to use. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
tg3_flag_clear(tp, WOL_SPEED_100MB);
} else {
tg3_flag_set(tp, WOL_SPEED_100MB);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ if (tg3_asic_rev(tp) == ASIC_REV_5906)
tp->phy_flags |= TG3_PHYFLG_IS_FET;
/* A few boards don't want Ethernet@WireSpeed phy feature */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
- (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5705 &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
+ (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
(tp->phy_flags & TG3_PHYFLG_IS_FET) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
- GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+ if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
+ tg3_chip_rev(tp) == CHIPREV_5704_AX)
tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
if (tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+ tg3_asic_rev(tp) != ASIC_REV_5785 &&
+ tg3_asic_rev(tp) != ASIC_REV_57780 &&
!tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
+ tg3_asic_rev(tp) == ASIC_REV_5787 ||
+ tg3_asic_rev(tp) == ASIC_REV_5784 ||
+ tg3_asic_rev(tp) == ASIC_REV_5761) {
if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
@@ -15461,8 +15700,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->phy_flags |= TG3_PHYFLG_BER_BUG;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) {
tp->phy_otp = tg3_read_otp_phycfg(tp);
if (tp->phy_otp == 0)
tp->phy_otp = TG3_OTP_DEFAULT;
@@ -15474,20 +15713,20 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->mi_mode = MAC_MI_MODE_BASE;
tp->coalesce_mode = 0;
- if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+ if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
+ tg3_chip_rev(tp) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
/* Set these bits to enable statistics workaround. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
tp->coalesce_mode |= HOSTCC_MODE_ATTN;
tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
tg3_flag_set(tp, USE_PHYLIB);
err = tg3_mdio_init(tp);
@@ -15496,7 +15735,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
GRC_MODE_WORD_SWAP_B2HRX_DATA |
GRC_MODE_B2HRX_ENABLE |
@@ -15516,12 +15756,10 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
!tg3_flag(tp, PCIX_TARGET_HWBUG)) {
- u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
-
- if (chiprevid == CHIPREV_ID_5701_A0 ||
- chiprevid == CHIPREV_ID_5701_B0 ||
- chiprevid == CHIPREV_ID_5701_B2 ||
- chiprevid == CHIPREV_ID_5701_B5) {
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
+ tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
void __iomem *sram_base;
/* Write some dummy words into the SRAM status block
@@ -15544,13 +15782,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
(grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
tg3_flag_set(tp, IS_5788);
if (!tg3_flag(tp, IS_5788) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tg3_asic_rev(tp) != ASIC_REV_5700)
tg3_flag_set(tp, TAGGED_STATUS);
if (tg3_flag(tp, TAGGED_STATUS)) {
tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
@@ -15583,7 +15821,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
else
tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
@@ -15593,7 +15831,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* change bit implementation, so we must use the
* status register in those cases.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+ if (tg3_asic_rev(tp) == ASIC_REV_5700)
tg3_flag_set(tp, USE_LINKCHG_REG);
else
tg3_flag_clear(tp, USE_LINKCHG_REG);
@@ -15603,7 +15841,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* upon subsystem IDs.
*/
if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ tg3_asic_rev(tp) == ASIC_REV_5701 &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
tg3_flag_set(tp, USE_LINKCHG_REG);
@@ -15617,7 +15855,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -15634,9 +15872,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Increment the rx prod index on the rx std ring by at most
* 8 for these chips to work around hw errata.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
+ tg3_asic_rev(tp) == ASIC_REV_5752 ||
+ tg3_asic_rev(tp) == ASIC_REV_5755)
tp->rx_std_max_post = 8;
if (tg3_flag(tp, ASPM_WORKAROUND))
@@ -15658,7 +15896,6 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
addr = of_get_property(dp, "local-mac-address", &len);
if (addr && len == 6) {
memcpy(dev->dev_addr, addr, 6);
- memcpy(dev->perm_addr, dev->dev_addr, 6);
return 0;
}
return -ENODEV;
@@ -15669,7 +15906,6 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
struct net_device *dev = tp->dev;
memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
- memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
return 0;
}
#endif
@@ -15679,14 +15915,21 @@ static int tg3_get_device_address(struct tg3 *tp)
struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
+ int err;
#ifdef CONFIG_SPARC
if (!tg3_get_macaddr_sparc(tp))
return 0;
#endif
+ if (tg3_flag(tp, IS_SSB_CORE)) {
+ err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
+ if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ return 0;
+ }
+
mac_offset = 0x7c;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+ if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
tg3_flag(tp, 5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
@@ -15699,7 +15942,7 @@ static int tg3_get_device_address(struct tg3 *tp)
mac_offset = 0xcc;
if (tp->pci_fn > 1)
mac_offset += 0x18c;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
mac_offset = 0x10;
/* First try to get it from MAC address mailbox. */
@@ -15746,7 +15989,6 @@ static int tg3_get_device_address(struct tg3 *tp)
#endif
return -EINVAL;
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return 0;
}
@@ -15768,8 +16010,8 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
/* On 5703 and later chips, the boundary bits have no
* effect.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701 &&
!tg3_flag(tp, PCI_EXPRESS))
goto out;
@@ -16007,14 +16249,14 @@ static int tg3_test_dma(struct tg3 *tp)
/* DMA read watermark not used on PCIE */
tp->dma_rwctrl |= 0x00180000;
} else if (!tg3_flag(tp, PCIX_MODE)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+ if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
+ tg3_asic_rev(tp) == ASIC_REV_5750)
tp->dma_rwctrl |= 0x003f0000;
else
tp->dma_rwctrl |= 0x003f000f;
} else {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704) {
u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
u32 read_water = 0x7;
@@ -16023,35 +16265,37 @@ static int tg3_test_dma(struct tg3 *tp)
* better performance.
*/
if (tg3_flag(tp, 40BIT_DMA_BUG) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl |= 0x8000;
else if (ccval == 0x6 || ccval == 0x7)
tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703)
read_water = 4;
/* Set bit 23 to enable PCIX hw bug fix */
tp->dma_rwctrl |=
(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
(1 << 23);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
/* 5780 always in PCIX mode */
tp->dma_rwctrl |= 0x00144000;
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+ } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
/* 5714 always in PCIX mode */
tp->dma_rwctrl |= 0x00148000;
} else {
tp->dma_rwctrl |= 0x001b000f;
}
}
+ if (tg3_flag(tp, ONE_DMA_AT_ONCE))
+ tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+ if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
+ tg3_asic_rev(tp) == ASIC_REV_5704)
tp->dma_rwctrl &= 0xfffffff0;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
+ tg3_asic_rev(tp) == ASIC_REV_5701) {
/* Remove this if it causes problems for some boards. */
tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
@@ -16075,8 +16319,8 @@ static int tg3_test_dma(struct tg3 *tp)
tg3_switch_clocks(tp);
#endif
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+ if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
+ tg3_asic_rev(tp) != ASIC_REV_5701)
goto out;
/* It is best to perform DMA test with maximum write burst size
@@ -16195,7 +16439,7 @@ static void tg3_init_bufmgr_config(struct tg3 *tp)
DEFAULT_MB_MACRX_LOW_WATER_5705;
tp->bufmgr_config.mbuf_high_water =
DEFAULT_MB_HIGH_WATER_5705;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ if (tg3_asic_rev(tp) == ASIC_REV_5906) {
tp->bufmgr_config.mbuf_mac_rx_low_water =
DEFAULT_MB_MACRX_LOW_WATER_5906;
tp->bufmgr_config.mbuf_high_water =
@@ -16253,6 +16497,7 @@ static char *tg3_phy_string(struct tg3 *tp)
case TG3_PHY_ID_BCM57765: return "57765";
case TG3_PHY_ID_BCM5719C: return "5719C";
case TG3_PHY_ID_BCM5720C: return "5720C";
+ case TG3_PHY_ID_BCM5762: return "5762C";
case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
@@ -16389,6 +16634,18 @@ static int tg3_init_one(struct pci_dev *pdev,
else
tp->msg_enable = TG3_DEF_MSG_ENABLE;
+ if (pdev_is_ssb_gige_core(pdev)) {
+ tg3_flag_set(tp, IS_SSB_CORE);
+ if (ssb_gige_must_flush_posted_writes(pdev))
+ tg3_flag_set(tp, FLUSH_POSTED_WRITES);
+ if (ssb_gige_one_dma_at_once(pdev))
+ tg3_flag_set(tp, ONE_DMA_AT_ONCE);
+ if (ssb_gige_have_roboswitch(pdev))
+ tg3_flag_set(tp, ROBOSWITCH);
+ if (ssb_gige_is_rgmii(pdev))
+ tg3_flag_set(tp, RGMII_MODE);
+ }
+
/* The word/byte swap controls here control register access byte
* swapping. DMA data byte swapping is controlled in the GRC_MODE
* setting below.
@@ -16429,7 +16686,10 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
tg3_flag_set(tp, ENABLE_APE);
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
@@ -16501,7 +16761,7 @@ static int tg3_init_one(struct pci_dev *pdev,
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
- if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (tg3_flag(tp, 5755_PLUS))
@@ -16521,11 +16781,11 @@ static int tg3_init_one(struct pci_dev *pdev,
if (features & NETIF_F_IPV6_CSUM)
features |= NETIF_F_TSO6;
if (tg3_flag(tp, HW_TSO_3) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ tg3_asic_rev(tp) == ASIC_REV_5761 ||
+ (tg3_asic_rev(tp) == ASIC_REV_5784 &&
+ tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
+ tg3_asic_rev(tp) == ASIC_REV_5785 ||
+ tg3_asic_rev(tp) == ASIC_REV_57780)
features |= NETIF_F_TSO_ECN;
}
@@ -16537,14 +16797,14 @@ static int tg3_init_one(struct pci_dev *pdev,
* MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
* loopback for the remaining devices.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
!tg3_flag(tp, CPMU_PRESENT))
/* Add the loopback capability */
features |= NETIF_F_LOOPBACK;
dev->hw_features |= features;
- if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+ if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
!tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
tg3_flag_set(tp, MAX_RXPEND_64);
@@ -16623,8 +16883,9 @@ static int tg3_init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
if (tg3_flag(tp, 5717_PLUS)) {
@@ -16634,6 +16895,8 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_timer_init(tp);
+ tg3_carrier_off(tp);
+
err = register_netdev(dev);
if (err) {
dev_err(&pdev->dev, "Cannot register net device, aborting\n");
@@ -16642,7 +16905,7 @@ static int tg3_init_one(struct pci_dev *pdev,
netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
tp->board_part_number,
- tp->pci_chip_rev_id,
+ tg3_chip_rev_id(tp),
tg3_bus_string(tp, str),
dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index d330e81f579..8d7d4c2ab5d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2012 Broadcom Corporation.
+ * Copyright (C) 2007-2013 Broadcom Corporation.
*/
#ifndef _T3_H
@@ -65,6 +65,9 @@
#define TG3PCI_DEVICE_TIGON3_57766 0x1686
#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
+#define TG3PCI_DEVICE_TIGON3_5762 0x1687
+#define TG3PCI_DEVICE_TIGON3_5725 0x1643
+#define TG3PCI_DEVICE_TIGON3_5727 0x16f3
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -117,9 +120,7 @@
#define MISC_HOST_CTRL_TAGGED_STATUS 0x00000200
#define MISC_HOST_CTRL_CHIPREV 0xffff0000
#define MISC_HOST_CTRL_CHIPREV_SHIFT 16
-#define GET_CHIP_REV_ID(MISC_HOST_CTRL) \
- (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
- MISC_HOST_CTRL_CHIPREV_SHIFT)
+
#define CHIPREV_ID_5700_A0 0x7000
#define CHIPREV_ID_5700_A1 0x7001
#define CHIPREV_ID_5700_B0 0x7100
@@ -159,7 +160,8 @@
#define CHIPREV_ID_57765_A0 0x57785000
#define CHIPREV_ID_5719_A0 0x05719000
#define CHIPREV_ID_5720_A0 0x05720000
-#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
+#define CHIPREV_ID_5762_A0 0x05762000
+
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
#define ASIC_REV_5703 0x01
@@ -182,7 +184,7 @@
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
#define ASIC_REV_57766 0x57766
-#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
+#define ASIC_REV_5762 0x5762
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
#define CHIPREV_5700_CX 0x72
@@ -195,7 +197,6 @@
#define CHIPREV_5784_AX 0x57840
#define CHIPREV_5761_AX 0x57610
#define CHIPREV_57765_AX 0x577650
-#define GET_METAL_REV(CHIP_REV_ID) ((CHIP_REV_ID) & 0xff)
#define METAL_REV_A0 0x00
#define METAL_REV_A1 0x01
#define METAL_REV_B0 0x00
@@ -774,7 +775,7 @@
#define SG_DIG_AUTONEG_ERROR 0x00000001
#define TG3_TX_TSTAMP_LSB 0x000005c0
#define TG3_TX_TSTAMP_MSB 0x000005c4
-#define TG3_TSTAMP_MASK 0x7fffffffffffffff
+#define TG3_TSTAMP_MASK 0x7fffffffffffffffLL
/* 0x5c8 --> 0x600 unused */
#define MAC_TX_MAC_STATE_BASE 0x00000600 /* 16 bytes */
#define MAC_RX_MAC_STATE_BASE 0x00000610 /* 20 bytes */
@@ -1159,6 +1160,8 @@
#define CPMU_MUTEX_GNT_DRIVER 0x00001000
#define TG3_CPMU_PHY_STRAP 0x00003664
#define TG3_CPMU_PHY_STRAP_IS_SERDES 0x00000020
+#define TG3_CPMU_PADRNG_CTL 0x00003668
+#define TG3_CPMU_PADRNG_CTL_RDIV2 0x00040000
/* 0x3664 --> 0x36b0 unused */
#define TG3_CPMU_EEE_MODE 0x000036b0
@@ -1178,6 +1181,7 @@
#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
+#define TG3_CPMU_EEE_LNKIDL_APE_TX_MT 0x00000002
/* 0x36c0 --> 0x36d0 unused */
#define TG3_CPMU_EEE_CTRL 0x000036d0
@@ -1400,7 +1404,10 @@
#define RDMAC_STATUS_FIFOURUN 0x00000080
#define RDMAC_STATUS_FIFOOREAD 0x00000100
#define RDMAC_STATUS_LNGREAD 0x00000200
-/* 0x4808 --> 0x4900 unused */
+/* 0x4808 --> 0x4890 unused */
+
+#define TG3_RDMA_RSRVCTRL_REG2 0x00004890
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL2 0x000048a0
#define TG3_RDMA_RSRVCTRL_REG 0x00004900
#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
@@ -1850,6 +1857,7 @@
#define FLASH_VENDOR_SST_SMALL 0x00000001
#define FLASH_VENDOR_SST_LARGE 0x02000001
#define NVRAM_CFG1_5752VENDOR_MASK 0x03c00003
+#define NVRAM_CFG1_5762VENDOR_MASK 0x03e00003
#define FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ 0x00000000
#define FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ 0x02000000
#define FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED 0x02000003
@@ -1910,6 +1918,8 @@
#define FLASH_5717VENDOR_ST_45USPT 0x03400001
#define FLASH_5720_EEPROM_HD 0x00000001
#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5762_EEPROM_HD 0x02000001
+#define FLASH_5762_EEPROM_LD 0x02000003
#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
@@ -2365,6 +2375,20 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
+#define TG3_APE_OTP_CTRL 0x00e8
+#define APE_OTP_CTRL_PROG_EN 0x200000
+#define APE_OTP_CTRL_CMD_RD 0x000000
+#define APE_OTP_CTRL_START 0x000001
+#define TG3_APE_OTP_STATUS 0x00ec
+#define APE_OTP_STATUS_CMD_DONE 0x000001
+#define TG3_APE_OTP_ADDR 0x00f0
+#define APE_OTP_ADDR_CPU_ENABLE 0x80000000
+#define TG3_APE_OTP_RD_DATA 0x00f8
+
+#define OTP_ADDRESS_MAGIC0 0x00000050
+#define TG3_OTP_MAGIC0_VALID(val) \
+ ((((val) & 0xf0000000) == 0xa0000000) ||\
+ (((val) & 0x0f000000) == 0x0a000000))
/* APE shared memory. Accessible through BAR1 */
#define TG3_APE_SHMEM_BASE 0x4000
@@ -3030,6 +3054,11 @@ enum TG3_FLAGS {
TG3_FLAG_57765_PLUS,
TG3_FLAG_57765_CLASS,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_IS_SSB_CORE,
+ TG3_FLAG_FLUSH_POSTED_WRITES,
+ TG3_FLAG_ROBOSWITCH,
+ TG3_FLAG_ONE_DMA_AT_ONCE,
+ TG3_FLAG_RGMII_MODE,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -3206,6 +3235,7 @@ struct tg3 {
#define TG3_PHY_ID_BCM57765 0x5c0d8a40
#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
+#define TG3_PHY_ID_BCM5762 0x85803780
#define TG3_PHY_ID_BCM5906 0xdc00ac40
#define TG3_PHY_ID_BCM8002 0x60010140
#define TG3_PHY_ID_INVALID 0xffffffff
@@ -3230,6 +3260,7 @@ struct tg3 {
(X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
(X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
(X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+ (X) == TG3_PHY_ID_BCM5720C || (X) == TG3_PHY_ID_BCM5762 || \
(X) == TG3_PHY_ID_BCM8002)
u32 phy_flags;
@@ -3320,10 +3351,22 @@ struct tg3 {
const struct firmware *fw;
u32 fw_len; /* includes BSS */
-#if IS_ENABLED(CONFIG_HWMON)
struct device *hwmon_dev;
-#endif
bool link_up;
};
+/* Accessor macros for chip and asic attributes
+ *
+ * nb: Using static inlines equivalent to the accessor macros generates
+ * larger object code with gcc 4.7.
+ * Using statement expression macros to check tp with
+ * typecheck(struct tg3 *, tp) also creates larger objects.
+ */
+#define tg3_chip_rev_id(tp) \
+ ((tp)->pci_chip_rev_id)
+#define tg3_asic_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 12)
+#define tg3_chip_rev(tp) \
+ ((tp)->pci_chip_rev_id >> 8)
+
#endif /* !(_T3_H) */
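The accessor macros added above fix the bit layout of pci_chip_rev_id in one place: the full value is the chip rev ID, shifting right by 8 yields the chip rev, and shifting right by 12 yields the ASIC rev. A small user-space sketch of that decomposition, using the CHIPREV_ID_5762_A0 constant from the hunk above (the helper functions are illustrative stand-ins for the driver's macros, not part of the patch):

	#include <stdio.h>

	#define CHIPREV_ID_5762_A0	0x05762000	/* from the hunk above */

	static unsigned int chip_rev_id(unsigned int v) { return v; }
	static unsigned int chip_rev(unsigned int v)    { return v >> 8; }
	static unsigned int asic_rev(unsigned int v)    { return v >> 12; }

	int main(void)
	{
		unsigned int id = CHIPREV_ID_5762_A0;

		printf("chip rev id: 0x%08x\n", chip_rev_id(id));	/* 0x05762000 */
		printf("chip rev:    0x%x\n", chip_rev(id));		/* 0x57620 */
		printf("asic rev:    0x%x\n", asic_rev(id));		/* 0x5762, i.e. ASIC_REV_5762 */
		return 0;
	}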
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index b9d4bb9530e..79039439bfd 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -287,7 +287,7 @@ static int macb_mii_probe(struct net_device *dev)
}
/* attach the mac to the phy */
- ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
bp->phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f7f02900f65..a170065b597 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1463,7 +1463,6 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index c8fdeaae56c..20d2085f61c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -131,7 +131,7 @@ static void t1_set_rxmode(struct net_device *dev)
static void link_report(struct port_info *p)
{
if (!netif_carrier_ok(p->dev))
- printk(KERN_INFO "%s: link down\n", p->dev->name);
+ netdev_info(p->dev, "link down\n");
else {
const char *s = "10Mbps";
@@ -141,9 +141,9 @@ static void link_report(struct port_info *p)
case SPEED_100: s = "100Mbps"; break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
- p->dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ netdev_info(p->dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
}
}
@@ -976,19 +976,13 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
-
int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
- if (!version_printed) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
- DRV_VERSION);
- ++version_printed;
- }
+ pr_info_once("%s - version %s\n", DRV_DESCRIPTION, DRV_VERSION);
err = pci_enable_device(pdev);
if (err)
@@ -1124,8 +1118,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < bi->port_number; ++i) {
err = register_netdev(adapter->port[i].dev);
if (err)
- pr_warning("%s: cannot register net device %s, skipping\n",
- pci_name(pdev), adapter->port[i].dev->name);
+ pr_warn("%s: cannot register net device %s, skipping\n",
+ pci_name(pdev), adapter->port[i].dev->name);
else {
/*
* Change the name we use for messages to the name of
@@ -1143,10 +1137,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_adapter_res;
}
- printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
- bi->desc, adapter->params.chip_revision,
- adapter->params.pci.is_pcix ? "PCIX" : "PCI",
- adapter->params.pci.speed, adapter->params.pci.width);
+ pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
+ adapter->name, bi->desc, adapter->params.chip_revision,
+ adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+ adapter->params.pci.speed, adapter->params.pci.width);
/*
* Set the T1B ASIC and memory clocks.
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index d84872e8817..48297692515 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1822,8 +1822,8 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(skb->len < ETH_HLEN ||
skb->len > dev->mtu + eth_hdr_len(skb->data))) {
- pr_debug("%s: packet size %d hdr %d mtu%d\n", dev->name,
- skb->len, eth_hdr_len(skb->data), dev->mtu);
+ netdev_dbg(dev, "packet size %d hdr %d mtu%d\n",
+ skb->len, eth_hdr_len(skb->data), dev->mtu);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1831,7 +1831,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol == IPPROTO_UDP) {
if (unlikely(skb_checksum_help(skb))) {
- pr_debug("%s: unable to do udp checksum\n", dev->name);
+ netdev_dbg(dev, "unable to do udp checksum\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index f15ee326d5c..2b5e62193ce 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -29,6 +29,9 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -153,7 +156,7 @@ struct workqueue_struct *cxgb3_wq;
static void link_report(struct net_device *dev)
{
if (!netif_carrier_ok(dev))
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
else {
const char *s = "10Mbps";
const struct port_info *p = netdev_priv(dev);
@@ -170,8 +173,9 @@ static void link_report(struct net_device *dev)
break;
}
- printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
- p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+ netdev_info(dev, "link up, %s, %s-duplex\n",
+ s, p->link_config.duplex == DUPLEX_FULL
+ ? "full" : "half");
}
}
@@ -318,10 +322,10 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id)
const struct port_info *pi = netdev_priv(dev);
if (pi->phy.modtype == phy_modtype_none)
- printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
+ netdev_info(dev, "PHY module unplugged\n");
else
- printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
- mod_str[pi->phy.modtype]);
+ netdev_info(dev, "%s PHY module inserted\n",
+ mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
@@ -1422,8 +1426,7 @@ static int cxgb_open(struct net_device *dev)
if (is_offload(adapter) && !ofld_disable) {
err = offload_open(dev);
if (err)
- printk(KERN_WARNING
- "Could not initialize offload capabilities\n");
+ pr_warn("Could not initialize offload capabilities\n");
}
netif_set_real_num_tx_queues(dev, pi->nqsets);
@@ -3132,14 +3135,13 @@ static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
if (!test_bit(i, &adap->registered_device_map))
continue;
- printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
- dev->name, ai->desc, pi->phy.desc,
- is_offload(adap) ? "R" : "", adap->params.rev, buf,
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
+ netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
+ ai->desc, pi->phy.desc,
+ is_offload(adap) ? "R" : "", adap->params.rev, buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
if (adap->name == dev->name && adap->params.vpd.mclk)
- printk(KERN_INFO
- "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
+ pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
adap->name, t3_mc7_size(&adap->cm) >> 20,
t3_mc7_size(&adap->pmtx) >> 20,
t3_mc7_size(&adap->pmrx) >> 20,
@@ -3177,24 +3179,18 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int version_printed;
-
int i, err, pci_using_dac = 0;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
struct port_info *pi;
- if (!version_printed) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- ++version_printed;
- }
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
if (!cxgb3_wq) {
cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
if (!cxgb3_wq) {
- printk(KERN_ERR DRV_NAME
- ": cannot initialize work queue\n");
+ pr_err("cannot initialize work queue\n");
return -ENOMEM;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 942dace361d..4232767862b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -30,6 +30,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
@@ -62,9 +64,8 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
- struct dst_entry *new, struct neighbour *new_neigh,
- const void *daddr);
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh, const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -182,14 +183,17 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
struct net_device *dev = adapter->port[i];
if (!memcmp(dev->dev_addr, mac, ETH_ALEN)) {
+ rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- rcu_read_lock();
dev = __vlan_find_dev_deep(dev, vlan);
- rcu_read_unlock();
} else if (netif_is_bond_slave(dev)) {
- while (dev->master)
- dev = dev->master;
+ struct net_device *upper_dev;
+
+ while ((upper_dev =
+ netdev_master_upper_dev_get_rcu(dev)))
+ dev = upper_dev;
}
+ rcu_read_unlock();
return dev;
}
}
@@ -232,8 +236,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
if ((val >> S_MAXRXDATA) != 0x3f60) {
val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
val |= V_MAXRXDATA(0x3f60);
- printk(KERN_INFO
- "%s, iscsi set MaxRxData to 16224 (0x%x).\n",
+ pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
adapter->name, val);
t3_write_reg(adapter, A_TP_PARA_REG2, val);
}
@@ -253,8 +256,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
for (i = 0; i < 4; i++)
val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
- printk(KERN_INFO
- "%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u.\n",
+ pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
adapter->name, val, uiip->pgsz_factor[0],
uiip->pgsz_factor[1], uiip->pgsz_factor[2],
uiip->pgsz_factor[3]);
@@ -706,8 +708,7 @@ static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_smt_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -718,8 +719,7 @@ static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_l2t_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -730,8 +730,7 @@ static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
struct cpl_rte_write_rpl *rpl = cplhdr(skb);
if (rpl->status != CPL_ERR_NONE)
- printk(KERN_ERR
- "Unexpected RTE_WRITE_RPL status %u for entry %u\n",
+ pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
rpl->status, GET_TID(rpl));
return CPL_RET_BUF_DONE;
@@ -751,7 +750,7 @@ static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
t3c_tid->
ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_OPEN_RPL);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -769,7 +768,7 @@ static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[p->opcode] (dev, skb,
t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -787,7 +786,7 @@ static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[p->opcode]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, p->opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -814,7 +813,7 @@ static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_PASS_ACCEPT_REQ);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -908,7 +907,7 @@ static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
(dev, skb, t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, CPL_ACT_ESTABLISH);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -954,7 +953,7 @@ static int do_term(struct t3cdev *dev, struct sk_buff *skb)
return t3c_tid->client->handlers[opcode] (dev, skb,
t3c_tid->ctx);
} else {
- printk(KERN_ERR "%s: received clientless CPL command 0x%x\n",
+ pr_err("%s: received clientless CPL command 0x%x\n",
dev->name, opcode);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -970,10 +969,9 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->old_neigh,
- nr->new, nr->new_neigh,
+ cxgb_redirect(nr->old, nr->new, nr->neigh,
nr->daddr);
- cxgb_neigh_update(nr->new_neigh);
+ cxgb_neigh_update(nr->neigh);
break;
}
default:
@@ -991,8 +989,7 @@ static struct notifier_block nb = {
*/
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
- printk(KERN_ERR "%s: received bad CPL command 0x%x\n", dev->name,
- *skb->data);
+ pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}
@@ -1010,8 +1007,8 @@ void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
if (opcode < NUM_CPL_CMDS)
cpl_handlers[opcode] = h ? h : do_bad_cpl;
else
- printk(KERN_ERR "T3C: handler registration for "
- "opcode %x failed\n", opcode);
+ pr_err("T3C: handler registration for opcode %x failed\n",
+ opcode);
}
EXPORT_SYMBOL(t3_register_cpl_handler);
@@ -1030,9 +1027,8 @@ static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
if (ret & CPL_RET_UNKNOWN_TID) {
union opcode_tid *p = cplhdr(skb);
- printk(KERN_ERR "%s: CPL message (opcode %u) had "
- "unknown TID %u\n", dev->name, opcode,
- G_TID(ntohl(p->opcode_tid)));
+ pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
+ dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
}
#endif
if (ret & CPL_RET_BUF_DONE)
@@ -1096,7 +1092,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb) {
- printk(KERN_ERR "%s: cannot allocate skb!\n", __func__);
+ pr_err("%s: cannot allocate skb!\n", __func__);
return;
}
skb->priority = CPL_PRIORITY_CONTROL;
@@ -1111,11 +1107,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
- struct dst_entry *new, struct neighbour *new_neigh,
+static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
+ struct neighbour *neigh,
const void *daddr)
{
- struct net_device *olddev, *newdev;
+ struct net_device *dev;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1123,29 +1119,17 @@ static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = old_neigh->dev;
- newdev = new_neigh->dev;
+ dev = neigh->dev;
- if (!is_offloading(olddev))
- return;
- if (!is_offloading(newdev)) {
- printk(KERN_WARNING "%s: Redirect to non-offload "
- "device ignored.\n", __func__);
+ if (!is_offloading(dev))
return;
- }
- tdev = dev2t3cdev(olddev);
+ tdev = dev2t3cdev(dev);
BUG_ON(!tdev);
- if (tdev != dev2t3cdev(newdev)) {
- printk(KERN_WARNING "%s: Redirect to different "
- "offload device ignored.\n", __func__);
- return;
- }
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev, daddr);
+ e = t3_l2t_get(tdev, new, dev, daddr);
if (!e) {
- printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
- __func__);
+ pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
return;
}
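
Note on the redirect rework above: the NETEVENT_REDIRECT payload dereferenced in the nb_callback hunk (struct netevent_redirect) supplies the old and new dst_entry, a single neighbour and the destination address, so the old_neigh/new_neigh pair and the old-device/new-device sanity checks fall away. A minimal sketch of a consumer written against that layout follows; the field names are taken from the hunks above, while my_redirect() and my_neigh_update() are hypothetical stand-ins for driver helpers such as cxgb_redirect() and cxgb_neigh_update().

#include <linux/notifier.h>
#include <net/netevent.h>

static int my_netevent_cb(struct notifier_block *self,
			  unsigned long event, void *ctx)
{
	switch (event) {
	case NETEVENT_REDIRECT: {
		struct netevent_redirect *nr = ctx;

		/* One neighbour now describes the new path; the dst pair is
		 * still available for rewriting any cached L2 state.
		 */
		my_redirect(nr->old, nr->new, nr->neigh, nr->daddr);
		my_neigh_update(nr->neigh);
		break;
	}
	default:
		break;
	}
	return 0;
}
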
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index dd901c5061b..9d67eb794c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1278,7 +1278,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* update port statistics */
- if (skb->ip_summed == CHECKSUM_COMPLETE)
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
qs->port_stats[SGE_PSTAT_TX_CSUM]++;
if (skb_shinfo(skb)->gso_size)
qs->port_stats[SGE_PSTAT_TSO]++;
@@ -2130,8 +2130,10 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
- if (cpl->vlan_valid)
+ if (cpl->vlan_valid) {
+ qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
+ }
napi_gro_frags(&qs->napi);
}
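
Two small accounting points in the sge.c hunk above are worth spelling out. On transmit, CHECKSUM_PARTIAL is the value the stack sets when it wants the NIC to finish the checksum (CHECKSUM_COMPLETE is a receive-side value meaning the hardware already summed the payload), so it is the right flag to count as a TX checksum offload. On receive, the VLAN-extraction counter is bumped only when the hardware really stripped a tag and that tag is handed to the stack. A condensed fragment of the pattern, lifted from the hunks rather than buildable on its own:

	/* TX path: count an offloaded checksum only for CHECKSUM_PARTIAL */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;

	/* RX path: account the stripped tag together with delivering it */
	if (cpl->vlan_valid) {
		qs->port_stats[SGE_PSTAT_VLANEX]++;
		__vlan_hwaccel_put_tag(skb, ntohs(cpl->vlan));
	}
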
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 3dee68612c9..c74a898fcd4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3725,8 +3725,6 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(adapter->port[i]->dev_addr, hw_addr,
ETH_ALEN);
- memcpy(adapter->port[i]->perm_addr, hw_addr,
- ETH_ALEN);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c306df7d456..c6c05bfef0e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4027,8 +4027,7 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
VFRES_NEQ, VFRES_NETHCTRL,
VFRES_NIQFLINT, VFRES_NIQ,
VFRES_TC, VFRES_NVI,
- FW_PFVF_CMD_CMASK_GET(
- FW_PFVF_CMD_CMASK_MASK),
+ FW_PFVF_CMD_CMASK_MASK,
pfvfres_pmask(
adapter, pf, vf),
VFRES_NEXACTF,
@@ -5142,7 +5141,7 @@ static int __init cxgb4_init_module(void)
/* Debugfs support is optional, just warn if this fails */
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!cxgb4_debugfs_root)
- pr_warning("could not create debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
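
The module-init hunk above also shows the intended error handling for debugfs: the directory is purely diagnostic, so a failure to create it is only worth a pr_warn() (pr_warn() being the preferred spelling of pr_warning()) and must not fail the load. A sketch of that shape, with my_pci_driver and the root pointer as hypothetical names; the IS_ERR_OR_NULL() form mirrors the cxgb4vf hunks further down.

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pci.h>

static struct dentry *my_debugfs_root;		/* hypothetical */
static struct pci_driver my_pci_driver;		/* hypothetical; .name, .probe, ... */

static int __init my_module_init(void)
{
	/* Best effort: warn and carry on if debugfs is unavailable */
	my_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (IS_ERR_OR_NULL(my_debugfs_root))
		pr_warn("could not create debugfs entry, continuing\n");

	return pci_register_driver(&my_pci_driver);
}
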
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 22f3af5166b..4ce62031f62 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3603,7 +3603,6 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
p->lport = j;
p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
- memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
adap->port[i]->dev_id = j;
ret = ntohl(c.u.info.lstatus_to_modtype);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 611396c4b38..68eaa9c88c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -466,7 +466,6 @@ static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
- memcpy(adapter->port[pidx]->perm_addr, hw_addr, ETH_ALEN);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0188df70571..56b46ab2d4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -33,6 +33,8 @@
* SOFTWARE.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
@@ -196,11 +198,10 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
break;
}
- printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
- dev->name, s, fc);
+ netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
netif_carrier_off(dev);
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
@@ -2465,8 +2466,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
static int cxgb4vf_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int version_printed;
-
int pci_using_dac;
int err, pidx;
unsigned int pmask;
@@ -2478,10 +2477,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Print our driver banner the first time we're called to initialize a
* device.
*/
- if (version_printed == 0) {
- printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- version_printed = 1;
- }
+ pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
/*
* Initialize generic PCI device state.
@@ -2920,18 +2916,15 @@ static int __init cxgb4vf_module_init(void)
* Vet our module parameters.
*/
if (msi != MSI_MSIX && msi != MSI_MSI) {
- printk(KERN_WARNING KBUILD_MODNAME
- ": bad module parameter msi=%d; must be %d"
- " (MSI-X or MSI) or %d (MSI)\n",
- msi, MSI_MSIX, MSI_MSI);
+ pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
+ msi, MSI_MSIX, MSI_MSI);
return -EINVAL;
}
/* Debugfs support is optional, just warn if this fails */
cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
- printk(KERN_WARNING KBUILD_MODNAME ": could not create"
- " debugfs entry, continuing\n");
+ pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4vf_driver);
if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
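
The logging conversions in this file lean on pr_fmt(): defining it before the first #include makes every pr_*() call in the file prepend the module name to its format string, so call sites no longer spell out KBUILD_MODNAME by hand. pr_info_once() additionally prints only on the first call, replacing the hand-rolled version_printed latch, and the netdev_info() helpers prefix messages with the device identity instead of an explicit dev->name argument. A compact, hypothetical illustration (function and macro names are not from the driver):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the includes */

#include <linux/kernel.h>
#include <linux/netdevice.h>

#define MY_DRV_DESC	"example driver"	/* hypothetical */
#define MY_DRV_VERSION	"0.1"			/* hypothetical */

static void my_report(struct net_device *dev, bool link_ok)
{
	/* Printed once, prefixed "<module>: " via pr_fmt() */
	pr_info_once("%s - version %s\n", MY_DRV_DESC, MY_DRV_VERSION);

	if (link_ok)
		netdev_info(dev, "link up\n");	/* prefixed with the device name */
	else
		netdev_info(dev, "link down\n");
}
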
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 92170d50d9d..9488032d6d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1477,8 +1477,10 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(skb, rxq->rspq.idx);
- if (pkt->vlan_ex)
+ if (pkt->vlan_ex) {
__vlan_hwaccel_put_tag(skb, be16_to_cpu(pkt->vlan));
+ rxq->stats.vlan_ex++;
+ }
ret = napi_gro_frags(&rxq->rspq.napi);
if (ret == GRO_HELD)
@@ -1501,7 +1503,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
+ const struct cpl_rx_pkt *pkt = (void *)rsp;
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 78c55213eaf..354cbb78ed5 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -710,8 +710,8 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int ep93xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
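
The ep93xx (and dm9000, just below) get_drvinfo fix is the standard one for ethtool: the driver, version and bus_info members of struct ethtool_drvinfo are fixed-size character arrays, so an unbounded strcpy() can overrun them if a source string ever grows, while strlcpy() bounds the copy to the destination size and guarantees NUL termination. A minimal sketch, with the MY_DRV_* macros as stand-ins for whatever a driver defines:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define MY_DRV_NAME	"mydrv"		/* hypothetical */
#define MY_DRV_VERSION	"1.0"		/* hypothetical */

static void my_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	/* The destination fields are fixed-size arrays inside the struct */
	strlcpy(info->driver, MY_DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, MY_DRV_VERSION, sizeof(info->version));
}
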
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 64866ff1aea..ec1a233622c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -865,7 +865,6 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
}
memcpy(netdev->dev_addr, addr, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1491,7 +1490,8 @@ static int enic_request_intr(struct enic *enic)
for (i = 0; i < enic->rq_count; i++) {
intr = enic_msix_rq_intr(enic, i);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-rx-%d", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix_rq;
enic->msix[intr].devid = &enic->napi[i];
@@ -1499,20 +1499,23 @@ static int enic_request_intr(struct enic *enic)
for (i = 0; i < enic->wq_count; i++) {
intr = enic_msix_wq_intr(enic, i);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-tx-%d", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix_wq;
enic->msix[intr].devid = enic;
}
intr = enic_msix_err_intr(enic);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-err", netdev->name);
enic->msix[intr].isr = enic_isr_msix_err;
enic->msix[intr].devid = enic;
intr = enic_msix_notify_intr(enic);
- sprintf(enic->msix[intr].devname,
+ snprintf(enic->msix[intr].devname,
+ sizeof(enic->msix[intr].devname),
"%.11s-notify", netdev->name);
enic->msix[intr].isr = enic_isr_msix_notify;
enic->msix[intr].devid = enic;
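
The enic change is the same bounding idea applied to the MSI-X vector names: "%.11s" only truncates the interface-name portion, so with sprintf() the appended "-rx-%d" style suffix could still run past the end of the fixed devname buffer, whereas snprintf() with sizeof() caps the whole string and keeps it NUL terminated. A tiny illustrative helper (the names and buffer handling are assumptions, not the driver's layout):

#include <linux/kernel.h>

static void my_name_rx_irq(char *buf, size_t len, const char *ifname, int q)
{
	/* "%.11s" limits only the interface name; snprintf() limits the
	 * complete result, suffix included.
	 */
	snprintf(buf, len, "%.11s-rx-%d", ifname, q);
}
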
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c73472c369c..8cdf02503d1 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -434,9 +434,10 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
board_info_t *dm = to_dm9000_board(dev);
- strcpy(info->driver, CARDNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, to_platform_device(dm->dev)->name);
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ sizeof(info->bus_info));
}
static u32 dm9000_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
index 37940279ded..68262aa57d0 100644
--- a/drivers/net/ethernet/dec/Kconfig
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -17,21 +17,5 @@ config NET_VENDOR_DEC
your specific card in the following questions.
if NET_VENDOR_DEC
-
-config EWRK3
- tristate "EtherWORKS 3 (DE203, DE204, DE205) support"
- depends on ISA
- select CRC32
- ---help---
- This driver supports the DE203, DE204 and DE205 network (Ethernet)
- cards. If this is for you, say Y and read
- <file:Documentation/networking/ewrk3.txt> in the kernel source as
- well as the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ewrk3.
-
source "drivers/net/ethernet/dec/tulip/Kconfig"
-
endif # NET_VENDOR_DEC
diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile
index 1b01ed8d42c..32993fccbbf 100644
--- a/drivers/net/ethernet/dec/Makefile
+++ b/drivers/net/ethernet/dec/Makefile
@@ -2,5 +2,4 @@
# Makefile for the Digital Equipment Inc. network device drivers.
#
-obj-$(CONFIG_EWRK3) += ewrk3.o
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
deleted file mode 100644
index 9f992b95edd..00000000000
--- a/drivers/net/ethernet/dec/ewrk3.c
+++ /dev/null
@@ -1,1961 +0,0 @@
-/* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux.
-
- Written 1994 by David C. Davies.
-
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of
- the GNU General Public License, incorporated herein by reference.
-
- This driver is written for the Digital Equipment Corporation series
- of EtherWORKS ethernet cards:
-
- DE203 Turbo (BNC)
- DE204 Turbo (TP)
- DE205 Turbo (TP BNC)
-
- The driver has been tested on a relatively busy network using the DE205
- card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s
- (7.8Mb/s) to a DECstation 5000/200.
-
- The author may be reached at davies@maniac.ultranet.com.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'depca.c' and in turn from
- Donald Becker's 'lance.c' should be obvious.
-
- The DE203/4/5 boards all use a new proprietary chip in place of the
- LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422).
- Use the depca.c driver in the standard distribution for the LANCE based
- cards from DIGITAL; this driver will not work with them.
-
- The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O
- only makes all the card accesses through I/O transactions and no high
- (shared) memory is used. This mode provides a >48% performance penalty
- and is deprecated in this driver, although allowed to provide initial
- setup when hardstrapped.
-
- The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is
- no point in using any mode other than the 2kB mode - their performances
- are virtually identical, although the driver has been tested in the 2kB
- and 32kB modes. I would suggest you uncomment the line:
-
- FORCE_2K_MODE;
-
- to allow the driver to configure the card as a 2kB card at your current
- base address, thus leaving more room to clutter your system box with
- other memory hungry boards.
-
- As many ISA and EISA cards can be supported under this driver as you
- wish, limited primarily by the available IRQ lines, rather than by the
- available I/O addresses (24 ISA, 16 EISA). I have checked different
- configurations of multiple depca cards and ewrk3 cards and have not
- found a problem yet (provided you have at least depca.c v0.38) ...
-
- The board IRQ setting must be at an unused IRQ which is auto-probed
- using Donald Becker's autoprobe routines. All these cards are at
- {5,10,11,15}.
-
- No 16MB memory limitation should exist with this driver as DMA is not
- used and the common memory area is in low memory on the network card (my
- current system has 20MB and I've not had problems yet).
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) edit the source code near line 1898 to reflect the I/O address and
- IRQ you're using.
- 3) compile ewrk3.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the ewrk3 configuration turned off and reboot.
- 5) insmod ewrk3.o
- [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y]
- [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards]
- 6) run the net startup bits for your new eth?? interface manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- Note that autoprobing is not allowed in loadable modules - the system is
- already up and running and you're messing with interrupts.
-
- To unload a module, turn off the associated interface
- 'ifconfig eth?? down' then 'rmmod ewrk3'.
-
- Promiscuous mode has been turned off in this driver, but all the
- multicast address bits have been turned on. This improved the send
- performance on a busy network by about 13%.
-
- Ioctl's have now been provided (primarily because I wanted to grab some
- packet size statistics). They are patterned after 'plipconfig.c' from a
- suggestion by Alan Cox. Using these ioctls, you can enable promiscuous
- mode, add/delete multicast addresses, change the hardware address, get
- packet size distribution statistics and muck around with the control and
- status register. I'll add others if and when the need arises.
-
- TO DO:
- ------
-
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 26-aug-94 Initial writing. ALPHA code release.
- 0.11 31-aug-94 Fixed: 2k mode memory base calc.,
- LeMAC version calc.,
- IRQ vector assignments during autoprobe.
- 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card.
- Fixed up MCA hash table algorithm.
- 0.20 4-sep-94 Added IOCTL functionality.
- 0.21 14-sep-94 Added I/O mode.
- 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0.
- 0.22 16-sep-94 Added more IOCTLs & tidied up.
- 0.23 21-sep-94 Added transmit cut through.
- 0.24 31-oct-94 Added uid checks in some ioctls.
- 0.30 1-nov-94 BETA code release.
- 0.31 5-dec-94 Added check/allocate region code.
- 0.32 16-jan-95 Broadcast packet fix.
- 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- 0.40 27-Dec-95 Rationalise MODULE and autoprobe code.
- Rewrite for portability & updated.
- ALPHA support from <jestabro@amt.tay1.dec.com>
- Added verify_area() calls in ewrk3_ioctl() from
- suggestion by <heiko@colossus.escape.de>.
- Add new multicasting code.
- 0.41 20-Jan-96 Fix IRQ set up problem reported by
- <kenneth@bbs.sas.ntu.ac.sg>.
- 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
- 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com>
- 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net>
- 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com>
- 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com>
- 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua>
- ioctl locking, signature search cleanup <akropel1@rochester.rr.com>
-
- =========================================================================
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/ethtool.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/uaccess.h>
-
-#include "ewrk3.h"
-
-#define DRV_NAME "ewrk3"
-#define DRV_VERSION "0.48"
-
-static char version[] __initdata =
-DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n";
-
-#ifdef EWRK3_DEBUG
-static int ewrk3_debug = EWRK3_DEBUG;
-#else
-static int ewrk3_debug = 1;
-#endif
-
-#define EWRK3_NDA 0xffe0 /* No Device Address */
-
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-#ifndef EWRK3_SIGNATURE
-#define EWRK3_SIGNATURE {"DE203","DE204","DE205",""}
-#define EWRK3_STRLEN 8
-#endif
-
-#ifndef EWRK3_RAM_BASE_ADDRESSES
-#define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000}
-#endif
-
-/*
- ** Sets up the I/O area for the autoprobe.
- */
-#define EWRK3_IO_BASE 0x100 /* Start address for probe search */
-#define EWRK3_IOP_INC 0x20 /* I/O address increment */
-#define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */
-
-#ifndef MAX_NUM_EWRK3S
-#define MAX_NUM_EWRK3S 21
-#endif
-
-#ifndef EWRK3_EISA_IO_PORTS
-#define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#endif
-
-#ifndef MAX_EISA_SLOTS
-#define MAX_EISA_SLOTS 16
-#define EISA_SLOT_INC 0x1000
-#endif
-
-#define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */
-
-/*
- ** EtherWORKS 3 shared memory window sizes
- */
-#define IO_ONLY 0x00
-#define SHMEM_2K 0x800
-#define SHMEM_32K 0x8000
-#define SHMEM_64K 0x10000
-
-/*
- ** EtherWORKS 3 IRQ ENABLE/DISABLE
- */
-#define ENABLE_IRQs { \
- icr |= lp->irq_mask;\
- outb(icr, EWRK3_ICR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs { \
- icr = inb(EWRK3_ICR);\
- icr &= ~lp->irq_mask;\
- outb(icr, EWRK3_ICR); /* Disable the IRQs */\
-}
-
-/*
- ** EtherWORKS 3 START/STOP
- */
-#define START_EWRK3 { \
- csr = inb(EWRK3_CSR);\
- csr &= ~(CSR_TXD|CSR_RXD);\
- outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_EWRK3 { \
- csr = (CSR_TXD|CSR_RXD);\
- outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\
-}
-
-/*
- ** The EtherWORKS 3 private structure
- */
-#define EWRK3_PKT_STAT_SZ 16
-#define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase EWRK3_PKT_STAT_SZ */
-
-struct ewrk3_stats {
- u32 bins[EWRK3_PKT_STAT_SZ];
- u32 unicast;
- u32 multicast;
- u32 broadcast;
- u32 excessive_collisions;
- u32 tx_underruns;
- u32 excessive_underruns;
-};
-
-struct ewrk3_private {
- char adapter_name[80]; /* Name exported to /proc/ioports */
- u_long shmem_base; /* Shared memory start address */
- void __iomem *shmem;
- u_long shmem_length; /* Shared memory window length */
- struct ewrk3_stats pktStats; /* Private stats counters */
- u_char irq_mask; /* Adapter IRQ mask bits */
- u_char mPage; /* Maximum 2kB Page number */
- u_char lemac; /* Chip rev. level */
- u_char hard_strapped; /* Don't allow a full open */
- u_char txc; /* Transmit cut through */
- void __iomem *mctbl; /* Pointer to the multicast table */
- u_char led_mask; /* Used to reserve LED access for ethtool */
- spinlock_t hw_lock;
-};
-
-/*
- ** Force the EtherWORKS 3 card to be in 2kB MODE
- */
-#define FORCE_2K_MODE { \
- shmem_length = SHMEM_2K;\
- outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\
-}
-
-/*
- ** Public Functions
- */
-static int ewrk3_open(struct net_device *dev);
-static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t ewrk3_interrupt(int irq, void *dev_id);
-static int ewrk3_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static const struct ethtool_ops ethtool_ops_203;
-static const struct ethtool_ops ethtool_ops;
-
-/*
- ** Private functions
- */
-static int ewrk3_hw_init(struct net_device *dev, u_long iobase);
-static void ewrk3_init(struct net_device *dev);
-static int ewrk3_rx(struct net_device *dev);
-static int ewrk3_tx(struct net_device *dev);
-static void ewrk3_timeout(struct net_device *dev);
-
-static void EthwrkSignature(char *name, char *eeprom_image);
-static int DevicePresent(u_long iobase);
-static void SetMulticastFilter(struct net_device *dev);
-static int EISA_signature(char *name, s32 eisa_id);
-
-static int Read_EEPROM(u_long iobase, u_char eaddr);
-static int Write_EEPROM(short data, u_long iobase, u_char eaddr);
-static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType);
-
-static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq);
-static int isa_probe(struct net_device *dev, u_long iobase);
-static int eisa_probe(struct net_device *dev, u_long iobase);
-
-static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12};
-
-static char name[EWRK3_STRLEN + 1];
-static int num_ewrks3s;
-
-/*
- ** Miscellaneous defines...
- */
-#define INIT_EWRK3 {\
- outb(EEPROM_INIT, EWRK3_IOPR);\
- mdelay(1);\
-}
-
-#ifndef MODULE
-struct net_device * __init ewrk3_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- err = ewrk3_probe1(dev, dev->base_addr, dev->irq);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-
-}
-#endif
-
-static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
-{
- int err;
-
- dev->base_addr = iobase;
- dev->irq = irq;
-
- /* Address PROM pattern */
- err = isa_probe(dev, iobase);
- if (err != 0)
- err = eisa_probe(dev, iobase);
-
- if (err)
- return err;
-
- err = register_netdev(dev);
- if (err)
- release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
-
- return err;
-}
-
-static const struct net_device_ops ewrk3_netdev_ops = {
- .ndo_open = ewrk3_open,
- .ndo_start_xmit = ewrk3_queue_pkt,
- .ndo_stop = ewrk3_close,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_do_ioctl = ewrk3_ioctl,
- .ndo_tx_timeout = ewrk3_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init
-ewrk3_hw_init(struct net_device *dev, u_long iobase)
-{
- struct ewrk3_private *lp;
- int i, status = 0;
- u_long mem_start, shmem_length;
- u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0;
- u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0;
-
- /*
- ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot.
- ** This also disables the EISA_ENABLE bit in the EISA Control Register.
- */
- if (iobase > 0x400)
- eisa_cr = inb(EISA_CR);
- INIT_EWRK3;
-
- nicsr = inb(EWRK3_CSR);
-
- icr = inb(EWRK3_ICR);
- icr &= 0x70;
- outb(icr, EWRK3_ICR); /* Disable all the IRQs */
-
- if (nicsr != (CSR_TXD | CSR_RXD))
- return -ENXIO;
-
- /* Check that the EEPROM is alive and well and not living on Pluto... */
- for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
- union {
- short val;
- char c[2];
- } tmp;
-
- tmp.val = (short) Read_EEPROM(iobase, (i >> 1));
- eeprom_image[i] = tmp.c[0];
- eeprom_image[i + 1] = tmp.c[1];
- chksum += eeprom_image[i] + eeprom_image[i + 1];
- }
-
- if (chksum != 0) { /* Bad EEPROM Data! */
- printk("%s: Device has a bad on-board EEPROM.\n", dev->name);
- return -ENXIO;
- }
-
- EthwrkSignature(name, eeprom_image);
- if (*name == '\0')
- return -ENXIO;
-
- dev->base_addr = iobase;
-
- if (iobase > 0x400) {
- outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */
- }
- lemac = eeprom_image[EEPROM_CHIPVER];
- cmr = inb(EWRK3_CMR);
-
- if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) ||
- ((lemac == LeMAC2) && !(cmr & CMR_HS))) {
- printk("%s: %s at %#4lx", dev->name, name, iobase);
- hard_strapped = 1;
- } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) {
- /* EISA slot address */
- printk("%s: %s at %#4lx (EISA slot %ld)",
- dev->name, name, iobase, ((iobase >> 12) & 0x0f));
- } else { /* ISA port address */
- printk("%s: %s at %#4lx", dev->name, name, iobase);
- }
-
- printk(", h/w address ");
- if (lemac != LeMAC2)
- DevicePresent(iobase); /* need after EWRK3_INIT */
- status = get_hw_addr(dev, eeprom_image, lemac);
- printk("%pM\n", dev->dev_addr);
-
- if (status) {
- printk(" which has an EEPROM CRC error.\n");
- return -ENXIO;
- }
-
- if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */
- cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS);
- if (eeprom_image[EEPROM_MISC0] & READ_AHEAD)
- cmr |= CMR_RA;
- if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND)
- cmr |= CMR_WB;
- if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL)
- cmr |= CMR_POLARITY;
- if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK)
- cmr |= CMR_LINK;
- if (eeprom_image[EEPROM_MISC0] & _0WS_ENA)
- cmr |= CMR_0WS;
- }
- if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM)
- cmr |= CMR_DRAM;
- outb(cmr, EWRK3_CMR);
-
- cr = inb(EWRK3_CR); /* Set up the Control Register */
- cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD;
- if (cr & SETUP_APD)
- cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS;
- cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS;
- cr |= eeprom_image[EEPROM_MISC0] & ENA_16;
- outb(cr, EWRK3_CR);
-
- /*
- ** Determine the base address and window length for the EWRK3
- ** RAM from the memory base register.
- */
- mem_start = inb(EWRK3_MBR);
- shmem_length = 0;
- if (mem_start != 0) {
- if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) {
- mem_start *= SHMEM_64K;
- shmem_length = SHMEM_64K;
- } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) {
- mem_start *= SHMEM_32K;
- shmem_length = SHMEM_32K;
- } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) {
- mem_start = mem_start * SHMEM_2K + 0x80000;
- shmem_length = SHMEM_2K;
- } else {
- return -ENXIO;
- }
- }
- /*
- ** See the top of this source code for comments about
- ** uncommenting this line.
- */
-/* FORCE_2K_MODE; */
-
- if (hard_strapped) {
- printk(" is hard strapped.\n");
- } else if (mem_start) {
- printk(" has a %dk RAM window", (int) (shmem_length >> 10));
- printk(" at 0x%.5lx", mem_start);
- } else {
- printk(" is in I/O only mode");
- }
-
- lp = netdev_priv(dev);
- lp->shmem_base = mem_start;
- lp->shmem = ioremap(mem_start, shmem_length);
- if (!lp->shmem)
- return -ENOMEM;
- lp->shmem_length = shmem_length;
- lp->lemac = lemac;
- lp->hard_strapped = hard_strapped;
- lp->led_mask = CR_LED;
- spin_lock_init(&lp->hw_lock);
-
- lp->mPage = 64;
- if (cmr & CMR_DRAM)
- lp->mPage <<= 1; /* 2 DRAMS on module */
-
- sprintf(lp->adapter_name, "%s (%s)", name, dev->name);
-
- lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM;
-
- if (!hard_strapped) {
- /*
- ** Enable EWRK3 board interrupts for autoprobing
- */
- icr |= ICR_IE; /* Enable interrupts */
- outb(icr, EWRK3_ICR);
-
- /* The DMA channel may be passed in on this parameter. */
- dev->dma = 0;
-
- /* To auto-IRQ we enable the initialization-done and DMA err,
- interrupts. For now we will always get a DMA error. */
- if (dev->irq < 2) {
-#ifndef MODULE
- u_char irqnum;
- unsigned long irq_mask;
-
-
- irq_mask = probe_irq_on();
-
- /*
- ** Trigger a TNE interrupt.
- */
- icr |= ICR_TNEM;
- outb(1, EWRK3_TDQ); /* Write to the TX done queue */
- outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */
-
- irqnum = irq[((icr & IRQ_SEL) >> 4)];
-
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
- if ((dev->irq) && (irqnum == dev->irq)) {
- printk(" and uses IRQ%d.\n", dev->irq);
- } else {
- if (!dev->irq) {
- printk(" and failed to detect IRQ line.\n");
- } else if ((irqnum == 1) && (lemac == LeMAC2)) {
- printk(" and an illegal IRQ line detected.\n");
- } else {
- printk(", but incorrect IRQ line detected.\n");
- }
- iounmap(lp->shmem);
- return -ENXIO;
- }
-
- DISABLE_IRQs; /* Mask all interrupts */
-
-#endif /* MODULE */
- } else {
- printk(" and requires IRQ%d.\n", dev->irq);
- }
- }
-
- if (ewrk3_debug > 1) {
- printk(version);
- }
- /* The EWRK3-specific entries in the device structure. */
- dev->netdev_ops = &ewrk3_netdev_ops;
- if (lp->adapter_name[4] == '3')
- SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
- else
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
- dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
-
- dev->mem_start = 0;
-
- return 0;
-}
-
-
-static int ewrk3_open(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int status = 0;
- u_char icr, csr;
-
- /*
- ** Stop the TX and RX...
- */
- STOP_EWRK3;
-
- if (!lp->hard_strapped) {
- if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
- printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
- status = -EAGAIN;
- } else {
-
- /*
- ** Re-initialize the EWRK3...
- */
- ewrk3_init(dev);
-
- if (ewrk3_debug > 1) {
- printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq);
- printk(" physical address: %pM\n", dev->dev_addr);
- if (lp->shmem_length == 0) {
- printk(" no shared memory, I/O only mode\n");
- } else {
- printk(" start of shared memory: 0x%08lx\n", lp->shmem_base);
- printk(" window length: 0x%04lx\n", lp->shmem_length);
- }
- printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1));
- printk(" csr: 0x%02x\n", inb(EWRK3_CSR));
- printk(" cr: 0x%02x\n", inb(EWRK3_CR));
- printk(" icr: 0x%02x\n", inb(EWRK3_ICR));
- printk(" cmr: 0x%02x\n", inb(EWRK3_CMR));
- printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC));
- }
- netif_start_queue(dev);
- /*
- ** Unmask EWRK3 board interrupts
- */
- icr = inb(EWRK3_ICR);
- ENABLE_IRQs;
-
- }
- } else {
- printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name);
- printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n");
- return -EINVAL;
- }
-
- return status;
-}
-
-/*
- ** Initialize the EtherWORKS 3 operating conditions
- */
-static void ewrk3_init(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_char csr, page;
- u_long iobase = dev->base_addr;
- int i;
-
- /*
- ** Enable any multicasts
- */
- set_multicast_list(dev);
-
- /*
- ** Set hardware MAC address. Address is initialized from the EEPROM
- ** during startup but may have since been changed by the user.
- */
- for (i=0; i<ETH_ALEN; i++)
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
-
- /*
- ** Clean out any remaining entries in all the queues here
- */
- while (inb(EWRK3_TQ));
- while (inb(EWRK3_TDQ));
- while (inb(EWRK3_RQ));
- while (inb(EWRK3_FMQ));
-
- /*
- ** Write a clean free memory queue
- */
- for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */
- outb(page, EWRK3_FMQ); /* to the Free Memory Queue */
- }
-
- START_EWRK3; /* Enable the TX and/or RX */
-}
-
-/*
- * Transmit timeout
- */
-
-static void ewrk3_timeout(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_char icr, csr;
- u_long iobase = dev->base_addr;
-
- if (!lp->hard_strapped)
- {
- printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n",
- dev->name, inb(EWRK3_CSR));
-
- /*
- ** Mask all board interrupts
- */
- DISABLE_IRQs;
-
- /*
- ** Stop the TX and RX...
- */
- STOP_EWRK3;
-
- ewrk3_init(dev);
-
- /*
- ** Unmask EWRK3 board interrupts
- */
- ENABLE_IRQs;
-
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
- }
-}
-
-/*
- ** Writes a socket buffer to the free page queue
- */
-static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- void __iomem *buf = NULL;
- u_char icr;
- u_char page;
-
- spin_lock_irq (&lp->hw_lock);
- DISABLE_IRQs;
-
- /* if no resources available, exit, request packet be queued */
- if (inb (EWRK3_FMQC) == 0) {
- printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n",
- dev->name);
- printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n",
- dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR),
- inb (EWRK3_FMQC));
- goto err_out;
- }
-
- /*
- ** Get a free page from the FMQ
- */
- if ((page = inb (EWRK3_FMQ)) >= lp->mPage) {
- printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n",
- (u_char) page);
- goto err_out;
- }
-
-
- /*
- ** Set up shared memory window and pointer into the window
- */
- if (lp->shmem_length == IO_ONLY) {
- outb (page, EWRK3_IOPR);
- } else if (lp->shmem_length == SHMEM_2K) {
- buf = lp->shmem;
- outb (page, EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_32K) {
- buf = (((short) page << 11) & 0x7800) + lp->shmem;
- outb ((page >> 4), EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_64K) {
- buf = (((short) page << 11) & 0xf800) + lp->shmem;
- outb ((page >> 5), EWRK3_MPR);
- } else {
- printk (KERN_ERR "%s: Oops - your private data area is hosed!\n",
- dev->name);
- BUG ();
- }
-
- /*
- ** Set up the buffer control structures and copy the data from
- ** the socket buffer to the shared memory .
- */
- if (lp->shmem_length == IO_ONLY) {
- int i;
- u_char *p = skb->data;
- outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA);
- outb ((char) (skb->len & 0xff), EWRK3_DATA);
- outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA);
- outb ((char) 0x04, EWRK3_DATA);
- for (i = 0; i < skb->len; i++) {
- outb (*p++, EWRK3_DATA);
- }
- outb (page, EWRK3_TQ); /* Start sending pkt */
- } else {
- writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */
- buf += 1;
- writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */
- buf += 1;
- if (lp->txc) {
- writeb(((skb->len >> 8) & 0xff) | XCT, buf);
- buf += 1;
- writeb (0x04, buf); /* index byte */
- buf += 1;
- writeb (0x00, (buf + skb->len)); /* Write the XCT flag */
- memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */
- outb (page, EWRK3_TQ); /* Start sending pkt */
- memcpy_toio (buf + PRELOAD,
- skb->data + PRELOAD,
- skb->len - PRELOAD);
- writeb (0xff, (buf + skb->len)); /* Write the XCT flag */
- } else {
- writeb ((skb->len >> 8) & 0xff, buf);
- buf += 1;
- writeb (0x04, buf); /* index byte */
- buf += 1;
- memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */
- outb (page, EWRK3_TQ); /* Start sending pkt */
- }
- }
-
- ENABLE_IRQs;
- spin_unlock_irq (&lp->hw_lock);
-
- dev->stats.tx_bytes += skb->len;
- dev_kfree_skb (skb);
-
- /* Check for free resources: stop Tx queue if there are none */
- if (inb (EWRK3_FMQC) == 0)
- netif_stop_queue (dev);
-
- return NETDEV_TX_OK;
-
-err_out:
- ENABLE_IRQs;
- spin_unlock_irq (&lp->hw_lock);
- return NETDEV_TX_BUSY;
-}
-
-/*
- ** The EWRK3 interrupt handler.
- */
-static irqreturn_t ewrk3_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ewrk3_private *lp;
- u_long iobase;
- u_char icr, cr, csr;
-
- lp = netdev_priv(dev);
- iobase = dev->base_addr;
-
- /* get the interrupt information */
- csr = inb(EWRK3_CSR);
-
- /*
- ** Mask the EWRK3 board interrupts and turn on the LED
- */
- spin_lock(&lp->hw_lock);
- DISABLE_IRQs;
-
- cr = inb(EWRK3_CR);
- cr |= lp->led_mask;
- outb(cr, EWRK3_CR);
-
- if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */
- ewrk3_rx(dev);
-
- if (csr & CSR_TNE) /* Tx interrupt (packet sent) */
- ewrk3_tx(dev);
-
- /*
- ** Now deal with the TX/RX disable flags. These are set when there
- ** are no more resources. If resources free up then enable these
- ** interrupts, otherwise mask them - failure to do this will result
- ** in the system hanging in an interrupt loop.
- */
- if (inb(EWRK3_FMQC)) { /* any resources available? */
- lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */
- csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */
- outb(csr, EWRK3_CSR);
- netif_wake_queue(dev);
- } else {
- lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */
- }
-
- /* Unmask the EWRK3 board interrupts and turn off the LED */
- cr &= ~(lp->led_mask);
- outb(cr, EWRK3_CR);
- ENABLE_IRQs;
- spin_unlock(&lp->hw_lock);
- return IRQ_HANDLED;
-}
-
-/* Called with lp->hw_lock held */
-static int ewrk3_rx(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- u_char page;
- void __iomem *buf = NULL;
-
- while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */
- if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */
- /*
- ** Set up shared memory window and pointer into the window
- */
- if (lp->shmem_length == IO_ONLY) {
- outb(page, EWRK3_IOPR);
- } else if (lp->shmem_length == SHMEM_2K) {
- buf = lp->shmem;
- outb(page, EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_32K) {
- buf = (((short) page << 11) & 0x7800) + lp->shmem;
- outb((page >> 4), EWRK3_MPR);
- } else if (lp->shmem_length == SHMEM_64K) {
- buf = (((short) page << 11) & 0xf800) + lp->shmem;
- outb((page >> 5), EWRK3_MPR);
- } else {
- status = -1;
- printk("%s: Oops - your private data area is hosed!\n", dev->name);
- }
-
- if (!status) {
- char rx_status;
- int pkt_len;
-
- if (lp->shmem_length == IO_ONLY) {
- rx_status = inb(EWRK3_DATA);
- pkt_len = inb(EWRK3_DATA);
- pkt_len |= ((u_short) inb(EWRK3_DATA) << 8);
- } else {
- rx_status = readb(buf);
- buf += 1;
- pkt_len = readw(buf);
- buf += 3;
- }
-
- if (!(rx_status & R_ROK)) { /* There was an error. */
- dev->stats.rx_errors++; /* Update the error stats. */
- if (rx_status & R_DBE)
- dev->stats.rx_frame_errors++;
- if (rx_status & R_CRC)
- dev->stats.rx_crc_errors++;
- if (rx_status & R_PLL)
- dev->stats.rx_fifo_errors++;
- } else {
- struct sk_buff *skb;
- skb = netdev_alloc_skb(dev,
- pkt_len + 2);
-
- if (skb != NULL) {
- unsigned char *p;
- skb_reserve(skb, 2); /* Align to 16 bytes */
- p = skb_put(skb, pkt_len);
-
- if (lp->shmem_length == IO_ONLY) {
- *p = inb(EWRK3_DATA); /* dummy read */
- for (i = 0; i < pkt_len; i++) {
- *p++ = inb(EWRK3_DATA);
- }
- } else {
- memcpy_fromio(p, buf, pkt_len);
- }
-
- for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) {
- if (pkt_len < i * EWRK3_PKT_BIN_SZ) {
- lp->pktStats.bins[i]++;
- i = EWRK3_PKT_STAT_SZ;
- }
- }
- p = skb->data; /* Look at the dest addr */
- if (is_multicast_ether_addr(p)) {
- if (is_broadcast_ether_addr(p)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(p,
- dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- }
- /*
- ** Notify the upper protocol layers that there is another
- ** packet to handle
- */
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
-
- /*
- ** Update stats
- */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- printk("%s: Insufficient memory; nuking packet.\n", dev->name);
- dev->stats.rx_dropped++; /* Really, deferred. */
- break;
- }
- }
- }
- /*
- ** Return the received buffer to the free memory queue
- */
- outb(page, EWRK3_FMQ);
- } else {
- printk("ewrk3_rx(): Illegal page number, page %d\n", page);
- printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC));
- }
- }
- return status;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-** Called with lp->hw_lock held
-*/
-static int ewrk3_tx(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char tx_status;
-
- while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */
- if (tx_status & T_VSTS) { /* The status is valid */
- if (tx_status & T_TXE) {
- dev->stats.tx_errors++;
- if (tx_status & T_NCL)
- dev->stats.tx_carrier_errors++;
- if (tx_status & T_LCL)
- dev->stats.tx_window_errors++;
- if (tx_status & T_CTU) {
- if ((tx_status & T_COLL) ^ T_XUR) {
- lp->pktStats.tx_underruns++;
- } else {
- lp->pktStats.excessive_underruns++;
- }
- } else if (tx_status & T_COLL) {
- if ((tx_status & T_COLL) ^ T_XCOLL) {
- dev->stats.collisions++;
- } else {
- lp->pktStats.excessive_collisions++;
- }
- }
- } else {
- dev->stats.tx_packets++;
- }
- }
- }
-
- return 0;
-}
-
-static int ewrk3_close(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char icr, csr;
-
- netif_stop_queue(dev);
-
- if (ewrk3_debug > 1) {
- printk("%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, inb(EWRK3_CSR));
- }
- /*
- ** We stop the EWRK3 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
-
- STOP_EWRK3;
-
- /*
- ** Clean out the TX and RX queues here (note that one entry
- ** may get added to either the TXD or RX queues if the TX or RX
- ** just starts processing a packet before the STOP_EWRK3 command
- ** is received. This will be flushed in the ewrk3_open() call).
- */
- while (inb(EWRK3_TQ));
- while (inb(EWRK3_TDQ));
- while (inb(EWRK3_RQ));
-
- if (!lp->hard_strapped) {
- free_irq(dev->irq, dev);
- }
- return 0;
-}
-
-/*
- ** Set or clear the multicast filter for this adapter.
- */
-static void set_multicast_list(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char csr;
-
- csr = inb(EWRK3_CSR);
-
- if (lp->shmem_length == IO_ONLY) {
- lp->mctbl = NULL;
- } else {
- lp->mctbl = lp->shmem + PAGE0_HTE;
- }
-
- csr &= ~(CSR_PME | CSR_MCE);
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- csr |= CSR_PME;
- outb(csr, EWRK3_CSR);
- } else {
- SetMulticastFilter(dev);
- csr |= CSR_MCE;
- outb(csr, EWRK3_CSR);
- }
-}
-
-/*
- ** Calculate the hash code and update the logical address filter
- ** from a list of ethernet multicast addresses.
- ** Little endian crc one liner from Matt Thomas, DEC.
- **
- ** Note that when clearing the table, the broadcast bit must remain asserted
- ** to receive broadcast messages.
- */
-static void SetMulticastFilter(struct net_device *dev)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i;
- char bit, byte;
- short __iomem *p = lp->mctbl;
- u16 hashcode;
- u32 crc;
-
- spin_lock_irq(&lp->hw_lock);
-
- if (lp->shmem_length == IO_ONLY) {
- outb(0, EWRK3_IOPR);
- outw(PAGE0_HTE, EWRK3_PIR1);
- } else {
- outb(0, EWRK3_MPR);
- }
-
- if (dev->flags & IFF_ALLMULTI) {
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- if (lp->shmem_length == IO_ONLY) {
- outb(0xff, EWRK3_DATA);
- } else { /* memset didn't work here */
- writew(0xffff, p);
- p++;
- i++;
- }
- }
- } else {
- /* Clear table except for broadcast bit */
- if (lp->shmem_length == IO_ONLY) {
- for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) {
- outb(0x00, EWRK3_DATA);
- }
- outb(0x80, EWRK3_DATA);
- i++; /* insert the broadcast bit */
- for (; i < (HASH_TABLE_LEN >> 3); i++) {
- outb(0x00, EWRK3_DATA);
- }
- } else {
- memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3);
- writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1);
- }
-
- /* Update table */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
-
- if (lp->shmem_length == IO_ONLY) {
- u_char tmp;
-
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- tmp = inb(EWRK3_DATA);
- tmp |= bit;
- outw(PAGE0_HTE + byte, EWRK3_PIR1);
- outb(tmp, EWRK3_DATA);
- } else {
- writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte);
- }
- }
- }
-
- spin_unlock_irq(&lp->hw_lock);
-}
-
-/*
- ** ISA bus I/O device probe
- */
-static int __init isa_probe(struct net_device *dev, u_long ioaddr)
-{
- int i = num_ewrks3s, maxSlots;
- int ret = -ENODEV;
-
- u_long iobase;
-
- if (ioaddr >= 0x400)
- goto out;
-
- if (ioaddr == 0) { /* Autoprobing */
- iobase = EWRK3_IO_BASE; /* Get the first slot address */
- maxSlots = 24;
- } else { /* Probe a specific location */
- iobase = ioaddr;
- maxSlots = i + 1;
- }
-
- for (; (i < maxSlots) && (dev != NULL);
- iobase += EWRK3_IOP_INC, i++)
- {
- if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) {
- if (DevicePresent(iobase) == 0) {
- int irq = dev->irq;
- ret = ewrk3_hw_init(dev, iobase);
- if (!ret)
- break;
- dev->irq = irq;
- }
- release_region(iobase, EWRK3_TOTAL_SIZE);
- }
- }
- out:
-
- return ret;
-}
-
-/*
- ** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually
- ** the motherboard.
- */
-static int __init eisa_probe(struct net_device *dev, u_long ioaddr)
-{
- int i, maxSlots;
- u_long iobase;
- int ret = -ENODEV;
-
- if (ioaddr < 0x1000)
- goto out;
-
- iobase = ioaddr;
- i = (ioaddr >> 12);
- maxSlots = i + 1;
-
- for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) {
- if (EISA_signature(name, EISA_ID) == 0) {
- if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) &&
- DevicePresent(iobase) == 0) {
- int irq = dev->irq;
- ret = ewrk3_hw_init(dev, iobase);
- if (!ret)
- break;
- dev->irq = irq;
- }
- release_region(iobase, EWRK3_TOTAL_SIZE);
- }
- }
-
- out:
- return ret;
-}
-
-
-/*
- ** Read the EWRK3 EEPROM using this routine
- */
-static int Read_EEPROM(u_long iobase, u_char eaddr)
-{
- int i;
-
- outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
- outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
-
- return inw(EWRK3_EPROM1); /* 16 bits data return */
-}
-
-/*
- ** Write the EWRK3 EEPROM using this routine
- */
-static int Write_EEPROM(short data, u_long iobase, u_char eaddr)
-{
- int i;
-
- outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
- outw(data, EWRK3_EPROM1); /* write data to register */
- outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */
- outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */
- for (i = 0; i < 75000; i++)
- inb(EWRK3_CSR); /* wait 15msec */
- outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */
- for (i = 0; i < 5000; i++)
- inb(EWRK3_CSR); /* wait 1msec */
-
- return 0;
-}
-
-/*
- ** Look for a particular board name in the on-board EEPROM.
- */
-static void __init EthwrkSignature(char *name, char *eeprom_image)
-{
- int i;
- char *signatures[] = EWRK3_SIGNATURE;
-
- for (i=0; *signatures[i] != '\0'; i++)
- if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) )
- break;
-
- if (*signatures[i] != '\0') {
- memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN);
- name[EWRK3_STRLEN] = '\0';
- } else
- name[0] = '\0';
-}
-
-/*
- ** Look for a special sequence in the Ethernet station address PROM that
- ** is common across all EWRK3 products.
- **
- ** Search the Ethernet address ROM for the signature. Since the ROM address
- ** counter can start at an arbitrary point, the search must include the entire
- ** probe sequence length plus the (length_of_the_signature - 1).
- ** Stop the search IMMEDIATELY after the signature is found so that the
- ** PROM address counter is correctly positioned at the start of the
- ** ethernet address for later read out.
- */
-
-static int __init DevicePresent(u_long iobase)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- }
- dev;
- short sigLength;
- char data;
- int i, j, status = 0;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
- data = inb(EWRK3_APROM);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) {
- j = 1;
- } else {
- j = 0;
- }
- }
- }
-
- if (j != sigLength) {
- status = -ENODEV; /* search failed */
- }
- return status;
-}
-
-static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType)
-{
- int i, j, k;
- u_short chksum;
- u_char crc, lfsr, sd, status = 0;
- u_long iobase = dev->base_addr;
- u16 tmp;
-
- if (chipType == LeMAC2) {
- for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) {
- sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j];
- outb(dev->dev_addr[j], EWRK3_PAR0 + j);
- for (k = 0; k < 8; k++, sd >>= 1) {
- lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
- crc = (crc >> 1) + lfsr;
- }
- }
- if (crc != eeprom_image[EEPROM_PA_CRC])
- status = -1;
- } else {
- for (i = 0, k = 0; i < ETH_ALEN;) {
- k <<= 1;
- if (k > 0xffff)
- k -= 0xffff;
-
- k += (u_char) (tmp = inb(EWRK3_APROM));
- dev->dev_addr[i] = (u_char) tmp;
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
- i++;
- k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8);
- dev->dev_addr[i] = (u_char) tmp;
- outb(dev->dev_addr[i], EWRK3_PAR0 + i);
- i++;
-
- if (k > 0xffff)
- k -= 0xffff;
- }
- if (k == 0xffff)
- k = 0;
- chksum = inb(EWRK3_APROM);
- chksum |= (inb(EWRK3_APROM) << 8);
- if (k != chksum)
- status = -1;
- }
-
- return status;
-}
-
-/*
- ** Look for a particular board name in the EISA configuration space
- */
-static int __init EISA_signature(char *name, s32 eisa_id)
-{
- u_long i;
- char *signatures[] = EWRK3_SIGNATURE;
- char ManCode[EWRK3_STRLEN];
- union {
- s32 ID;
- char Id[4];
- } Eisa;
- int status = 0;
-
- *name = '\0';
- for (i = 0; i < 4; i++) {
- Eisa.Id[i] = inb(eisa_id + i);
- }
-
- ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40);
- ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40);
- ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30);
- ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30);
- ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30);
- ManCode[5] = '\0';
-
- for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) {
- if (strstr(ManCode, signatures[i]) != NULL) {
- strcpy(name, ManCode);
- status = 1;
- }
- }
-
- return status; /* return the device name string */
-}
-
-static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
-{
- int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL);
-
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->fw_version, "%d", fwrev);
- strcpy(info->bus_info, "N/A");
- info->eedump_len = EEPROM_MAX;
-}
-
-static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- u8 cr = inb(EWRK3_CR);
-
- switch (lp->adapter_name[4]) {
- case '3': /* DE203 */
- ecmd->supported = SUPPORTED_BNC;
- ecmd->port = PORT_BNC;
- break;
-
- case '4': /* DE204 */
- ecmd->supported = SUPPORTED_TP;
- ecmd->port = PORT_TP;
- break;
-
- case '5': /* DE205 */
- ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI;
- ecmd->autoneg = !(cr & CR_APD);
- /*
- ** Port is only valid if autoneg is disabled
- ** and even then we don't know if AUI is jumpered.
- */
- if (!ecmd->autoneg)
- ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP;
- break;
- }
-
- ecmd->supported |= SUPPORTED_10baseT_Half;
- ethtool_cmd_speed_set(ecmd, SPEED_10);
- ecmd->duplex = DUPLEX_HALF;
- return 0;
-}
-
-static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- unsigned long flags;
- u8 cr;
-
- /* DE205 is the only card with anything to set */
- if (lp->adapter_name[4] != '5')
- return -EOPNOTSUPP;
-
- /* Sanity-check parameters */
- if (ecmd->speed != SPEED_10)
- return -EINVAL;
- if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC)
- return -EINVAL; /* AUI is not software-selectable */
- if (ecmd->transceiver != XCVR_INTERNAL)
- return -EINVAL;
- if (ecmd->duplex != DUPLEX_HALF)
- return -EINVAL;
- if (ecmd->phy_address != 0)
- return -EINVAL;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- cr = inb(EWRK3_CR);
-
- /* If Autoneg is set, change to Auto Port mode */
- /* Otherwise, disable Auto Port and set port explicitly */
- if (ecmd->autoneg) {
- cr &= ~CR_APD;
- } else {
- cr |= CR_APD;
- if (ecmd->port == PORT_TP)
- cr &= ~CR_PSEL; /* Force TP */
- else
- cr |= CR_PSEL; /* Force BNC */
- }
-
- /* Commit the changes */
- outb(cr, EWRK3_CR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- return 0;
-}
-
-static u32 ewrk3_get_link(struct net_device *dev)
-{
- unsigned long iobase = dev->base_addr;
- u8 cmr = inb(EWRK3_CMR);
- /* DE203 has BNC only and link status does not apply */
- /* On DE204 this is always valid since TP is the only port. */
- /* On DE205 this reflects TP status even if BNC or AUI is selected. */
- return !(cmr & CMR_LINK);
-}
-
-static int ewrk3_set_phys_id(struct net_device *dev,
- enum ethtool_phys_id_state state)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- unsigned long iobase = dev->base_addr;
- u8 cr;
-
- spin_lock_irq(&lp->hw_lock);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- /* Prevent ISR from twiddling the LED */
- lp->led_mask = 0;
- spin_unlock_irq(&lp->hw_lock);
- return 2; /* cycle on/off twice per second */
-
- case ETHTOOL_ID_ON:
- cr = inb(EWRK3_CR);
- outb(cr | CR_LED, EWRK3_CR);
- break;
-
- case ETHTOOL_ID_OFF:
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- break;
-
- case ETHTOOL_ID_INACTIVE:
- lp->led_mask = CR_LED;
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- }
- spin_unlock_irq(&lp->hw_lock);
-
- return 0;
-}
-
-static const struct ethtool_ops ethtool_ops_203 = {
- .get_drvinfo = ewrk3_get_drvinfo,
- .get_settings = ewrk3_get_settings,
- .set_settings = ewrk3_set_settings,
- .set_phys_id = ewrk3_set_phys_id,
-};
-
-static const struct ethtool_ops ethtool_ops = {
- .get_drvinfo = ewrk3_get_drvinfo,
- .get_settings = ewrk3_get_settings,
- .set_settings = ewrk3_set_settings,
- .get_link = ewrk3_get_link,
- .set_phys_id = ewrk3_set_phys_id,
-};
-
-/*
- ** Perform IOCTL call functions here. Some are privileged operations and
- ** CAP_NET_ADMIN is checked in those cases.
- */
-static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct ewrk3_private *lp = netdev_priv(dev);
- struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- u_char csr;
- unsigned long flags;
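-	/* Scratch buffer big enough for the whole multicast hash table (HASH_TABLE_LEN * ETH_ALEN bytes) */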
- union ewrk3_addr {
- u_char addr[HASH_TABLE_LEN * ETH_ALEN];
- u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
- };
-
- union ewrk3_addr *tmp;
-
- /* All we handle are private IOCTLs */
- if (cmd != EWRK3IOCTL)
- return -EOPNOTSUPP;
-
- tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL);
- if(tmp==NULL)
- return -ENOMEM;
-
- switch (ioc->cmd) {
- case EWRK3_GET_HWADDR: /* Get the hardware address */
- for (i = 0; i < ETH_ALEN; i++) {
- tmp->addr[i] = dev->dev_addr[i];
- }
- ioc->len = ETH_ALEN;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
-
- case EWRK3_SET_HWADDR: /* Set the hardware address */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= (CSR_TXD | CSR_RXD);
- outb(csr, EWRK3_CSR); /* Disable the TX and RX */
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) {
- status = -EFAULT;
- break;
- }
- spin_lock_irqsave(&lp->hw_lock, flags);
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp->addr[i];
- outb(tmp->addr[i], EWRK3_PAR0 + i);
- }
-
- csr = inb(EWRK3_CSR);
- csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_SET_PROM: /* Set Promiscuous Mode */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= CSR_PME;
- csr &= ~CSR_MCE;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr &= ~CSR_PME;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_MCA: /* Get the multicast address table */
- spin_lock_irqsave(&lp->hw_lock, flags);
- if (lp->shmem_length == IO_ONLY) {
- outb(0, EWRK3_IOPR);
- outw(PAGE0_HTE, EWRK3_PIR1);
- for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
- tmp->addr[i] = inb(EWRK3_DATA);
- }
- } else {
- outb(0, EWRK3_MPR);
- memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3));
- }
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- ioc->len = (HASH_TABLE_LEN >> 3);
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
-
- break;
- case EWRK3_SET_MCA: /* Set a multicast address */
- if (capable(CAP_NET_ADMIN)) {
- if (ioc->len > HASH_TABLE_LEN) {
- status = -EINVAL;
- break;
- }
- if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) {
- status = -EFAULT;
- break;
- }
- set_multicast_list(dev);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_MCA: /* Clear all multicast addresses */
- if (capable(CAP_NET_ADMIN)) {
- set_multicast_list(dev);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_MCA_EN: /* Enable multicast addressing */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- csr = inb(EWRK3_CSR);
- csr |= CSR_MCE;
- csr &= ~CSR_PME;
- outb(csr, EWRK3_CSR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_STATS: { /* Get the driver statistics */
- struct ewrk3_stats *tmp_stats =
- kmalloc(sizeof(lp->pktStats), GFP_KERNEL);
- if (!tmp_stats) {
- status = -ENOMEM;
- break;
- }
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- ioc->len = sizeof(lp->pktStats);
- if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats)))
- status = -EFAULT;
- kfree(tmp_stats);
- break;
- }
- case EWRK3_CLR_STATS: /* Zero out the driver statistics */
- if (capable(CAP_NET_ADMIN)) {
- spin_lock_irqsave(&lp->hw_lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->hw_lock,flags);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_CSR: /* Get the CSR Register contents */
- tmp->addr[0] = inb(EWRK3_CSR);
- ioc->len = 1;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
- case EWRK3_SET_CSR: /* Set the CSR Register contents */
- if (capable(CAP_NET_ADMIN)) {
- if (copy_from_user(tmp->addr, ioc->data, 1)) {
- status = -EFAULT;
- break;
- }
- outb(tmp->addr[0], EWRK3_CSR);
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_EEPROM: /* Get the EEPROM contents */
- if (capable(CAP_NET_ADMIN)) {
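-			/* Dump the raw EEPROM words, then append the CMR and the current station address registers */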
- for (i = 0; i < (EEPROM_MAX >> 1); i++) {
- tmp->val[i] = (short) Read_EEPROM(iobase, i);
- }
- i = EEPROM_MAX;
- tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */
- for (j = 0; j < ETH_ALEN; j++) {
- tmp->addr[i++] = inb(EWRK3_PAR0 + j);
- }
- ioc->len = EEPROM_MAX + 1 + ETH_ALEN;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_SET_EEPROM: /* Set the EEPROM contents */
- if (capable(CAP_NET_ADMIN)) {
- if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) {
- status = -EFAULT;
- break;
- }
- for (i = 0; i < (EEPROM_MAX >> 1); i++) {
- Write_EEPROM(tmp->val[i], iobase, i);
- }
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_GET_CMR: /* Get the CMR Register contents */
- tmp->addr[0] = inb(EWRK3_CMR);
- ioc->len = 1;
- if (copy_to_user(ioc->data, tmp->addr, ioc->len))
- status = -EFAULT;
- break;
- case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */
- if (capable(CAP_NET_ADMIN)) {
- lp->txc = 1;
- } else {
- status = -EPERM;
- }
-
- break;
- case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */
- if (capable(CAP_NET_ADMIN)) {
- lp->txc = 0;
- } else {
- status = -EPERM;
- }
-
- break;
- default:
- status = -EOPNOTSUPP;
- }
- kfree(tmp);
- return status;
-}
-
-#ifdef MODULE
-static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S];
-static int ndevs;
-static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, };
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, byte, NULL, 0);
-MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)");
-
-static __exit void ewrk3_exit_module(void)
-{
- int i;
-
- for( i=0; i<ndevs; i++ ) {
- struct net_device *dev = ewrk3_devs[i];
- struct ewrk3_private *lp = netdev_priv(dev);
- ewrk3_devs[i] = NULL;
- unregister_netdev(dev);
- release_region(dev->base_addr, EWRK3_TOTAL_SIZE);
- iounmap(lp->shmem);
- free_netdev(dev);
- }
-}
-
-static __init int ewrk3_init_module(void)
-{
- int i=0;
-
- while( io[i] && irq[i] ) {
- struct net_device *dev
- = alloc_etherdev(sizeof(struct ewrk3_private));
-
- if (!dev)
- break;
-
- if (ewrk3_probe1(dev, io[i], irq[i]) != 0) {
- free_netdev(dev);
- break;
- }
-
- ewrk3_devs[ndevs++] = dev;
- i++;
- }
-
- return ndevs ? 0 : -EIO;
-}
-
-
-/* Hack for breakage in new module stuff */
-module_exit(ewrk3_exit_module);
-module_init(ewrk3_init_module);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dec/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h
deleted file mode 100644
index 8e0ee906567..00000000000
--- a/drivers/net/ethernet/dec/ewrk3.h
+++ /dev/null
@@ -1,322 +0,0 @@
-/*
- Written 1994 by David C. Davies.
-
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** I/O Address Register Map
-*/
-#define EWRK3_CSR iobase+0x00 /* Control and Status Register */
-#define EWRK3_CR iobase+0x01 /* Control Register */
-#define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */
-#define EWRK3_TSR iobase+0x03 /* Transmit Status Register */
-#define EWRK3_RSVD1 iobase+0x04 /* RESERVED */
-#define EWRK3_RSVD2 iobase+0x05 /* RESERVED */
-#define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */
-#define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */
-#define EWRK3_RQ iobase+0x08 /* Receive Queue */
-#define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */
-#define EWRK3_TQ iobase+0x0a /* Transmit Queue */
-#define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */
-#define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */
-#define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */
-#define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */
-#define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */
-#define EWRK3_DATA iobase+0x10 /* Data Register */
-#define EWRK3_IOPR iobase+0x11 /* I/O Page Register */
-#define EWRK3_IOBR iobase+0x12 /* I/O Base Register */
-#define EWRK3_MPR iobase+0x13 /* Memory Page Register */
-#define EWRK3_MBR iobase+0x14 /* Memory Base Register */
-#define EWRK3_APROM iobase+0x15 /* Address PROM */
-#define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */
-#define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */
-#define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */
-#define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */
-#define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */
-#define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */
-#define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */
-#define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */
-#define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */
-
-/*
-** Control Page Map
-*/
-#define PAGE0_FMQ 0x000 /* Free Memory Queue */
-#define PAGE0_RQ 0x080 /* Receive Queue */
-#define PAGE0_TQ 0x100 /* Transmit Queue */
-#define PAGE0_TDQ 0x180 /* Transmit Done Queue */
-#define PAGE0_HTE 0x200 /* Hash Table Entries */
-#define PAGE0_RSVD 0x240 /* RESERVED */
-#define PAGE0_USRD 0x600 /* User Data */
-
-/*
-** Control and Status Register bit definitions (EWRK3_CSR)
-*/
-#define CSR_RA 0x80 /* Runt Accept */
-#define CSR_PME 0x40 /* Promiscuous Mode Enable */
-#define CSR_MCE 0x20 /* Multicast Enable */
-#define CSR_TNE 0x08 /* TX Done Queue Not Empty */
-#define CSR_RNE 0x04 /* RX Queue Not Empty */
-#define CSR_TXD 0x02 /* TX Disable */
-#define CSR_RXD 0x01 /* RX Disable */
-
-/*
-** Control Register bit definitions (EWRK3_CR)
-*/
-#define CR_APD 0x80 /* Auto Port Disable */
-#define CR_PSEL 0x40 /* Port Select (0->TP port) */
-#define CR_LBCK 0x20 /* LoopBaCK enable */
-#define CR_FDUP 0x10 /* Full DUPlex enable */
-#define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */
-#define CR_EN_16 0x04 /* ENable 16 bit memory accesses */
-#define CR_LED 0x02 /* LED (1-> turn on) */
-
-/*
-** Interrupt Control Register bit definitions (EWRK3_ICR)
-*/
-#define ICR_IE 0x80 /* Interrupt Enable */
-#define ICR_IS 0x60 /* Interrupt Selected */
-#define ICR_TNEM 0x08 /* TNE Mask (0->mask) */
-#define ICR_RNEM 0x04 /* RNE Mask (0->mask) */
-#define ICR_TXDM 0x02 /* TXD Mask (0->mask) */
-#define ICR_RXDM 0x01 /* RXD Mask (0->mask) */
-
-/*
-** Transmit Status Register bit definitions (EWRK3_TSR)
-*/
-#define TSR_NCL 0x80 /* No Carrier Loopback */
-#define TSR_ID 0x40 /* Initially Deferred */
-#define TSR_LCL 0x20 /* Late CoLlision */
-#define TSR_ECL 0x10 /* Excessive CoLlisions */
-#define TSR_RCNTR 0x0f /* Retries CouNTeR */
-
-/*
-** I/O Page Register bit definitions (EWRK3_IOPR)
-*/
-#define EEPROM_INIT 0xc0 /* EEPROM INIT command */
-#define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */
-#define EEPROM_WR 0xd0 /* EEPROM WRITE command */
-#define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */
-#define EEPROM_RD 0xe0 /* EEPROM READ command */
-
-/*
-** I/O Base Register bit definitions (EWRK3_IOBR)
-*/
-#define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */
-#define EISA_IOB 0x1f /* Compare bits for I/O Base Address */
-
-/*
-** I/O Configuration/Management Register bit definitions (EWRK3_CMR)
-*/
-#define CMR_RA 0x80 /* Read Ahead */
-#define CMR_WB 0x40 /* Write Behind */
-#define CMR_LINK 0x20 /* 0->TP */
-#define CMR_POLARITY 0x10 /* Informational */
-#define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */
-#define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */
-#define CMR_PNP 0x04 /* Plug 'n Play */
-#define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */
-#define CMR_0WS 0x01 /* Zero Wait State */
-
-/*
-** MAC Receive Status Register bit definitions
-*/
-
-#define R_ROK 0x80 /* Receive OK summary */
-#define R_IAM 0x10 /* Individual Address Match */
-#define R_MCM 0x08 /* MultiCast Match */
-#define R_DBE 0x04 /* Dribble Bit Error */
-#define R_CRC 0x02 /* CRC error */
-#define R_PLL 0x01 /* Phase Lock Lost */
-
-/*
-** MAC Transmit Control Register bit definitions
-*/
-
-#define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */
-#define TCR_SED 0x20 /* Stop when Error Detected */
-#define TCR_QMODE 0x10 /* Q_MODE */
-#define TCR_LAB 0x08 /* Less Aggressive Backoff */
-#define TCR_PAD 0x04 /* PAD Runt Packets */
-#define TCR_IFC 0x02 /* Insert Frame Check */
-#define TCR_ISA 0x01 /* Insert Source Address */
-
-/*
-** MAC Transmit Status Register bit definitions
-*/
-
-#define T_VSTS 0x80 /* Valid STatuS */
-#define T_CTU 0x40 /* Cut Through Used */
-#define T_SQE 0x20 /* Signal Quality Error */
-#define T_NCL 0x10 /* No Carrier Loopback */
-#define T_LCL 0x08 /* Late Collision */
-#define T_ID 0x04 /* Initially Deferred */
-#define T_COLL 0x03 /* COLLision status */
-#define T_XCOLL 0x03 /* Excessive Collisions */
-#define T_MCOLL 0x02 /* Multiple Collisions */
-#define T_OCOLL 0x01 /* One Collision */
-#define T_NOCOLL 0x00 /* No Collisions */
-#define T_XUR 0x03 /* Excessive Underruns */
-#define T_TXE 0x7f /* TX Errors */
-
-/*
-** EISA Configuration Register bit definitions
-*/
-
-#define EISA_ID iobase + 0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase + 0x0c84 /* EISA Control Register */
-
-/*
-** EEPROM BYTES
-*/
-#define EEPROM_MEMB 0x00
-#define EEPROM_IOB 0x01
-#define EEPROM_EISA_ID0 0x02
-#define EEPROM_EISA_ID1 0x03
-#define EEPROM_EISA_ID2 0x04
-#define EEPROM_EISA_ID3 0x05
-#define EEPROM_MISC0 0x06
-#define EEPROM_MISC1 0x07
-#define EEPROM_PNAME7 0x08
-#define EEPROM_PNAME6 0x09
-#define EEPROM_PNAME5 0x0a
-#define EEPROM_PNAME4 0x0b
-#define EEPROM_PNAME3 0x0c
-#define EEPROM_PNAME2 0x0d
-#define EEPROM_PNAME1 0x0e
-#define EEPROM_PNAME0 0x0f
-#define EEPROM_SWFLAGS 0x10
-#define EEPROM_HWCAT 0x11
-#define EEPROM_NETMAN2 0x12
-#define EEPROM_REVLVL 0x13
-#define EEPROM_NETMAN0 0x14
-#define EEPROM_NETMAN1 0x15
-#define EEPROM_CHIPVER 0x16
-#define EEPROM_SETUP 0x17
-#define EEPROM_PADDR0 0x18
-#define EEPROM_PADDR1 0x19
-#define EEPROM_PADDR2 0x1a
-#define EEPROM_PADDR3 0x1b
-#define EEPROM_PADDR4 0x1c
-#define EEPROM_PADDR5 0x1d
-#define EEPROM_PA_CRC 0x1e
-#define EEPROM_CHKSUM 0x1f
-
-/*
-** EEPROM bytes for checksumming
-*/
-#define EEPROM_MAX 32 /* bytes */
-
-/*
-** EEPROM MISCELLANEOUS FLAGS
-*/
-#define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */
-#define READ_AHEAD 0x0080 /* Read Ahead feature */
-#define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */
-#define IRQ_SEL 0x0060 /* IRQ line selection */
-#define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */
-#define ENA_16 0x0004 /* Enables 16 bit memory transfers */
-#define WRITE_BEHIND 0x0002 /* Write Behind feature */
-#define _0WS_ENA 0x0001 /* Zero Wait State Enable */
-
-/*
-** EEPROM NETWORK MANAGEMENT FLAGS
-*/
-#define NETMAN_POL 0x04 /* Polarity defeat */
-#define NETMAN_LINK 0x02 /* Link defeat */
-#define NETMAN_CCE 0x01 /* Custom Counters Enable */
-
-/*
-** EEPROM SW FLAGS
-*/
-#define SW_SQE 0x10 /* Signal Quality Error */
-#define SW_LAB 0x08 /* Less Aggressive Backoff */
-#define SW_INIT 0x04 /* Initialized */
-#define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */
-#define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */
-
-/*
-** EEPROM SETUP FLAGS
-*/
-#define SETUP_APD 0x80 /* AutoPort Disable */
-#define SETUP_PS 0x40 /* Port Select */
-#define SETUP_MP 0x20 /* MultiPort */
-#define SETUP_1TP 0x10 /* 1 port, TP */
-#define SETUP_1COAX 0x00 /* 1 port, Coax */
-#define SETUP_DRAM 0x02 /* Number of DRAMS on board */
-
-/*
-** EEPROM MANAGEMENT FLAGS
-*/
-#define MGMT_CCE 0x01 /* Custom Counters Enable */
-
-/*
-** EEPROM VERSIONS
-*/
-#define LeMAC 0x11
-#define LeMAC2 0x12
-
-/*
-** Miscellaneous
-*/
-
-#define EEPROM_WAIT_TIME 1000 /* Number of microseconds */
-#define EISA_EN 0x0001 /* Enable EISA bus buffers */
-
-#define HASH_TABLE_LEN 512 /* Bits */
-
-#define XCT 0x80 /* Transmit Cut Through */
-#define PRELOAD 16 /* 4 long words */
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define EEPROM_OFFSET(a) ((u_short)((u_long)(a)))
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-#define EWRK3IOCTL SIOCDEVPRIVATE
-
-struct ewrk3_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */
-#define EWRK3_SET_HWADDR	0x02	/* Set the hardware address */
-#define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */
-#define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */
-#define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define EWRK3_GET_MCA	0x06	/* Get the multicast address table */
-#define EWRK3_SET_MCA	0x07	/* Set a multicast address */
-#define EWRK3_CLR_MCA	0x08	/* Clear all multicast addresses */
-#define EWRK3_MCA_EN	0x09	/* Enable multicast addressing */
-#define EWRK3_GET_STATS 0x0a /* Get the driver statistics */
-#define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */
-#define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */
-#define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */
-#define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */
-#define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */
-#define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */
-#define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index b5afe218c31..ee26ce78e27 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_DLINK
bool "D-Link devices"
default y
- depends on PCI || PARPORT
+ depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -18,36 +18,6 @@ config NET_VENDOR_DLINK
if NET_VENDOR_DLINK
-config DE600
- tristate "D-Link DE600 pocket adapter support"
- depends on PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de600.
-
-config DE620
- tristate "D-Link DE620 pocket adapter support"
- depends on PARPORT
- ---help---
- This is a network (Ethernet) device which attaches to your parallel
- port. Read <file:Documentation/networking/DLINK.txt> as well as the
- Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, if you want to use
- this. It is possible to have several devices share a single parallel
- port and it is safe to compile the corresponding drivers into the
- kernel.
-
- To compile this driver as a module, choose M here: the module
- will be called de620.
-
config DL2K
tristate "DL2000/TC902x-based Gigabit Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dlink/Makefile b/drivers/net/ethernet/dlink/Makefile
index c705eaa4f5b..40085f67157 100644
--- a/drivers/net/ethernet/dlink/Makefile
+++ b/drivers/net/ethernet/dlink/Makefile
@@ -2,7 +2,5 @@
# Makefile for the D-Link network device drivers.
#
-obj-$(CONFIG_DE600) += de600.o
-obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_DL2K) += dl2k.o
obj-$(CONFIG_SUNDANCE) += sundance.o
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
deleted file mode 100644
index 414f0eea104..00000000000
--- a/drivers/net/ethernet/dlink/de600.c
+++ /dev/null
@@ -1,529 +0,0 @@
-static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj0rn@blox.se)\n";
-/*
- * de600.c
- *
- * Linux driver for the D-Link DE-600 Ethernet pocket adapter.
- *
- * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall
- * The Author may be reached as bj0rn@blox.se
- *
- * Based on adapter information gathered from DE600.ASM by D-Link Inc.,
- * as included on disk C in the v.2.11 of PC/TCP from FTP Software.
- * For DE600.asm:
- * Portions (C) Copyright 1990 D-Link, Inc.
- * Copyright, 1988-1992, Russell Nelson, Crynwr Software
- *
- * Adapted to the sample network driver core for linux,
- * written by: Donald Becker <becker@super.org>
- * (Now at <becker@scyld.com>)
- *
- **************************************************************/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- **************************************************************/
-
-/* Add more time here if your adapter won't work OK: */
-#define DE600_SLOW_DOWN udelay(delay_time)
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-#include "de600.h"
-
-static bool check_lost = true;
-module_param(check_lost, bool, 0);
-MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
-
-static unsigned int delay_time = 10;
-module_param(delay_time, int, 0);
-MODULE_PARM_DESC(delay_time, "DE-600 delay on I/O in microseconds");
-
-
-/*
- * D-Link driver variables:
- */
-
-static volatile int rx_page;
-
-#define TX_PAGES 2
-static volatile int tx_fifo[TX_PAGES];
-static volatile int tx_fifo_in;
-static volatile int tx_fifo_out;
-static volatile int free_tx_pages = TX_PAGES;
-static int was_down;
-static DEFINE_SPINLOCK(de600_lock);
-
-static inline u8 de600_read_status(struct net_device *dev)
-{
- u8 status;
-
- outb_p(STATUS, DATA_PORT);
- status = inb(STATUS_PORT);
- outb_p(NULL_COMMAND | HI_NIBBLE, DATA_PORT);
-
- return status;
-}
-
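-/* Read one byte from the adapter: each outb_p() selects a nibble command, and the nibble is then picked up from the printer status lines */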
-static inline u8 de600_read_byte(unsigned char type, struct net_device *dev)
-{
- /* dev used by macros */
- u8 lo;
- outb_p((type), DATA_PORT);
- lo = ((unsigned char)inb(STATUS_PORT)) >> 4;
- outb_p((type) | HI_NIBBLE, DATA_PORT);
- return ((unsigned char)inb(STATUS_PORT) & (unsigned char)0xf0) | lo;
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * after booting when 'ifconfig <dev->name> $IP_ADDR' is run (in rc.inet1).
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-
-static int de600_open(struct net_device *dev)
-{
- unsigned long flags;
- int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
- if (ret) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
- return ret;
- }
- spin_lock_irqsave(&de600_lock, flags);
- ret = adapter_init(dev);
- spin_unlock_irqrestore(&de600_lock, flags);
- return ret;
-}
-
-/*
- * The inverse routine to de600_open().
- */
-
-static int de600_close(struct net_device *dev)
-{
- select_nic();
- rx_page = 0;
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
- de600_put_command(0);
- select_prn();
- free_irq(DE600_IRQ, dev);
- return 0;
-}
-
-static inline void trigger_interrupt(struct net_device *dev)
-{
- de600_put_command(FLIP_IRQ);
- select_prn();
- DE600_SLOW_DOWN;
- select_nic();
- de600_put_command(0);
-}
-
-/*
- * Copy a buffer to the adapter transmit page memory.
- * Start sending.
- */
-
-static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- int transmit_from;
- int len;
- int tickssofar;
- u8 *buffer = skb->data;
- int i;
-
- if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
- tickssofar = jiffies - dev_trans_start(dev);
- if (tickssofar < HZ/20)
- return NETDEV_TX_BUSY;
- /* else */
- printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
- /* Restart the adapter. */
- spin_lock_irqsave(&de600_lock, flags);
- if (adapter_init(dev)) {
- spin_unlock_irqrestore(&de600_lock, flags);
- return NETDEV_TX_BUSY;
- }
- spin_unlock_irqrestore(&de600_lock, flags);
- }
-
- /* Start real output */
- pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);
-
- if ((len = skb->len) < RUNT)
- len = RUNT;
-
- spin_lock_irqsave(&de600_lock, flags);
- select_nic();
- tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
- tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */
-
- if(check_lost)
- {
- /* This costs about 40 instructions per packet... */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- de600_read_byte(READ_DATA, dev);
- if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
- if (adapter_init(dev)) {
- spin_unlock_irqrestore(&de600_lock, flags);
- return NETDEV_TX_BUSY;
- }
- }
- }
-
- de600_setup_address(transmit_from, RW_ADDR);
- for (i = 0; i < skb->len ; ++i, ++buffer)
- de600_put_byte(*buffer);
- for (; i < len; ++i)
- de600_put_byte(0);
-
- if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
- dev->trans_start = jiffies;
- netif_start_queue(dev); /* allow more packets into adapter */
- /* Send page and generate a faked interrupt */
- de600_setup_address(transmit_from, TX_ADDR);
- de600_put_command(TX_ENABLE);
- }
- else {
- if (free_tx_pages)
- netif_start_queue(dev);
- else
- netif_stop_queue(dev);
- select_prn();
- }
- spin_unlock_irqrestore(&de600_lock, flags);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-
-static irqreturn_t de600_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- u8 irq_status;
- int retrig = 0;
- int boguscount = 0;
-
- spin_lock(&de600_lock);
-
- select_nic();
- irq_status = de600_read_status(dev);
-
- do {
- pr_debug("de600_interrupt (%02X)\n", irq_status);
-
- if (irq_status & RX_GOOD)
- de600_rx_intr(dev);
- else if (!(irq_status & RX_BUSY))
- de600_put_command(RX_ENABLE);
-
- /* Any transmission in progress? */
- if (free_tx_pages < TX_PAGES)
- retrig = de600_tx_intr(dev, irq_status);
- else
- retrig = 0;
-
- irq_status = de600_read_status(dev);
- } while ( (irq_status & RX_GOOD) || ((++boguscount < 100) && retrig) );
- /*
- * Yeah, it _looks_ like busy waiting, smells like busy waiting
- * and I know it's not PC, but please, it will only occur once
- * in a while and then only for a loop or so (< 1ms for sure!)
- */
-
- /* Enable adapter interrupts */
- select_prn();
- if (retrig)
- trigger_interrupt(dev);
- spin_unlock(&de600_lock);
- return IRQ_HANDLED;
-}
-
-static int de600_tx_intr(struct net_device *dev, int irq_status)
-{
- /*
- * Returns 1 if tx still not done
- */
-
- /* Check if current transmission is done yet */
- if (irq_status & TX_BUSY)
- return 1; /* tx not done, try again */
-
- /* else */
- /* If last transmission OK then bump fifo index */
- if (!(irq_status & TX_FAILED16)) {
- tx_fifo_out = (tx_fifo_out + 1) % TX_PAGES;
- ++free_tx_pages;
- dev->stats.tx_packets++;
- netif_wake_queue(dev);
- }
-
- /* More to send, or resend last packet? */
- if ((free_tx_pages < TX_PAGES) || (irq_status & TX_FAILED16)) {
- dev->trans_start = jiffies;
- de600_setup_address(tx_fifo[tx_fifo_out], TX_ADDR);
- de600_put_command(TX_ENABLE);
- return 1;
- }
- /* else */
-
- return 0;
-}
-
-/*
- * We have a good packet, get it out of the adapter.
- */
-static void de600_rx_intr(struct net_device *dev)
-{
- struct sk_buff *skb;
- int i;
- int read_from;
- int size;
- unsigned char *buffer;
-
- /* Get size of received packet */
- size = de600_read_byte(RX_LEN, dev); /* low byte */
- size += (de600_read_byte(RX_LEN, dev) << 8); /* high byte */
- size -= 4; /* Ignore trailing 4 CRC-bytes */
-
- /* Tell adapter where to store next incoming packet, enable receiver */
- read_from = rx_page_adr();
- next_rx_page();
- de600_put_command(RX_ENABLE);
-
- if ((size < 32) || (size > 1535)) {
- printk(KERN_WARNING "%s: Bogus packet size %d.\n", dev->name, size);
- if (size > 10000)
- adapter_init(dev);
- return;
- }
-
- skb = netdev_alloc_skb(dev, size + 2);
- if (skb == NULL) {
- printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
- return;
- }
- /* else */
-
- skb_reserve(skb,2); /* Align */
-
- /* 'skb->data' points to the start of sk_buff data area. */
- buffer = skb_put(skb,size);
-
- /* copy the packet into the buffer */
- de600_setup_address(read_from, RW_ADDR);
- for (i = size; i > 0; --i, ++buffer)
- *buffer = de600_read_byte(READ_DATA, dev);
-
- skb->protocol=eth_type_trans(skb,dev);
-
- netif_rx(skb);
-
- /* update stats */
- dev->stats.rx_packets++; /* count all receives */
- dev->stats.rx_bytes += size; /* count all received bytes */
-
- /*
- * If any worth-while packets have been received, netif_rx()
- * will work on them when we get to the tasklets.
- */
-}
-
-static const struct net_device_ops de600_netdev_ops = {
- .ndo_open = de600_open,
- .ndo_stop = de600_close,
- .ndo_start_xmit = de600_start_xmit,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static struct net_device * __init de600_probe(void)
-{
- int i;
- struct net_device *dev;
- int err;
-
- dev = alloc_etherdev(0);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
-
- if (!request_region(DE600_IO, 3, "de600")) {
- printk(KERN_WARNING "DE600: port 0x%x busy\n", DE600_IO);
- err = -EBUSY;
- goto out;
- }
-
- printk(KERN_INFO "%s: D-Link DE-600 pocket adapter", dev->name);
- /* Alpha testers must have the version number to report bugs. */
- pr_debug("%s", version);
-
- /* probe for adapter */
- err = -ENODEV;
- rx_page = 0;
- select_nic();
- (void)de600_read_status(dev);
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
- if (de600_read_status(dev) & 0xf0) {
- printk(": not at I/O %#3x.\n", DATA_PORT);
- goto out1;
- }
-
- /*
- * Maybe we found one,
- * have to check if it is a D-Link DE-600 adapter...
- */
-
- /* Get the adapter ethernet address from the ROM */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = de600_read_byte(READ_DATA, dev);
- dev->broadcast[i] = 0xff;
- }
-
- /* Check magic code */
- if ((dev->dev_addr[1] == 0xde) && (dev->dev_addr[2] == 0x15)) {
- /* OK, install real address */
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0xc8;
- dev->dev_addr[3] &= 0x0f;
- dev->dev_addr[3] |= 0x70;
- } else {
- printk(" not identified in the printer port\n");
- goto out1;
- }
-
- printk(", Ethernet Address: %pM\n", dev->dev_addr);
-
- dev->netdev_ops = &de600_netdev_ops;
-
- dev->flags&=~IFF_MULTICAST;
-
- select_prn();
-
- err = register_netdev(dev);
- if (err)
- goto out1;
-
- return dev;
-
-out1:
- release_region(DE600_IO, 3);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static int adapter_init(struct net_device *dev)
-{
- int i;
-
- select_nic();
- rx_page = 0; /* used by RESET */
- de600_put_command(RESET);
- de600_put_command(STOP_RESET);
-
- /* Check if it is still there... */
-	/* Get some bytes of the adapter ethernet address from the ROM */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- de600_read_byte(READ_DATA, dev);
- if ((de600_read_byte(READ_DATA, dev) != 0xde) ||
- (de600_read_byte(READ_DATA, dev) != 0x15)) {
- /* was: if (de600_read_status(dev) & 0xf0) { */
- printk("Something has happened to the DE-600! Please check it and do a new ifconfig!\n");
- /* Goodbye, cruel world... */
- dev->flags &= ~IFF_UP;
- de600_close(dev);
- was_down = 1;
- netif_stop_queue(dev); /* Transmit busy... */
- return 1; /* failed */
- }
-
- if (was_down) {
- printk(KERN_INFO "%s: Thanks, I feel much better now!\n", dev->name);
- was_down = 0;
- }
-
- tx_fifo_in = 0;
- tx_fifo_out = 0;
- free_tx_pages = TX_PAGES;
-
-
- /* set the ether address. */
- de600_setup_address(NODE_ADDRESS, RW_ADDR);
- for (i = 0; i < ETH_ALEN; i++)
- de600_put_byte(dev->dev_addr[i]);
-
- /* where to start saving incoming packets */
- rx_page = RX_BP | RX_BASE_PAGE;
- de600_setup_address(MEM_4K, RW_ADDR);
- /* Enable receiver */
- de600_put_command(RX_ENABLE);
- select_prn();
-
- netif_start_queue(dev);
-
- return 0; /* OK */
-}
-
-static struct net_device *de600_dev;
-
-static int __init de600_init(void)
-{
- de600_dev = de600_probe();
- if (IS_ERR(de600_dev))
- return PTR_ERR(de600_dev);
- return 0;
-}
-
-static void __exit de600_exit(void)
-{
- unregister_netdev(de600_dev);
- release_region(DE600_IO, 3);
- free_netdev(de600_dev);
-}
-
-module_init(de600_init);
-module_exit(de600_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de600.h b/drivers/net/ethernet/dlink/de600.h
deleted file mode 100644
index e80ecbabcf4..00000000000
--- a/drivers/net/ethernet/dlink/de600.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/**************************************************
- * *
- * Definition of D-Link Ethernet Pocket adapter *
- * *
- **************************************************/
-/*
- * D-Link Ethernet pocket adapter ports
- */
-/*
- * OK, so I'm cheating, but there are an awful lot of
- * reads and writes in order to get anything in and out
- * of the DE-600 with 4 bits at a time in the parallel port,
- * so every saved instruction really helps :-)
- */
-
-#ifndef DE600_IO
-#define DE600_IO 0x378
-#endif
-
-#define DATA_PORT (DE600_IO)
-#define STATUS_PORT (DE600_IO + 1)
-#define COMMAND_PORT (DE600_IO + 2)
-
-#ifndef DE600_IRQ
-#define DE600_IRQ 7
-#endif
-/*
- * It really should look like this, and autoprobing as well...
- *
-#define DATA_PORT (dev->base_addr + 0)
-#define STATUS_PORT (dev->base_addr + 1)
-#define COMMAND_PORT (dev->base_addr + 2)
-#define DE600_IRQ dev->irq
- */
-
-/*
- * D-Link COMMAND_PORT commands
- */
-#define SELECT_NIC 0x04 /* select Network Interface Card */
-#define SELECT_PRN 0x1c /* select Printer */
-#define NML_PRN 0xec /* normal Printer situation */
-#define IRQEN 0x10 /* enable IRQ line */
-
-/*
- * D-Link STATUS_PORT
- */
-#define RX_BUSY 0x80
-#define RX_GOOD 0x40
-#define TX_FAILED16 0x10
-#define TX_BUSY 0x08
-
-/*
- * D-Link DATA_PORT commands
- * command in low 4 bits
- * data in high 4 bits
- * select current data nibble with HI_NIBBLE bit
- */
-#define WRITE_DATA 0x00 /* write memory */
-#define READ_DATA 0x01 /* read memory */
-#define STATUS 0x02 /* read status register */
-#define COMMAND 0x03 /* write command register (see COMMAND below) */
-#define NULL_COMMAND 0x04 /* null command */
-#define RX_LEN 0x05 /* read received packet length */
-#define TX_ADDR 0x06 /* set adapter transmit memory address */
-#define RW_ADDR 0x07 /* set adapter read/write memory address */
-#define HI_NIBBLE 0x08 /* read/write the high nibble of data,
- or-ed with rest of command */
-
-/*
- * command register, accessed through DATA_PORT with low bits = COMMAND
- */
-#define RX_ALL 0x01 /* PROMISCUOUS */
-#define RX_BP 0x02 /* default: BROADCAST & PHYSICAL ADDRESS */
-#define RX_MBP 0x03 /* MULTICAST, BROADCAST & PHYSICAL ADDRESS */
-
-#define TX_ENABLE 0x04 /* bit 2 */
-#define RX_ENABLE 0x08 /* bit 3 */
-
-#define RESET 0x80 /* set bit 7 high */
-#define STOP_RESET 0x00 /* set bit 7 low */
-
-/*
- * data to command register
- * (high 4 bits in write to DATA_PORT)
- */
-#define RX_PAGE2_SELECT 0x10 /* bit 4, only 2 pages to select */
-#define RX_BASE_PAGE 0x20 /* bit 5, always set when specifying RX_ADDR */
-#define FLIP_IRQ 0x40 /* bit 6 */
-
-/*
- * D-Link adapter internal memory:
- *
- * 0-2K 1:st transmit page (send from pointer up to 2K)
- * 2-4K 2:nd transmit page (send from pointer up to 4K)
- *
- * 4-6K 1:st receive page (data from 4K upwards)
- * 6-8K 2:nd receive page (data from 6K upwards)
- *
- * 8K+ Adapter ROM (contains magic code and last 3 bytes of Ethernet address)
- */
-#define MEM_2K 0x0800 /* 2048 */
-#define MEM_4K 0x1000 /* 4096 */
-#define MEM_6K 0x1800 /* 6144 */
-#define NODE_ADDRESS 0x2000 /* 8192 */
-
-#define RUNT 60 /* Too small Ethernet packet */
-
-/**************************************************
- * *
- * End of definition *
- * *
- **************************************************/
-
-/*
- * Index to functions, as function prototypes.
- */
-/* Routines used internally. (See "convenience macros") */
-static u8 de600_read_status(struct net_device *dev);
-static u8 de600_read_byte(unsigned char type, struct net_device *dev);
-
-/* Put in the device structure. */
-static int de600_open(struct net_device *dev);
-static int de600_close(struct net_device *dev);
-static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev);
-
-/* Dispatch from interrupts. */
-static irqreturn_t de600_interrupt(int irq, void *dev_id);
-static int de600_tx_intr(struct net_device *dev, int irq_status);
-static void de600_rx_intr(struct net_device *dev);
-
-/* Initialization */
-static void trigger_interrupt(struct net_device *dev);
-static int adapter_init(struct net_device *dev);
-
-/*
- * Convenience macros/functions for D-Link adapter
- */
-
-#define select_prn() outb_p(SELECT_PRN, COMMAND_PORT); DE600_SLOW_DOWN
-#define select_nic() outb_p(SELECT_NIC, COMMAND_PORT); DE600_SLOW_DOWN
-
-/* Thanks for hints from Mark Burton <markb@ordern.demon.co.uk> */
-#define de600_put_byte(data) ( \
- outb_p(((data) << 4) | WRITE_DATA , DATA_PORT), \
- outb_p(((data) & 0xf0) | WRITE_DATA | HI_NIBBLE, DATA_PORT))
-
-/*
- * The first two outb_p()'s below could perhaps be deleted if there
- * would be more delay in the last two. Not certain about it yet...
- */
-#define de600_put_command(cmd) ( \
- outb_p(( rx_page << 4) | COMMAND , DATA_PORT), \
- outb_p(( rx_page & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT), \
- outb_p(((rx_page | cmd) << 4) | COMMAND , DATA_PORT), \
- outb_p(((rx_page | cmd) & 0xf0) | COMMAND | HI_NIBBLE, DATA_PORT))
-
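-/* Send a 16-bit adapter address four bits at a time (low nibble first), or-ed with the access type in each write */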
-#define de600_setup_address(addr,type) ( \
- outb_p((((addr) << 4) & 0xf0) | type , DATA_PORT), \
- outb_p(( (addr) & 0xf0) | type | HI_NIBBLE, DATA_PORT), \
- outb_p((((addr) >> 4) & 0xf0) | type , DATA_PORT), \
- outb_p((((addr) >> 8) & 0xf0) | type | HI_NIBBLE, DATA_PORT))
-
-#define rx_page_adr() ((rx_page & RX_PAGE2_SELECT)?(MEM_6K):(MEM_4K))
-
-/* Flip bit, only 2 pages */
-#define next_rx_page() (rx_page ^= RX_PAGE2_SELECT)
-
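-/* End address of transmit page 'a'; frames are copied in so that they finish exactly at this boundary */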
-#define tx_page_adr(a) (((a) + 1) * MEM_2K)
diff --git a/drivers/net/ethernet/dlink/de620.c b/drivers/net/ethernet/dlink/de620.c
deleted file mode 100644
index 2e2bc60ee81..00000000000
--- a/drivers/net/ethernet/dlink/de620.c
+++ /dev/null
@@ -1,987 +0,0 @@
-/*
- * de620.c $Revision: 1.40 $ BETA
- *
- *
- * Linux driver for the D-Link DE-620 Ethernet pocket adapter.
- *
- * Portions (C) Copyright 1993, 1994 by Bjorn Ekwall <bj0rn@blox.se>
- *
- * Based on adapter information gathered from DOS packetdriver
- * sources from D-Link Inc: (Special thanks to Henry Ngai of D-Link.)
- * Portions (C) Copyright D-Link SYSTEM Inc. 1991, 1992
- * Copyright, 1988, Russell Nelson, Crynwr Software
- *
- * Adapted to the sample network driver core for linux,
- * written by: Donald Becker <becker@super.org>
- * (Now at <becker@scyld.com>)
- *
- * Valuable assistance from:
- * J. Joshua Kopper <kopper@rtsg.mot.com>
- * Olav Kvittem <Olav.Kvittem@uninett.no>
- * Germano Caronni <caronni@nessie.cs.id.ethz.ch>
- * Jeremy Fitzhardinge <jeremy@suite.sw.oz.au>
- *
- *****************************************************************************/
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *****************************************************************************/
-static const char version[] =
- "de620.c: $Revision: 1.40 $, Bjorn Ekwall <bj0rn@blox.se>\n";
-
-/***********************************************************************
- *
- * "Tuning" section.
- *
- * Compile-time options: (see below for descriptions)
- * -DDE620_IO=0x378 (lpt1)
- * -DDE620_IRQ=7 (lpt1)
- * -DSHUTDOWN_WHEN_LOST
- * -DCOUNT_LOOPS
- * -DLOWSPEED
- * -DREAD_DELAY
- * -DWRITE_DELAY
- */
-
-/*
- * This driver assumes that the printer port is a "normal",
- * dumb, uni-directional port!
- * If your port is "fancy" in any way, please try to set it to "normal"
- * with your BIOS setup. I have no access to machines with bi-directional
- * ports, so I can't test such a driver :-(
- * (Yes, I _know_ it is possible to use DE620 with bidirectional ports...)
- *
- * There are some clones of DE620 out there, with different names.
- * If the current driver does not recognize a clone, try to change
- * the following #define to:
- *
- * #define DE620_CLONE 1
- */
-#define DE620_CLONE 0
-
-/*
- * If the adapter has problems with high speeds, enable this #define
- * otherwise full printerport speed will be attempted.
- *
- * You can tune the READ_DELAY/WRITE_DELAY below if you enable LOWSPEED
- *
-#define LOWSPEED
- */
-
-#ifndef READ_DELAY
-#define READ_DELAY 100 /* adapter internal read delay in 100ns units */
-#endif
-
-#ifndef WRITE_DELAY
-#define WRITE_DELAY 100 /* adapter internal write delay in 100ns units */
-#endif
-
-/*
- * Enable this #define if you want the adapter to do a "ifconfig down" on
- * itself when we have detected that something is possibly wrong with it.
- * The default behaviour is to retry with "adapter_init()" until success.
- * This should be used for debugging purposes only.
- *
-#define SHUTDOWN_WHEN_LOST
- */
-
-#ifdef LOWSPEED
-/*
- * Enable this #define if you want to see debugging output that show how long
- * we have to wait before the DE-620 is ready for the next read/write/command.
- *
-#define COUNT_LOOPS
- */
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include <asm/io.h>
-
-/* Constant definitions for the DE-620 registers, commands and bits */
-#include "de620.h"
-
-typedef unsigned char byte;
-
-/*******************************************************
- * *
- * Definition of D-Link DE-620 Ethernet Pocket adapter *
- * See also "de620.h" *
- * *
- *******************************************************/
-#ifndef DE620_IO /* Compile-time configurable */
-#define DE620_IO 0x378
-#endif
-
-#ifndef DE620_IRQ /* Compile-time configurable */
-#define DE620_IRQ 7
-#endif
-
-#define DATA_PORT (dev->base_addr)
-#define STATUS_PORT (dev->base_addr + 1)
-#define COMMAND_PORT (dev->base_addr + 2)
-
-#define RUNT 60 /* Too small Ethernet packet */
-#define GIANT 1514 /* largest legal size packet, no fcs */
-
-/*
- * Force media with insmod:
- * insmod de620.o bnc=1
- * or
- * insmod de620.o utp=1
- *
- * Force io and/or irq with insmod:
- * insmod de620.o io=0x378 irq=7
- *
- * Make a clone skip the Ethernet-address range check:
- * insmod de620.o clone=1
- */
-static int bnc;
-static int utp;
-static int io = DE620_IO;
-static int irq = DE620_IRQ;
-static int clone = DE620_CLONE;
-
-static spinlock_t de620_lock;
-
-module_param(bnc, int, 0);
-module_param(utp, int, 0);
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(clone, int, 0);
-MODULE_PARM_DESC(bnc, "DE-620 set BNC medium (0-1)");
-MODULE_PARM_DESC(utp, "DE-620 set UTP medium (0-1)");
-MODULE_PARM_DESC(io, "DE-620 I/O base address, required");
-MODULE_PARM_DESC(irq, "DE-620 IRQ number, required");
-MODULE_PARM_DESC(clone, "Check also for non-D-Link DE-620 clones (0-1)");
-
-/***********************************************
- * *
- * Index to functions, as function prototypes. *
- * *
- ***********************************************/
-
-/*
- * Routines used internally. (See also "convenience macros.. below")
- */
-
-/* Put in the device structure. */
-static int de620_open(struct net_device *);
-static int de620_close(struct net_device *);
-static void de620_set_multicast_list(struct net_device *);
-static int de620_start_xmit(struct sk_buff *, struct net_device *);
-
-/* Dispatch from interrupts. */
-static irqreturn_t de620_interrupt(int, void *);
-static int de620_rx_intr(struct net_device *);
-
-/* Initialization */
-static int adapter_init(struct net_device *);
-static int read_eeprom(struct net_device *);
-
-
-/*
- * D-Link driver variables:
- */
-#define SCR_DEF NIBBLEMODE |INTON | SLEEP | AUTOTX
-#define TCR_DEF RXPB /* not used: | TXSUCINT | T16INT */
-#define DE620_RX_START_PAGE 12 /* 12 pages (=3k) reserved for tx */
-#define DEF_NIC_CMD IRQEN | ICEN | DS1
-
-static volatile byte NIC_Cmd;
-static volatile byte next_rx_page;
-static byte first_rx_page;
-static byte last_rx_page;
-static byte EIPRegister;
-
-static struct nic {
- byte NodeID[6];
- byte RAM_Size;
- byte Model;
- byte Media;
- byte SCR;
-} nic_data;
-
-/**********************************************************
- * *
- * Convenience macros/functions for D-Link DE-620 adapter *
- * *
- **********************************************************/
-#define de620_tx_buffs(dd) (inb(STATUS_PORT) & (TXBF0 | TXBF1))
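-/* Flip the DS0/DS1 select bits so the adapter switches to the other data nibble */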
-#define de620_flip_ds(dd) NIC_Cmd ^= DS0 | DS1; outb(NIC_Cmd, COMMAND_PORT);
-
-/* Check for ready-status, and return a nibble (high 4 bits) for data input */
-#ifdef COUNT_LOOPS
-static int tot_cnt;
-#endif
-static inline byte
-de620_ready(struct net_device *dev)
-{
- byte value;
- register short int cnt = 0;
-
- while ((((value = inb(STATUS_PORT)) & READY) == 0) && (cnt <= 1000))
- ++cnt;
-
-#ifdef COUNT_LOOPS
- tot_cnt += cnt;
-#endif
- return value & 0xf0; /* nibble */
-}
-
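-/* Write a command byte to the adapter, latching it in with a CS0 pulse */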
-static inline void
-de620_send_command(struct net_device *dev, byte cmd)
-{
- de620_ready(dev);
- if (cmd == W_DUMMY)
- outb(NIC_Cmd, COMMAND_PORT);
-
- outb(cmd, DATA_PORT);
-
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
- de620_ready(dev);
- outb(NIC_Cmd, COMMAND_PORT);
-}
-
-static inline void
-de620_put_byte(struct net_device *dev, byte value)
-{
- /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
- de620_ready(dev);
- outb(value, DATA_PORT);
- de620_flip_ds(dev);
-}
-
-static inline byte
-de620_read_byte(struct net_device *dev)
-{
- byte value;
-
- /* The de620_ready() makes 7 loops, on the average, on a DX2/66 */
- value = de620_ready(dev); /* High nibble */
- de620_flip_ds(dev);
- value |= de620_ready(dev) >> 4; /* Low nibble */
- return value;
-}
-
-static inline void
-de620_write_block(struct net_device *dev, byte *buffer, int count, int pad)
-{
-#ifndef LOWSPEED
- byte uflip = NIC_Cmd ^ (DS0 | DS1);
- byte dflip = NIC_Cmd;
-#else /* LOWSPEED */
-#ifdef COUNT_LOOPS
- int bytes = count;
-#endif /* COUNT_LOOPS */
-#endif /* LOWSPEED */
-
-#ifdef LOWSPEED
-#ifdef COUNT_LOOPS
- tot_cnt = 0;
-#endif /* COUNT_LOOPS */
- /* No further optimization useful, the limit is in the adapter. */
- for ( ; count > 0; --count, ++buffer) {
- de620_put_byte(dev,*buffer);
- }
- for ( count = pad ; count > 0; --count, ++buffer) {
- de620_put_byte(dev, 0);
- }
- de620_send_command(dev,W_DUMMY);
-#ifdef COUNT_LOOPS
- /* trial debug output: loops per byte in de620_ready() */
- printk("WRITE(%d)\n", tot_cnt/((bytes?bytes:1)));
-#endif /* COUNT_LOOPS */
-#else /* not LOWSPEED */
- for ( ; count > 0; count -=2) {
- outb(*buffer++, DATA_PORT);
- outb(uflip, COMMAND_PORT);
- outb(*buffer++, DATA_PORT);
- outb(dflip, COMMAND_PORT);
- }
- de620_send_command(dev,W_DUMMY);
-#endif /* LOWSPEED */
-}
-
-static inline void
-de620_read_block(struct net_device *dev, byte *data, int count)
-{
-#ifndef LOWSPEED
- byte value;
- byte uflip = NIC_Cmd ^ (DS0 | DS1);
- byte dflip = NIC_Cmd;
-#else /* LOWSPEED */
-#ifdef COUNT_LOOPS
- int bytes = count;
-
- tot_cnt = 0;
-#endif /* COUNT_LOOPS */
-#endif /* LOWSPEED */
-
-#ifdef LOWSPEED
- /* No further optimization useful, the limit is in the adapter. */
- while (count-- > 0) {
- *data++ = de620_read_byte(dev);
- de620_flip_ds(dev);
- }
-#ifdef COUNT_LOOPS
- /* trial debug output: loops per byte in de620_ready() */
- printk("READ(%d)\n", tot_cnt/(2*(bytes?bytes:1)));
-#endif /* COUNT_LOOPS */
-#else /* not LOWSPEED */
- while (count-- > 0) {
- value = inb(STATUS_PORT) & 0xf0; /* High nibble */
- outb(uflip, COMMAND_PORT);
- *data++ = value | inb(STATUS_PORT) >> 4; /* Low nibble */
- outb(dflip , COMMAND_PORT);
- }
-#endif /* LOWSPEED */
-}
-
-static inline void
-de620_set_delay(struct net_device *dev)
-{
- de620_ready(dev);
- outb(W_DFR, DATA_PORT);
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
-
- de620_ready(dev);
-#ifdef LOWSPEED
- outb(WRITE_DELAY, DATA_PORT);
-#else
- outb(0, DATA_PORT);
-#endif
- de620_flip_ds(dev);
-
- de620_ready(dev);
-#ifdef LOWSPEED
- outb(READ_DELAY, DATA_PORT);
-#else
- outb(0, DATA_PORT);
-#endif
- de620_flip_ds(dev);
-}
-
-static inline void
-de620_set_register(struct net_device *dev, byte reg, byte value)
-{
- de620_ready(dev);
- outb(reg, DATA_PORT);
- outb(NIC_Cmd ^ CS0, COMMAND_PORT);
-
- de620_put_byte(dev, value);
-}
-
-static inline byte
-de620_get_register(struct net_device *dev, byte reg)
-{
- byte value;
-
- de620_send_command(dev,reg);
- value = de620_read_byte(dev);
- de620_send_command(dev,W_DUMMY);
-
- return value;
-}
-
-/*********************************************************************
- *
- * Open/initialize the board.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- *
- */
-static int de620_open(struct net_device *dev)
-{
- int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
- if (ret) {
- printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
- return ret;
- }
-
- if (adapter_init(dev)) {
- ret = -EIO;
- goto out_free_irq;
- }
-
- netif_start_queue(dev);
- return 0;
-
-out_free_irq:
- free_irq(dev->irq, dev);
- return ret;
-}
-
-/************************************************
- *
- * The inverse routine to de620_open().
- *
- */
-
-static int de620_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- /* disable recv */
- de620_set_register(dev, W_TCR, RXOFF);
- free_irq(dev->irq, dev);
- return 0;
-}
-
-/*********************************************
- *
- * Set or clear the multicast filter for this adaptor.
- * (no real multicast implemented for the DE-620, but she can be promiscuous...)
- *
- */
-
-static void de620_set_multicast_list(struct net_device *dev)
-{
- if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
- { /* Enable promiscuous mode */
- de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
- }
- else
- { /* Disable promiscuous mode, use normal mode */
- de620_set_register(dev, W_TCR, TCR_DEF);
- }
-}
-
-/*******************************************************
- *
- * Handle timeouts on transmit
- */
-
-static void de620_timeout(struct net_device *dev)
-{
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, "network cable problem");
- /* Restart the adapter. */
- if (!adapter_init(dev)) /* maybe close it */
- netif_wake_queue(dev);
-}
-
-/*******************************************************
- *
- * Copy a buffer to the adapter transmit page memory.
- * Start sending.
- */
-static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- int len;
- byte *buffer = skb->data;
- byte using_txbuf;
-
- using_txbuf = de620_tx_buffs(dev); /* Peek at the adapter */
-
- netif_stop_queue(dev);
-
-
- if ((len = skb->len) < RUNT)
- len = RUNT;
- if (len & 1) /* send an even number of bytes */
- ++len;
-
- /* Start real output */
-
- spin_lock_irqsave(&de620_lock, flags);
- pr_debug("de620_start_xmit: len=%d, bufs 0x%02x\n",
- (int)skb->len, using_txbuf);
-
- /* select a free tx buffer. if there is one... */
- switch (using_txbuf) {
- default: /* both are free: use TXBF0 */
- case TXBF1: /* use TXBF0 */
- de620_send_command(dev,W_CR | RW0);
- using_txbuf |= TXBF0;
- break;
-
- case TXBF0: /* use TXBF1 */
- de620_send_command(dev,W_CR | RW1);
- using_txbuf |= TXBF1;
- break;
-
- case (TXBF0 | TXBF1): /* NONE!!! */
- printk(KERN_WARNING "%s: No tx-buffer available!\n", dev->name);
- spin_unlock_irqrestore(&de620_lock, flags);
- return NETDEV_TX_BUSY;
- }
- de620_write_block(dev, buffer, skb->len, len-skb->len);
-
- if(!(using_txbuf == (TXBF0 | TXBF1)))
- netif_wake_queue(dev);
-
- dev->stats.tx_packets++;
- spin_unlock_irqrestore(&de620_lock, flags);
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*****************************************************
- *
- * Handle the network interface interrupts.
- *
- */
-static irqreturn_t
-de620_interrupt(int irq_in, void *dev_id)
-{
- struct net_device *dev = dev_id;
- byte irq_status;
- int bogus_count = 0;
- int again = 0;
-
- spin_lock(&de620_lock);
-
- /* Read the status register (_not_ the status port) */
- irq_status = de620_get_register(dev, R_STS);
-
- pr_debug("de620_interrupt (%2.2X)\n", irq_status);
-
- if (irq_status & RXGOOD) {
- do {
- again = de620_rx_intr(dev);
- pr_debug("again=%d\n", again);
- }
- while (again && (++bogus_count < 100));
- }
-
- if(de620_tx_buffs(dev) != (TXBF0 | TXBF1))
- netif_wake_queue(dev);
-
- spin_unlock(&de620_lock);
- return IRQ_HANDLED;
-}
-
-/**************************************
- *
- * Get a packet from the adapter
- *
- * Send it "upstairs"
- *
- */
-static int de620_rx_intr(struct net_device *dev)
-{
- struct header_buf {
- byte status;
- byte Rx_NextPage;
- unsigned short Rx_ByteCount;
- } header_buf;
- struct sk_buff *skb;
- int size;
- byte *buffer;
- byte pagelink;
- byte curr_page;
-
- pr_debug("de620_rx_intr: next_rx_page = %d\n", next_rx_page);
-
- /* Tell the adapter that we are going to read data, and from where */
- de620_send_command(dev, W_CR | RRN);
- de620_set_register(dev, W_RSA1, next_rx_page);
- de620_set_register(dev, W_RSA0, 0);
-
- /* Deep breath, and away we goooooo */
- de620_read_block(dev, (byte *)&header_buf, sizeof(struct header_buf));
- pr_debug("page status=0x%02x, nextpage=%d, packetsize=%d\n",
- header_buf.status, header_buf.Rx_NextPage,
- header_buf.Rx_ByteCount);
-
- /* Plausible page header? */
- pagelink = header_buf.Rx_NextPage;
- if ((pagelink < first_rx_page) || (last_rx_page < pagelink)) {
- /* Ouch... Forget it! Skip all and start afresh... */
- printk(KERN_WARNING "%s: Ring overrun? Restoring...\n", dev->name);
- /* You win some, you lose some. And sometimes plenty... */
- adapter_init(dev);
- netif_wake_queue(dev);
- dev->stats.rx_over_errors++;
- return 0;
- }
-
- /* OK, this looks good so far. Let's see if it's consistent... */
- /* Let's compute the start of the next packet, based on where we are */
- pagelink = next_rx_page +
- ((header_buf.Rx_ByteCount + (4 - 1 + 0x100)) >> 8);
-
- /* Are we going to wrap around the page counter? */
- if (pagelink > last_rx_page)
- pagelink -= (last_rx_page - first_rx_page + 1);
-
- /* Is the _computed_ next page number equal to what the adapter says? */
- if (pagelink != header_buf.Rx_NextPage) {
- /* Naah, we'll skip this packet. Probably bogus data as well */
- printk(KERN_WARNING "%s: Page link out of sync! Restoring...\n", dev->name);
- next_rx_page = header_buf.Rx_NextPage; /* at least a try... */
- de620_send_command(dev, W_DUMMY);
- de620_set_register(dev, W_NPRF, next_rx_page);
- dev->stats.rx_over_errors++;
- return 0;
- }
- next_rx_page = pagelink;
-
- size = header_buf.Rx_ByteCount - 4;
- if ((size < RUNT) || (GIANT < size)) {
- printk(KERN_WARNING "%s: Illegal packet size: %d!\n", dev->name, size);
- }
- else { /* Good packet? */
- skb = netdev_alloc_skb(dev, size + 2);
- if (skb == NULL) { /* Yeah, but no place to put it... */
- printk(KERN_WARNING "%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, size);
- dev->stats.rx_dropped++;
- }
- else { /* Yep! Go get it! */
- skb_reserve(skb,2); /* Align */
- /* skb->data points to the start of sk_buff data area */
- buffer = skb_put(skb,size);
- /* copy the packet into the buffer */
- de620_read_block(dev, buffer, size);
- pr_debug("Read %d bytes\n", size);
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb); /* deliver it "upstairs" */
- /* count all receives */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += size;
- }
- }
-
- /* Let's peek ahead to see if we have read the last current packet */
- /* NOTE! We're _not_ checking the 'EMPTY'-flag! This seems better... */
- curr_page = de620_get_register(dev, R_CPR);
- de620_set_register(dev, W_NPRF, next_rx_page);
- pr_debug("next_rx_page=%d CPR=%d\n", next_rx_page, curr_page);
-
- return next_rx_page != curr_page; /* That was slightly tricky... */
-}
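
 For illustration only, and not part of the driver being removed: the page-advance computation in de620_rx_intr() rounds the byte count reported in the on-card page header up to whole 256-byte pages (with a few bytes of header slack) and wraps within the first_rx_page..last_rx_page window. A minimal standalone sketch of that arithmetic, with made-up values:

	/* hypothetical helper mirroring the DE-620 page-advance math above */
	static unsigned char de620_next_page(unsigned char cur, unsigned short byte_count,
					     unsigned char first, unsigned char last)
	{
		/* round up to whole 256-byte pages, same slack as de620_rx_intr() */
		unsigned char link = cur + ((byte_count + (4 - 1 + 0x100)) >> 8);

		if (link > last)		/* wrap around the receive ring */
			link -= (last - first + 1);
		return link;
	}

	/* e.g. byte_count = 300: (300 + 259) >> 8 = 2, so the packet spans two pages */
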
-
-/*********************************************
- *
- * Reset the adapter to a known state
- *
- */
-static int adapter_init(struct net_device *dev)
-{
- int i;
- static int was_down;
-
- if ((nic_data.Model == 3) || (nic_data.Model == 0)) { /* CT */
- EIPRegister = NCTL0;
- if (nic_data.Media != 1)
- EIPRegister |= NIS0; /* not BNC */
- }
- else if (nic_data.Model == 2) { /* UTP */
- EIPRegister = NCTL0 | NIS0;
- }
-
- if (utp)
- EIPRegister = NCTL0 | NIS0;
- if (bnc)
- EIPRegister = NCTL0;
-
- de620_send_command(dev, W_CR | RNOP | CLEAR);
- de620_send_command(dev, W_CR | RNOP);
-
- de620_set_register(dev, W_SCR, SCR_DEF);
- /* disable recv to wait init */
- de620_set_register(dev, W_TCR, RXOFF);
-
- /* Set the node ID in the adapter */
- for (i = 0; i < 6; ++i) { /* W_PARn = 0xaa + n */
- de620_set_register(dev, W_PAR0 + i, dev->dev_addr[i]);
- }
-
- de620_set_register(dev, W_EIP, EIPRegister);
-
- next_rx_page = first_rx_page = DE620_RX_START_PAGE;
- if (nic_data.RAM_Size)
- last_rx_page = nic_data.RAM_Size - 1;
- else /* 64k RAM */
- last_rx_page = 255;
-
- de620_set_register(dev, W_SPR, first_rx_page); /* Start Page Register*/
- de620_set_register(dev, W_EPR, last_rx_page); /* End Page Register */
- de620_set_register(dev, W_CPR, first_rx_page);/*Current Page Register*/
- de620_send_command(dev, W_NPR | first_rx_page); /* Next Page Register*/
- de620_send_command(dev, W_DUMMY);
- de620_set_delay(dev);
-
- /* Final sanity check: Anybody out there? */
- /* Let's hope some bits from the status register make a good check */
-#define CHECK_MASK ( 0 | TXSUC | T16 | 0 | RXCRC | RXSHORT | 0 | 0 )
-#define CHECK_OK ( 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 )
- /* success: X 0 0 X 0 0 X X */
- /* ignore: EEDI RXGOOD COLS LNKS*/
-
- if (((i = de620_get_register(dev, R_STS)) & CHECK_MASK) != CHECK_OK) {
- printk(KERN_ERR "%s: Something has happened to the DE-620! Please check it"
-#ifdef SHUTDOWN_WHEN_LOST
- " and do a new ifconfig"
-#endif
- "! (%02x)\n", dev->name, i);
-#ifdef SHUTDOWN_WHEN_LOST
- /* Goodbye, cruel world... */
- dev->flags &= ~IFF_UP;
- de620_close(dev);
-#endif
- was_down = 1;
- return 1; /* failed */
- }
- if (was_down) {
- printk(KERN_WARNING "%s: Thanks, I feel much better now!\n", dev->name);
- was_down = 0;
- }
-
- /* All OK, go ahead... */
- de620_set_register(dev, W_TCR, TCR_DEF);
-
- return 0; /* all ok */
-}
-
-static const struct net_device_ops de620_netdev_ops = {
- .ndo_open = de620_open,
- .ndo_stop = de620_close,
- .ndo_start_xmit = de620_start_xmit,
- .ndo_tx_timeout = de620_timeout,
- .ndo_set_rx_mode = de620_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/******************************************************************************
- *
- * Only start-up code below
- *
- */
-/****************************************
- *
- * Check if there is a DE-620 connected
- */
-struct net_device * __init de620_probe(int unit)
-{
- byte checkbyte = 0xa5;
- struct net_device *dev;
- int err = -ENOMEM;
- int i;
-
- dev = alloc_etherdev(0);
- if (!dev)
- goto out;
-
- spin_lock_init(&de620_lock);
-
- /*
- * This is where the base_addr and irq gets set.
- * Tunable at compile-time and insmod-time
- */
- dev->base_addr = io;
- dev->irq = irq;
-
- /* allow overriding parameters on command line */
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- pr_debug("%s", version);
-
- printk(KERN_INFO "D-Link DE-620 pocket adapter");
-
- if (!request_region(dev->base_addr, 3, "de620")) {
- printk(" io 0x%3lX, which is busy.\n", dev->base_addr);
- err = -EBUSY;
- goto out1;
- }
-
- /* Initially, configure basic nibble mode, so we can read the EEPROM */
- NIC_Cmd = DEF_NIC_CMD;
- de620_set_register(dev, W_EIP, EIPRegister);
-
- /* Anybody out there? */
- de620_set_register(dev, W_CPR, checkbyte);
- checkbyte = de620_get_register(dev, R_CPR);
-
- if ((checkbyte != 0xa5) || (read_eeprom(dev) != 0)) {
- printk(" not identified in the printer port\n");
- err = -ENODEV;
- goto out2;
- }
-
- /* else, got it! */
- dev->dev_addr[0] = nic_data.NodeID[0];
- for (i = 1; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = nic_data.NodeID[i];
- dev->broadcast[i] = 0xff;
- }
-
- printk(", Ethernet Address: %pM", dev->dev_addr);
-
- printk(" (%dk RAM,",
- (nic_data.RAM_Size) ? (nic_data.RAM_Size >> 2) : 64);
-
- if (nic_data.Media == 1)
- printk(" BNC)\n");
- else
- printk(" UTP)\n");
-
- dev->netdev_ops = &de620_netdev_ops;
- dev->watchdog_timeo = HZ*2;
-
- /* base_addr and irq are already set, see above! */
-
- /* dump eeprom */
- pr_debug("\nEEPROM contents:\n"
- "RAM_Size = 0x%02X\n"
- "NodeID = %pM\n"
- "Model = %d\n"
- "Media = %d\n"
- "SCR = 0x%02x\n", nic_data.RAM_Size, nic_data.NodeID,
- nic_data.Model, nic_data.Media, nic_data.SCR);
-
- err = register_netdev(dev);
- if (err)
- goto out2;
- return dev;
-
-out2:
- release_region(dev->base_addr, 3);
-out1:
- free_netdev(dev);
-out:
- return ERR_PTR(err);
-}
-
-/**********************************
- *
- * Read info from on-board EEPROM
- *
- * Note: Bitwise serial I/O to/from the EEPROM via the status _register_!
- */
-#define sendit(dev,data) de620_set_register(dev, W_EIP, data | EIPRegister);
-
-static unsigned short __init ReadAWord(struct net_device *dev, int from)
-{
- unsigned short data;
- int nbits;
-
- /* cs [__~~] SET SEND STATE */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 0); sendit(dev, 1); sendit(dev, 5); sendit(dev, 4);
-
- /* Send the 9-bit address from where we want to read the 16-bit word */
- for (nbits = 9; nbits > 0; --nbits, from <<= 1) {
- if (from & 0x0100) { /* bit set? */
- /* cs [~~~~] SEND 1 */
- /* di [~~~~] */
- /* sck [_~~_] */
- sendit(dev, 6); sendit(dev, 7); sendit(dev, 7); sendit(dev, 6);
- }
- else {
- /* cs [~~~~] SEND 0 */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
- }
- }
-
- /* Shift in the 16-bit word. The bits appear serially in EEDI (=0x80) */
- for (data = 0, nbits = 16; nbits > 0; --nbits) {
- /* cs [~~~~] SEND 0 */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 4); sendit(dev, 5); sendit(dev, 5); sendit(dev, 4);
- data = (data << 1) | ((de620_get_register(dev, R_STS) & EEDI) >> 7);
- }
- /* cs [____] RESET SEND STATE */
- /* di [____] */
- /* sck [_~~_] */
- sendit(dev, 0); sendit(dev, 1); sendit(dev, 1); sendit(dev, 0);
-
- return data;
-}
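
 A worked example of the address phase above, for illustration: the loop tests bit 8 (mask 0x0100) and shifts the address left each iteration, so the 9-bit EEPROM address goes out MSB-first. Reading NodeID bytes 0+1 at offset 0x1aa (binary 1 1010 1010) therefore clocks out the bit sequence 1,1,0,1,0,1,0,1,0 before the 16 data bits are shifted in from EEDI.
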
-
-static int __init read_eeprom(struct net_device *dev)
-{
- unsigned short wrd;
-
- /* D-Link Ethernet addresses are in the series 00:80:c8:7X:XX:XX:XX */
- wrd = ReadAWord(dev, 0x1aa); /* bytes 0 + 1 of NodeID */
- if (!clone && (wrd != htons(0x0080))) /* Valid D-Link ether sequence? */
- return -1; /* Nope, not a DE-620 */
- nic_data.NodeID[0] = wrd & 0xff;
- nic_data.NodeID[1] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ab); /* bytes 2 + 3 of NodeID */
- if (!clone && ((wrd & 0xff) != 0xc8)) /* Valid D-Link ether sequence? */
- return -1; /* Nope, not a DE-620 */
- nic_data.NodeID[2] = wrd & 0xff;
- nic_data.NodeID[3] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ac); /* bytes 4 + 5 of NodeID */
- nic_data.NodeID[4] = wrd & 0xff;
- nic_data.NodeID[5] = wrd >> 8;
-
- wrd = ReadAWord(dev, 0x1ad); /* RAM size in pages (256 bytes). 0 = 64k */
- nic_data.RAM_Size = (wrd >> 8);
-
- wrd = ReadAWord(dev, 0x1ae); /* hardware model (CT = 3) */
- nic_data.Model = (wrd & 0xff);
-
- wrd = ReadAWord(dev, 0x1af); /* media (indicates BNC/UTP) */
- nic_data.Media = (wrd & 0xff);
-
- wrd = ReadAWord(dev, 0x1a8); /* System Configuration Register */
- nic_data.SCR = (wrd >> 8);
-
- return 0; /* no errors */
-}
-
-/******************************************************************************
- *
- * Loadable module skeleton
- *
- */
-#ifdef MODULE
-static struct net_device *de620_dev;
-
-int __init init_module(void)
-{
- de620_dev = de620_probe(-1);
- if (IS_ERR(de620_dev))
- return PTR_ERR(de620_dev);
- return 0;
-}
-
-void cleanup_module(void)
-{
- unregister_netdev(de620_dev);
- release_region(de620_dev->base_addr, 3);
- free_netdev(de620_dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/dlink/de620.h b/drivers/net/ethernet/dlink/de620.h
deleted file mode 100644
index e8d9a88f4cb..00000000000
--- a/drivers/net/ethernet/dlink/de620.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*********************************************************
- * *
- * Definition of D-Link DE-620 Ethernet Pocket adapter *
- * *
- *********************************************************/
-
-/* DE-620's CMD port Command */
-#define CS0 0x08 /* 1->0 command strobe */
-#define ICEN 0x04 /* 0=enable DL3520 host interface */
-#define DS0 0x02 /* 1->0 data strobe 0 */
-#define DS1 0x01 /* 1->0 data strobe 1 */
-
-#define WDIR 0x20 /* general 0=read 1=write */
-#define RDIR 0x00 /* (not 100% confirm ) */
-#define PS2WDIR 0x00 /* ps/2 mode 1=read, 0=write */
-#define PS2RDIR 0x20
-
-#define IRQEN 0x10 /* 1 = enable printer IRQ line */
-#define SELECTIN 0x08 /* 1 = select printer */
-#define INITP 0x04 /* 0 = initial printer */
-#define AUTOFEED 0x02 /* 1 = printer auto form feed */
-#define STROBE 0x01 /* 0->1 data strobe */
-
-#define RESET 0x08
-#define NIS0 0x20 /* 0 = BNC, 1 = UTP */
-#define NCTL0 0x10
-
-/* DE-620 DIC Command */
-#define W_DUMMY 0x00 /* DIC reserved command */
-#define W_CR 0x20 /* DIC write command register */
-#define W_NPR 0x40 /* DIC write Next Page Register */
-#define W_TBR 0x60 /* DIC write Tx Byte Count 1 reg */
-#define W_RSA 0x80 /* DIC write Remote Start Addr 1 */
-
-/* DE-620's STAT port bits 7-4 */
-#define EMPTY 0x80 /* 1 = receive buffer empty */
-#define INTLEVEL 0x40 /* 1 = interrupt level is high */
-#define TXBF1 0x20 /* 1 = transmit buffer 1 is in use */
-#define TXBF0 0x10 /* 1 = transmit buffer 0 is in use */
-#define READY 0x08 /* 1 = h/w ready to accept cmd/data */
-
-/* IDC 1 Command */
-#define W_RSA1 0xa0 /* write remote start address 1 */
-#define W_RSA0 0xa1 /* write remote start address 0 */
-#define W_NPRF 0xa2 /* write next page register NPR15-NPR8 */
-#define W_DFR 0xa3 /* write delay factor register */
-#define W_CPR 0xa4 /* write current page register */
-#define W_SPR 0xa5 /* write start page register */
-#define W_EPR 0xa6 /* write end page register */
-#define W_SCR 0xa7 /* write system configuration register */
-#define W_TCR 0xa8 /* write Transceiver Configuration reg */
-#define W_EIP 0xa9 /* write EEPM Interface port */
-#define W_PAR0 0xaa /* write physical address register 0 */
-#define W_PAR1 0xab /* write physical address register 1 */
-#define W_PAR2 0xac /* write physical address register 2 */
-#define W_PAR3 0xad /* write physical address register 3 */
-#define W_PAR4 0xae /* write physical address register 4 */
-#define W_PAR5 0xaf /* write physical address register 5 */
-
-/* IDC 2 Command */
-#define R_STS 0xc0 /* read status register */
-#define R_CPR 0xc1 /* read current page register */
-#define R_BPR 0xc2 /* read boundary page register */
-#define R_TDR 0xc3 /* read time domain reflectometry reg */
-
-/* STATUS Register */
-#define EEDI 0x80 /* EEPM DO pin */
-#define TXSUC 0x40 /* tx success */
-#define T16 0x20 /* tx fail 16 times */
-#define TS1 0x40 /* 0=Tx success, 1=T16 */
-#define TS0 0x20 /* 0=Tx success, 1=T16 */
-#define RXGOOD 0x10 /* rx a good packet */
-#define RXCRC 0x08 /* rx a CRC error packet */
-#define RXSHORT 0x04 /* rx a short packet */
-#define COLS 0x02 /* coaxial collision status */
-#define LNKS 0x01 /* UTP link status */
-
-/* Command Register */
-#define CLEAR 0x10 /* reset part of hardware */
-#define NOPER 0x08 /* No Operation */
-#define RNOP 0x08
-#define RRA 0x06 /* After RR then auto-advance NPR & BPR(=NPR-1) */
-#define RRN 0x04 /* Normal Remote Read mode */
-#define RW1 0x02 /* Remote Write tx buffer 1 ( page 6 - 11 ) */
-#define RW0 0x00 /* Remote Write tx buffer 0 ( page 0 - 5 ) */
-#define TXEN 0x01 /* 0->1 tx enable */
-
-/* System Configuration Register */
-#define TESTON 0x80 /* test host data transfer reliability */
-#define SLEEP 0x40 /* sleep mode */
-#if 0
-#define FASTMODE 0x04 /* fast mode for intel 82360SL fast mode */
-#define BYTEMODE 0x02 /* byte mode */
-#else
-#define FASTMODE 0x20 /* fast mode for intel 82360SL fast mode */
-#define BYTEMODE 0x10 /* byte mode */
-#endif
-#define NIBBLEMODE 0x00 /* nibble mode */
-#define IRQINV 0x08 /* turn off IRQ line inverter */
-#define IRQNML 0x00 /* turn on IRQ line inverter */
-#define INTON 0x04
-#define AUTOFFSET 0x02 /* auto shift address to TPR+12 */
-#define AUTOTX 0x01 /* auto tx when leave RW mode */
-
-/* Transceiver Configuration Register */
-#define JABBER 0x80 /* generate jabber condition */
-#define TXSUCINT 0x40 /* enable tx success interrupt */
-#define T16INT 0x20 /* enable T16 interrupt */
-#define RXERRPKT 0x10 /* accept CRC error or short packet */
-#define EXTERNALB2 0x0C /* external loopback 2 */
-#define EXTERNALB1 0x08 /* external loopback 1 */
-#define INTERNALB 0x04 /* internal loopback */
-#define NMLOPERATE 0x00 /* normal operation */
-#define RXPBM 0x03 /* rx physical, broadcast, multicast */
-#define RXPB 0x02 /* rx physical, broadcast */
-#define RXALL 0x01 /* rx all packet */
-#define RXOFF 0x00 /* rx disable */
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1d342d37915..110d26f4c60 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1156,9 +1156,10 @@ set_multicast (struct net_device *dev)
static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, "dl2k");
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+
+ strlcpy(info->driver, "dl2k", sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 28fc11b2f1e..50d9c631593 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -530,7 +530,6 @@ static int sundance_probe1(struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((__le16 *)dev->dev_addr)[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
np = netdev_priv(dev);
np->base = ioaddr;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2c177b329c8..f3d60eb13c3 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -281,11 +281,11 @@ static int dnet_mii_probe(struct net_device *dev)
/* attach the mac to the phy */
if (bp->capabilities & DNET_HAS_RMII) {
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &dnet_handle_link_change, 0,
+ &dnet_handle_link_change,
PHY_INTERFACE_MODE_RMII);
} else {
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &dnet_handle_link_change, 0,
+ &dnet_handle_link_change,
PHY_INTERFACE_MODE_MII);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index f1b3df167ff..28ceb841418 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.4.161.0u"
+#define DRV_VER "4.6.62.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8a250c38fb8..071aea79d21 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -93,13 +93,16 @@ static void be_mcc_notify(struct be_adapter *adapter)
* little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
+ u32 flags;
+
if (compl->flags != 0) {
- compl->flags = le32_to_cpu(compl->flags);
- BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
- return true;
- } else {
- return false;
+ flags = le32_to_cpu(compl->flags);
+ if (flags & CQE_FLAGS_VALID_MASK) {
+ compl->flags = flags;
+ return true;
+ }
}
+ return false;
}
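
 The reworked check reads the completion flags once, converts them to CPU order, and only then tests CQE_FLAGS_VALID_MASK; compl->flags is overwritten with the converted value only for a genuinely new completion, and an entry with a non-zero but invalid flags word is now reported as not-new instead of tripping a BUG_ON.
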
/* Need to reset the entire word that houses the valid bit */
@@ -3138,6 +3141,39 @@ err:
return status;
}
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+ int vf_num)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_iface_list *req;
+ struct be_cmd_resp_get_iface_list *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
+ wrb, NULL);
+ req->hdr.domain = vf_num + 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ resp = (struct be_cmd_resp_get_iface_list *)req;
+ vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d6552e19ffe..96970860c91 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_FN_PRIVILEGES 170
#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
+#define OPCODE_COMMON_GET_IFACE_LIST 194
#define OPCODE_COMMON_ENABLE_DISABLE_VF 196
#define OPCODE_ETH_RSS_CONFIG 1
@@ -1795,6 +1796,23 @@ static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
return flags & adapter->cmd_privileges ? true : false;
}
+/************** Get IFACE LIST *******************/
+struct be_if_desc {
+ u32 if_id;
+ u32 cap_flags;
+ u32 en_flags;
+};
+
+struct be_cmd_req_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_get_iface_list {
+ struct be_cmd_req_hdr hdr;
+ u32 if_cnt;
+ struct be_if_desc if_desc;
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -1917,4 +1935,6 @@ extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
+extern int be_cmd_get_if_id(struct be_adapter *adapter,
+ struct be_vf_cfg *vf_cfg, int vf_num);
extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 00454a10f88..76b302f30c8 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -183,12 +183,12 @@ static void be_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
- if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
- strcat(drvinfo->fw_version, " [");
- strcat(drvinfo->fw_version, fw_on_flash);
- strcat(drvinfo->fw_version, "]");
- }
+ if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN))
+ strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%s [%s]", adapter->fw_ver, fw_on_flash);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 4d6f3c54427..3860888ac71 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2597,7 +2597,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
* These addresses are programmed in the ASIC by the PF and the VF driver
* queries for the MAC address during its probe.
*/
-static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
+static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
u32 vf;
int status = 0;
@@ -2626,13 +2626,34 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
return status;
}
+static int be_vfs_mac_query(struct be_adapter *adapter)
+{
+ int status, vf;
+ u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
+ bool active;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ be_cmd_get_mac_from_list(adapter, mac, &active,
+ &vf_cfg->pmac_id, 0);
+
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ vf_cfg->if_handle, 0);
+ if (status)
+ return status;
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+ }
+ return 0;
+}
+
static void be_vf_clear(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
u32 vf;
if (be_find_vfs(adapter, ASSIGNED)) {
- dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
+ dev_warn(&adapter->pdev->dev,
+ "VFs are assigned to VMs: not disabling VFs\n");
goto done;
}
@@ -2681,21 +2702,29 @@ static int be_clear(struct be_adapter *adapter)
return 0;
}
-static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
- u32 *cap_flags, u8 domain)
+static int be_vfs_if_create(struct be_adapter *adapter)
{
- bool profile_present = false;
+ struct be_vf_cfg *vf_cfg;
+ u32 cap_flags, en_flags, vf;
int status;
- if (lancer_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, cap_flags, domain);
- if (!status)
- profile_present = true;
- }
+ cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
- if (!profile_present)
- *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST;
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (!BE3_chip(adapter))
+ be_cmd_get_profile_config(adapter, &cap_flags, vf + 1);
+
+ /* If a FW profile exists, then cap_flags are updated */
+ en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
+ if (status)
+ goto err;
+ }
+err:
+ return status;
}
static int be_vf_setup_init(struct be_adapter *adapter)
@@ -2718,65 +2747,70 @@ static int be_vf_setup_init(struct be_adapter *adapter)
static int be_vf_setup(struct be_adapter *adapter)
{
struct be_vf_cfg *vf_cfg;
- struct device *dev = &adapter->pdev->dev;
- u32 cap_flags, en_flags, vf;
u16 def_vlan, lnk_speed;
- int status, enabled_vfs;
-
- enabled_vfs = be_find_vfs(adapter, ENABLED);
- if (enabled_vfs) {
- dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
- dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
- return 0;
- }
-
- if (num_vfs > adapter->dev_num_vfs) {
- dev_warn(dev, "Device supports %d VFs and not %d\n",
- adapter->dev_num_vfs, num_vfs);
- num_vfs = adapter->dev_num_vfs;
- }
+ int status, old_vfs, vf;
+ struct device *dev = &adapter->pdev->dev;
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- if (!status) {
- adapter->num_vfs = num_vfs;
+ old_vfs = be_find_vfs(adapter, ENABLED);
+ if (old_vfs) {
+ dev_info(dev, "%d VFs are already enabled\n", old_vfs);
+ if (old_vfs != num_vfs)
+ dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+ adapter->num_vfs = old_vfs;
} else {
- /* Platform doesn't support SRIOV though device supports it */
- dev_warn(dev, "SRIOV enable failed\n");
- return 0;
+ if (num_vfs > adapter->dev_num_vfs)
+ dev_info(dev, "Device supports %d VFs and not %d\n",
+ adapter->dev_num_vfs, num_vfs);
+ adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+
+ status = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (status) {
+ dev_err(dev, "SRIOV enable failed\n");
+ adapter->num_vfs = 0;
+ return 0;
+ }
}
status = be_vf_setup_init(adapter);
if (status)
goto err;
- for_all_vfs(adapter, vf_cfg, vf) {
- be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
-
- en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST);
-
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ if (old_vfs) {
+ for_all_vfs(adapter, vf_cfg, vf) {
+ status = be_cmd_get_if_id(adapter, vf_cfg, vf);
+ if (status)
+ goto err;
+ }
+ } else {
+ status = be_vfs_if_create(adapter);
if (status)
goto err;
}
- if (!enabled_vfs) {
+ if (old_vfs) {
+ status = be_vfs_mac_query(adapter);
+ if (status)
+ goto err;
+ } else {
status = be_vf_eth_addr_config(adapter);
if (status)
goto err;
}
for_all_vfs(adapter, vf_cfg, vf) {
- lnk_speed = 1000;
- status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
- if (status)
- goto err;
- vf_cfg->tx_rate = lnk_speed * 10;
+ /* BE3 FW, by default, caps VF TX-rate to 100mbps.
+ * Allow full available bandwidth
+ */
+ if (BE3_chip(adapter) && !old_vfs)
+ be_cmd_set_qos(adapter, 1000, vf+1);
+
+ status = be_cmd_link_status_query(adapter, &lnk_speed,
+ NULL, vf + 1);
+ if (!status)
+ vf_cfg->tx_rate = lnk_speed;
status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle);
+ vf + 1, vf_cfg->if_handle);
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
@@ -2785,6 +2819,8 @@ static int be_vf_setup(struct be_adapter *adapter)
}
return 0;
err:
+ dev_err(dev, "VF setup failed\n");
+ be_vf_clear(adapter);
return status;
}
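
 In outline, the reworked be_vf_setup() now distinguishes two cases: if VFs are already enabled (for example from a previous load), it reuses them, querying each VF's interface handle via be_cmd_get_if_id() and its programmed MAC via be_vfs_mac_query(); otherwise it enables SR-IOV, creates the VF interfaces with be_vfs_if_create() and programs fresh MAC addresses. Any failure now unwinds through be_vf_clear() rather than leaving VFs half-configured.
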
@@ -2838,12 +2874,12 @@ static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
static void be_get_resources(struct be_adapter *adapter)
{
- int status;
+ u16 dev_num_vfs;
+ int pos, status;
bool profile_present = false;
- if (lancer_chip(adapter)) {
+ if (!BEx_chip(adapter)) {
status = be_cmd_get_func_config(adapter);
-
if (!status)
profile_present = true;
}
@@ -2899,13 +2935,21 @@ static void be_get_resources(struct be_adapter *adapter)
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
}
+
+ pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
+ &dev_num_vfs);
+ if (BE3_chip(adapter))
+ dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
+ adapter->dev_num_vfs = dev_num_vfs;
+ }
}
/* Routine to query per function resource limits */
static int be_get_config(struct be_adapter *adapter)
{
- int pos, status;
- u16 dev_num_vfs;
+ int status;
status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
&adapter->function_mode,
@@ -2923,14 +2967,6 @@ static int be_get_config(struct be_adapter *adapter)
goto err;
}
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
- &dev_num_vfs);
- if (!lancer_chip(adapter))
- dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
- adapter->dev_num_vfs = dev_num_vfs;
- }
err:
return status;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8db1c06008d..5722bc61fa5 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -206,7 +206,7 @@ struct ethoc {
unsigned int num_rx;
unsigned int cur_rx;
- void** vma;
+ void **vma;
struct net_device *netdev;
struct napi_struct napi;
@@ -292,7 +292,7 @@ static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
struct ethoc_bd bd;
int i;
- void* vma;
+ void *vma;
dev->cur_tx = 0;
dev->dty_tx = 0;
@@ -447,8 +447,8 @@ static int ethoc_rx(struct net_device *dev, int limit)
netif_receive_skb(skb);
} else {
if (net_ratelimit())
- dev_warn(&dev->dev, "low on memory - "
- "packet dropped\n");
+ dev_warn(&dev->dev,
+ "low on memory - packet dropped\n");
dev->stats.rx_dropped++;
break;
@@ -555,9 +555,8 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
pending = ethoc_read(priv, INT_SOURCE);
pending &= mask;
- if (unlikely(pending == 0)) {
+ if (unlikely(pending == 0))
return IRQ_NONE;
- }
ethoc_ack_irq(priv, pending);
@@ -620,7 +619,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
u32 status = ethoc_read(priv, MIISTATUS);
if (!(status & MIISTATUS_BUSY)) {
u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -628,7 +627,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
ethoc_write(priv, MIICOMMAND, 0);
return data;
}
- usleep_range(100,200);
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -643,14 +642,14 @@ static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
ethoc_write(priv, MIITX_DATA, val);
ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
- for (i=0; i < 5; i++) {
+ for (i = 0; i < 5; i++) {
u32 stat = ethoc_read(priv, MIISTATUS);
if (!(stat & MIISTATUS_BUSY)) {
/* reset MII command register */
ethoc_write(priv, MIICOMMAND, 0);
return 0;
}
- usleep_range(100,200);
+ usleep_range(100, 200);
}
return -EBUSY;
@@ -671,19 +670,18 @@ static int ethoc_mdio_probe(struct net_device *dev)
struct phy_device *phy;
int err;
- if (priv->phy_id != -1) {
+ if (priv->phy_id != -1)
phy = priv->mdio->phy_map[priv->phy_id];
- } else {
+ else
phy = phy_find_first(priv->mdio);
- }
if (!phy) {
dev_err(&dev->dev, "no PHY found\n");
return -ENXIO;
}
- err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
- PHY_INTERFACE_MODE_GMII);
+ err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
+ PHY_INTERFACE_MODE_GMII);
if (err) {
dev_err(&dev->dev, "could not attach to PHY\n");
return err;
@@ -771,21 +769,24 @@ static int ethoc_config(struct net_device *dev, struct ifmap *map)
return -ENOSYS;
}
-static int ethoc_set_mac_address(struct net_device *dev, void *addr)
+static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
- u8 *mac = (u8 *)addr;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
+ unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
ethoc_write(priv, MAC_ADDR1, (mac[0] << 8) | (mac[1] << 0));
+}
- memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+static int ethoc_set_mac_address(struct net_device *dev, void *p)
+{
+ const struct sockaddr *addr = p;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ ethoc_do_set_mac_address(dev);
return 0;
}
@@ -1022,7 +1023,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
priv->num_tx, priv->num_rx);
- priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
+ priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
goto error;
@@ -1038,7 +1039,7 @@ static int ethoc_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
{
- const uint8_t* mac;
+ const uint8_t *mac;
mac = of_get_property(pdev->dev.of_node,
"local-mac-address",
@@ -1050,25 +1051,23 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Check that the given MAC address is valid. If it isn't, read the
- * current MAC from the controller. */
+ * current MAC from the controller.
+ */
if (!is_valid_ether_addr(netdev->dev_addr))
ethoc_get_mac_address(netdev, netdev->dev_addr);
/* Check the MAC again for validity, if it still isn't choose and
- * program a random one. */
+ * program a random one.
+ */
if (!is_valid_ether_addr(netdev->dev_addr)) {
eth_random_addr(netdev->dev_addr);
random_mac = true;
}
- ret = ethoc_set_mac_address(netdev, netdev->dev_addr);
- if (ret) {
- dev_err(&netdev->dev, "failed to set MAC address\n");
- goto error;
- }
+ ethoc_do_set_mac_address(netdev);
if (random_mac)
- netdev->addr_assign_type |= NET_ADDR_RANDOM;
+ netdev->addr_assign_type = NET_ADDR_RANDOM;
/* register MII bus */
priv->mdio = mdiobus_alloc();
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 74d749e29aa..7c361d1db94 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -858,8 +858,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
}
phydev = phy_connect(netdev, dev_name(&phydev->dev),
- &ftgmac100_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ &ftgmac100_adjust_link, PHY_INTERFACE_MODE_GMII);
if (IS_ERR(phydev)) {
netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
@@ -955,9 +954,9 @@ static int ftgmac100_mdiobus_reset(struct mii_bus *bus)
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(&netdev->dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftgmac100_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b901a01e3fa..b5ea8fbd8a7 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -820,9 +820,9 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(&netdev->dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index ec490d741fc..6048dc8604e 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -26,6 +26,7 @@ config FEC
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
select PHYLIB
+ select PTP_1588_CLOCK
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -92,12 +93,4 @@ config GIANFAR
This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
and MPC86xx family of chips, and the FEC on the 8540.
-config FEC_PTP
- bool "PTP Hardware Clock (PHC)"
- depends on FEC && ARCH_MXC && !SOC_IMX25 && !SOC_IMX27 && !SOC_IMX35 && !SOC_IMX5
- select PTP_1588_CLOCK
- --help---
- Say Y here if you want to use PTP Hardware Clock (PHC) in the
- driver. Only the basic clock operations have been implemented.
-
endif # NET_VENDOR_FREESCALE
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index d4d19b3d00a..b7d58fe6f53 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,8 +2,7 @@
# Makefile for the Freescale network device drivers.
#
-obj-$(CONFIG_FEC) += fec.o
-obj-$(CONFIG_FEC_PTP) += fec_ptp.o
+obj-$(CONFIG_FEC) += fec.o fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 0704bcab178..29d82cf1528 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -67,6 +67,15 @@
#endif
#define DRIVER_NAME "fec"
+#define FEC_NAPI_WEIGHT 64
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE (1 << 5)
+#define FEC_ENET_RSEM_V 0x84
+#define FEC_ENET_RSFL_V 16
+#define FEC_ENET_RAEM_V 0x8
+#define FEC_ENET_RAFL_V 0x8
+#define FEC_ENET_OPD_V 0xFFF0
/* Controller is ENET-MAC */
#define FEC_QUIRK_ENET_MAC (1 << 0)
@@ -76,6 +85,8 @@
#define FEC_QUIRK_USE_GASKET (1 << 2)
/* Controller has GBIT support */
#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extended buffer descriptors */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
static struct platform_device_id fec_devtype[] = {
{
@@ -93,7 +104,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX,
}, {
/* sentinel */
}
@@ -140,7 +152,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
+#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
#error "FEC: descriptor ring size constants too large"
#endif
@@ -157,6 +169,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* The FEC stores dest/src/type, data, and checksum for receive packets.
*/
@@ -190,8 +203,29 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+#define FEC_PAUSE_FLAG_AUTONEG 0x1
+#define FEC_PAUSE_FLAG_ENABLE 0x2
+
static int mii_cnt;
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+ if (is_ex)
+ return (struct bufdesc *)(ex + 1);
+ else
+ return bdp + 1;
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+{
+ struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
+ if (is_ex)
+ return (struct bufdesc *)(ex - 1);
+ else
+ return bdp - 1;
+}
+
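
 The helpers above are needed because the ring now holds either struct bufdesc or the larger struct bufdesc_ex, so plain bdp++/bdp-- no longer lands on the next descriptor. The same reasoning applies when turning a descriptor pointer back into a ring index, as the next hunk does for the TX bounce buffers; a minimal sketch (hypothetical helper name, not part of this patch):

	static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
					 int is_ex)
	{
		if (is_ex)	/* extended descriptors are larger, so scale by their size */
			return (struct bufdesc_ex *)bdp - (struct bufdesc_ex *)base;
		return bdp - base;
	}
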
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -248,7 +282,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
*/
if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
unsigned int index;
- index = bdp - fep->tx_bd_base;
+ if (fep->bufdesc_ex)
+ index = (struct bufdesc_ex *)bdp -
+ (struct bufdesc_ex *)fep->tx_bd_base;
+ else
+ index = bdp - fep->tx_bd_base;
memcpy(fep->tx_bounce[index], skb->data, skb->len);
bufaddr = fep->tx_bounce[index];
}
@@ -280,17 +318,19 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_bdu = 0;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ if (fep->bufdesc_ex) {
+
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_bdu = 0;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
fep->hwts_tx_en)) {
- bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
+ } else {
- bdp->cbd_esc = BD_ENET_TX_INT;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
+ }
}
-#endif
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
@@ -298,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
if (bdp == fep->dirty_tx) {
fep->tx_full = 1;
@@ -359,8 +399,12 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
- fep->hwp + FEC_X_DES_START);
+ if (fep->bufdesc_ex)
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ else
+ writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
+ * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
fep->cur_rx = fep->rx_bd_base;
@@ -439,6 +483,25 @@ fec_restart(struct net_device *ndev, int duplex)
}
#endif
}
+
+ /* enable pause frame */
+ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+ ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+ fep->phy_dev && fep->phy_dev->pause)) {
+ rcntl |= FEC_ENET_FCE;
+
+ /* set FIFO threshold parameters to reduce overrun */
+ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
+ writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
+ writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
+ writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
+
+ /* OPD */
+ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
+ } else {
+ rcntl &= ~FEC_ENET_FCE;
+ }
+
writel(rcntl, fep->hwp + FEC_R_CNTRL);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
@@ -448,17 +511,16 @@ fec_restart(struct net_device *ndev, int duplex)
writel(1 << 8, fep->hwp + FEC_X_WMRK);
}
-#ifdef CONFIG_FEC_PTP
- ecntl |= (1 << 4);
-#endif
+ if (fep->bufdesc_ex)
+ ecntl |= (1 << 4);
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-#ifdef CONFIG_FEC_PTP
- fec_ptp_start_cyclecounter(ndev);
-#endif
+ if (fep->bufdesc_ex)
+ fec_ptp_start_cyclecounter(ndev);
+
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
@@ -544,19 +606,20 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_packets++;
}
-#ifdef CONFIG_FEC_PTP
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
unsigned long flags;
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps.hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
skb_tstamp_tx(skb, &shhwtstamps);
}
-#endif
+
if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
@@ -575,7 +638,7 @@ fec_enet_tx(struct net_device *ndev)
if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Since we have freed up a buffer, the ring is no longer full
*/
@@ -595,8 +658,8 @@ fec_enet_tx(struct net_device *ndev)
* not been given to the system, we just set the empty indicator,
* effectively tossing the packet.
*/
-static void
-fec_enet_rx(struct net_device *ndev)
+static int
+fec_enet_rx(struct net_device *ndev, int budget)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
@@ -606,13 +669,12 @@ fec_enet_rx(struct net_device *ndev)
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+ int pkt_received = 0;
#ifdef CONFIG_M532x
flush_cache_all();
#endif
- spin_lock(&fep->hw_lock);
-
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
@@ -620,6 +682,10 @@ fec_enet_rx(struct net_device *ndev)
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+ if (pkt_received >= budget)
+ break;
+ pkt_received++;
+
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
@@ -683,23 +749,25 @@ fec_enet_rx(struct net_device *ndev)
skb_put(skb, pkt_len - 4); /* Make room */
skb_copy_to_linear_data(skb, data, pkt_len - 4);
skb->protocol = eth_type_trans(skb, ndev);
-#ifdef CONFIG_FEC_PTP
+
/* Get receive timestamp from the skb */
- if (fep->hwts_rx_en) {
+ if (fep->hwts_rx_en && fep->bufdesc_ex) {
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
unsigned long flags;
+ struct bufdesc_ex *ebdp =
+ (struct bufdesc_ex *)bdp;
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
spin_lock_irqsave(&fep->tmreg_lock, flags);
shhwtstamps->hwtstamp = ns_to_ktime(
- timecounter_cyc2time(&fep->tc, bdp->ts));
+ timecounter_cyc2time(&fep->tc, ebdp->ts));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
-#endif
+
if (!skb_defer_rx_timestamp(skb))
- netif_rx(skb);
+ napi_gro_receive(&fep->napi, skb);
}
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -712,17 +780,19 @@ rx_processing_done:
status |= BD_ENET_RX_EMPTY;
bdp->cbd_sc = status;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
- bdp->cbd_prot = 0;
- bdp->cbd_bdu = 0;
-#endif
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_prot = 0;
+ ebdp->cbd_bdu = 0;
+ }
/* Update BD pointer to next entry */
if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
else
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
@@ -731,7 +801,7 @@ rx_processing_done:
}
fep->cur_rx = bdp;
- spin_unlock(&fep->hw_lock);
+ return pkt_received;
}
static irqreturn_t
@@ -748,7 +818,13 @@ fec_enet_interrupt(int irq, void *dev_id)
if (int_events & FEC_ENET_RXF) {
ret = IRQ_HANDLED;
- fec_enet_rx(ndev);
+
+ /* Disable the RX interrupt */
+ if (napi_schedule_prep(&fep->napi)) {
+ writel(FEC_RX_DISABLED_IMASK,
+ fep->hwp + FEC_IMASK);
+ __napi_schedule(&fep->napi);
+ }
}
/* Transmit OK, or non-fatal error. Update the buffer
@@ -769,10 +845,21 @@ fec_enet_interrupt(int irq, void *dev_id)
return ret;
}
+static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
+{
+ struct net_device *ndev = napi->dev;
+ int pkts = fec_enet_rx(ndev, budget);
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ if (pkts < budget) {
+ napi_complete(napi);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+ }
+ return pkts;
+}
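
 Taken together with the interrupt handler change above, this implements the usual NAPI handshake: the hard IRQ masks further receive interrupts (FEC_RX_DISABLED_IMASK) and schedules the poller, fec_enet_rx() then drains up to budget packets in softirq context via napi_gro_receive(), and only when the poll finishes under budget is napi_complete() called and the full interrupt mask (FEC_DEFAULT_IMASK) restored.
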
/* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *ndev)
+static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -973,7 +1060,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
- phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
+ phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
@@ -981,8 +1068,10 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
/* mask with MAC supported features */
- if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT)
+ if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
phy_dev->supported &= PHY_GBIT_FEATURES;
+ phy_dev->supported |= SUPPORTED_Pause;
+ }
else
phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1133,17 +1222,95 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strcpy(info->driver, fep->pdev->dev.driver->name);
- strcpy(info->version, "Revision: 1.0");
- strcpy(info->bus_info, dev_name(&ndev->dev));
+ strlcpy(info->driver, fep->pdev->dev.driver->name,
+ sizeof(info->driver));
+ strlcpy(info->version, "Revision: 1.0", sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+}
+
+static int fec_enet_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (fep->bufdesc_ex) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ if (fep->ptp_clock)
+ info->phc_index = ptp_clock_index(fep->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+ return 0;
+ } else {
+ return ethtool_op_get_ts_info(ndev, info);
+ }
+}
+
+static void fec_enet_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
+ pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
+ pause->rx_pause = pause->tx_pause;
+}
+
+static int fec_enet_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ if (pause->tx_pause != pause->rx_pause) {
+ netdev_info(ndev,
+ "hardware only supports enabling/disabling both tx and rx");
+ return -EINVAL;
+ }
+
+ fep->pause_flag = 0;
+
+ /* tx pause must be same as rx pause */
+ fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
+ fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+
+ if (pause->rx_pause || pause->autoneg) {
+ fep->phy_dev->supported |= ADVERTISED_Pause;
+ fep->phy_dev->advertising |= ADVERTISED_Pause;
+ } else {
+ fep->phy_dev->supported &= ~ADVERTISED_Pause;
+ fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ }
+
+ if (pause->autoneg) {
+ if (netif_running(ndev))
+ fec_stop(ndev);
+ phy_start_aneg(fep->phy_dev);
+ }
+ if (netif_running(ndev))
+ fec_restart(ndev, 0);
+
+ return 0;
}
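
 Once this lands, flow control on the FEC can be inspected and toggled from userspace with the standard ethtool pause interface, for example "ethtool -A eth0 autoneg on rx on tx on" (eth0 being a placeholder interface name); note that, as the code enforces, rx and tx pause must be enabled or disabled together.
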
static const struct ethtool_ops fec_enet_ethtool_ops = {
+ .get_pauseparam = fec_enet_get_pauseparam,
+ .set_pauseparam = fec_enet_set_pauseparam,
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = fec_enet_get_ts_info,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -1157,10 +1324,9 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
-#ifdef CONFIG_FEC_PTP
- if (cmd == SIOCSHWTSTAMP)
+ if (cmd == SIOCSHWTSTAMP && fep->bufdesc_ex)
return fec_ptp_ioctl(ndev, rq, cmd);
-#endif
+
return phy_mii_ioctl(phydev, rq, cmd);
}
@@ -1180,7 +1346,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
bdp = fep->tx_bd_base;
@@ -1207,14 +1373,17 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
bdp->cbd_sc = BD_ENET_RX_EMPTY;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
@@ -1224,14 +1393,16 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
-#ifdef CONFIG_FEC_PTP
- bdp->cbd_esc = BD_ENET_RX_INT;
-#endif
- bdp++;
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ ebdp->cbd_esc = BD_ENET_RX_INT;
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap. */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -1243,6 +1414,8 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ napi_enable(&fep->napi);
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1444,24 +1617,31 @@ static int fec_enet_init(struct net_device *ndev)
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ if (fep->bufdesc_ex)
+ fep->tx_bd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ else
+ fep->tx_bd_base = cbd_base + RX_RING_SIZE;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->netdev_ops = &fec_netdev_ops;
ndev->ethtool_ops = &fec_enet_ethtool_ops;
+ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
/* ...and the same for transmit */
@@ -1471,11 +1651,11 @@ static int fec_enet_init(struct net_device *ndev)
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
- bdp++;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
- bdp--;
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fec_restart(ndev, 0);
@@ -1509,22 +1689,25 @@ static void fec_reset_phy(struct platform_device *pdev)
msec = 1;
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+ if (!gpio_is_valid(phy_reset))
+ return;
+
err = devm_gpio_request_one(&pdev->dev, phy_reset,
GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
- pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
+ dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
msleep(msec);
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
-static inline int fec_get_phy_mode_dt(struct platform_device *pdev)
+static int fec_get_phy_mode_dt(struct platform_device *pdev)
{
return -ENODEV;
}
-static inline void fec_reset_phy(struct platform_device *pdev)
+static void fec_reset_phy(struct platform_device *pdev)
{
/*
* In case of platform probe, the reset has been done
@@ -1570,10 +1753,17 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ /* Enable pause frame autonegotiation by default */
+ if (pdev->id_entry &&
+ (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
+ fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
fep->dev_id = dev_id++;
+ fep->bufdesc_ex = 0;
+
if (!fep->hwp) {
ret = -ENOMEM;
goto failed_ioremap;
@@ -1628,19 +1818,19 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
-#ifdef CONFIG_FEC_PTP
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+ fep->bufdesc_ex =
+ pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
if (IS_ERR(fep->clk_ptp)) {
ret = PTR_ERR(fep->clk_ptp);
- goto failed_clk;
+ fep->bufdesc_ex = 0;
}
-#endif
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_prepare_enable(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_prepare_enable(fep->clk_ptp);
+
reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(reg_phy)) {
ret = regulator_enable(reg_phy);
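
The clock handling above treats the "ptp" clock as optional: when devm_clk_get() fails, the probe no longer aborts, it clears bufdesc_ex and continues without timestamping support. A small user-space analogue of that decision; get_ptp_clock() is a made-up stand-in, not a kernel API:

/* Sketch (user-space analogue): treat the PTP clock as optional.
 * A NULL return from get_ptp_clock() models the IS_ERR() case. */
#include <stdbool.h>
#include <stdio.h>

struct fec_priv {
	bool bufdesc_ex;	/* extended descriptors need the PTP clock */
	void *clk_ptp;
};

static void *get_ptp_clock(bool present)
{
	return present ? (void *)0x1 : NULL;
}

static void probe(struct fec_priv *fep, bool quirk_has_bufdesc_ex,
		  bool clk_present)
{
	fep->bufdesc_ex = quirk_has_bufdesc_ex;
	fep->clk_ptp = get_ptp_clock(clk_present);
	if (!fep->clk_ptp)
		fep->bufdesc_ex = false;	/* fall back, do not fail probe */
	printf("bufdesc_ex=%d\n", fep->bufdesc_ex);
}

int main(void)
{
	struct fec_priv fep;

	probe(&fep, true, true);	/* bufdesc_ex=1 */
	probe(&fep, true, false);	/* bufdesc_ex=0, probe still succeeds */
	return 0;
}
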
@@ -1653,6 +1843,9 @@ fec_probe(struct platform_device *pdev)
fec_reset_phy(pdev);
+ if (fep->bufdesc_ex)
+ fec_ptp_init(ndev, pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1668,10 +1861,6 @@ fec_probe(struct platform_device *pdev)
if (ret)
goto failed_register;
-#ifdef CONFIG_FEC_PTP
- fec_ptp_init(ndev, pdev);
-#endif
-
return 0;
failed_register:
@@ -1681,9 +1870,8 @@ failed_init:
failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
-#ifdef CONFIG_FEC_PTP
- clk_disable_unprepare(fep->clk_ptp);
-#endif
+ if (!IS_ERR(fep->clk_ptp))
+ clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1716,12 +1904,10 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
-#ifdef CONFIG_FEC_PTP
del_timer_sync(&fep->time_keep);
clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
-#endif
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
iounmap(fep->hwp);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index c5a3bc1475c..01579b8e37c 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -13,11 +13,9 @@
#define FEC_H
/****************************************************************************/
-#ifdef CONFIG_FEC_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
@@ -50,6 +48,10 @@
#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
+#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
+#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
+#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
+#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -94,14 +96,17 @@ struct bufdesc {
unsigned short cbd_datlen; /* Data length */
unsigned short cbd_sc; /* Control and status info */
unsigned long cbd_bufaddr; /* Buffer address */
-#ifdef CONFIG_FEC_PTP
+};
+
+struct bufdesc_ex {
+ struct bufdesc desc;
unsigned long cbd_esc;
unsigned long cbd_prot;
unsigned long cbd_bdu;
unsigned long ts;
unsigned short res0[4];
-#endif
};
+
#else
struct bufdesc {
unsigned short cbd_sc; /* Control and status info */
@@ -203,9 +208,7 @@ struct fec_enet_private {
struct clk *clk_ipg;
struct clk *clk_ahb;
-#ifdef CONFIG_FEC_PTP
struct clk *clk_ptp;
-#endif
/* The saved address of a sent-in-place packet/buffer, for skfree(). */
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -243,8 +246,11 @@ struct fec_enet_private {
int full_duplex;
struct completion mdio_done;
int irq[FEC_IRQ_NUM];
+ int bufdesc_ex;
+ int pause_flag;
+
+ struct napi_struct napi;
-#ifdef CONFIG_FEC_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
@@ -257,15 +263,12 @@ struct fec_enet_private {
int hwts_rx_en;
int hwts_tx_en;
struct timer_list time_keep;
-#endif
};
-#ifdef CONFIG_FEC_PTP
void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
-#endif
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 817d081d2cd..7f91b0c5c26 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
+#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/netdevice.h>
@@ -76,10 +77,6 @@ static void mpc52xx_fec_stop(struct net_device *dev);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
-static u8 mpc52xx_fec_mac_addr[6];
-module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
-MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
-
#define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
static int debug = -1; /* the above default */
@@ -110,15 +107,6 @@ static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
-static void mpc52xx_fec_get_paddr(struct net_device *dev, u8 *mac)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct mpc52xx_fec __iomem *fec = priv->fec;
-
- *(u32 *)(&mac[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&mac[4]) = in_be32(&fec->paddr2) >> 16;
-}
-
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
@@ -853,6 +841,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
struct resource mem;
const u32 *prop;
int prop_size;
+ struct device_node *np = op->dev.of_node;
+ const char *mac_addr;
phys_addr_t rx_fifo;
phys_addr_t tx_fifo;
@@ -866,7 +856,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->ndev = ndev;
/* Reserve FEC control zone */
- rv = of_address_to_resource(op->dev.of_node, 0, &mem);
+ rv = of_address_to_resource(np, 0, &mem);
if (rv) {
printk(KERN_ERR DRIVER_NAME ": "
"Error while parsing device node resource\n" );
@@ -919,7 +909,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Get the IRQ we need one by one */
/* Control */
- ndev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+ ndev->irq = irq_of_parse_and_map(np, 0);
/* RX */
priv->r_irq = bcom_get_task_irq(priv->rx_dmatsk);
@@ -927,11 +917,33 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* TX */
priv->t_irq = bcom_get_task_irq(priv->tx_dmatsk);
- /* MAC address init */
- if (!is_zero_ether_addr(mpc52xx_fec_mac_addr))
- memcpy(ndev->dev_addr, mpc52xx_fec_mac_addr, 6);
- else
- mpc52xx_fec_get_paddr(ndev, ndev->dev_addr);
+ /*
+ * MAC address init:
+ *
+ * First try to read MAC address from DT
+ */
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr) {
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ } else {
+ struct mpc52xx_fec __iomem *fec = priv->fec;
+
+ /*
+ * If the MAC address is not provided via DT, then read
+ * it back from the controller regs
+ */
+ *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ }
+
+ /*
+ * Check if the MAC address is valid; if not, get a random one
+ */
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(&ndev->dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
priv->msg_enable = netif_msg_init(debug, MPC52xx_MESSAGES_DEFAULT);
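
The MAC module parameter is gone; the probe above now prefers the address from the device tree, then whatever is latched in the controller's PADDR registers, and finally a random address if neither is valid. A user-space sketch of that selection order; the helpers and values here are stand-ins for of_get_mac_address(), the paddr register reads and eth_hw_addr_random():

/* Sketch: MAC address selection order used by the probe above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_valid(const unsigned char *mac)
{
	static const unsigned char zero[ETH_ALEN];

	/* valid = non-zero and unicast (low bit of first octet clear) */
	return memcmp(mac, zero, ETH_ALEN) != 0 && !(mac[0] & 1);
}

static void pick_mac(const unsigned char *dt, const unsigned char *reg,
		     unsigned char *out)
{
	if (dt)
		memcpy(out, dt, ETH_ALEN);		/* 1. device tree */
	else
		memcpy(out, reg, ETH_ALEN);		/* 2. controller regs */

	if (!is_valid(out)) {				/* 3. random fallback */
		const unsigned char rnd[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };

		memcpy(out, rnd, ETH_ALEN);
	}
}

int main(void)
{
	const unsigned char reg[ETH_ALEN] = { 0 };	/* nothing latched */
	unsigned char mac[ETH_ALEN];

	pick_mac(NULL, reg, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
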
@@ -942,20 +954,20 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(op->dev.of_node) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
- prop = of_get_property(op->dev.of_node, "current-speed", &prop_size);
+ prop = of_get_property(np, "current-speed", &prop_size);
if (prop && (prop_size >= sizeof(u32) * 2)) {
priv->speed = prop[0];
priv->duplex = prop[1] ? DUPLEX_FULL : DUPLEX_HALF;
}
/* If there is a phy handle, then get the PHY node */
- priv->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(op->dev.of_node, "fsl,7-wire-mode", NULL)) {
+ if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
@@ -970,6 +982,8 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* We're done ! */
dev_set_drvdata(&op->dev, ndev);
+ printk(KERN_INFO "%s: %s MAC %pM\n",
+ ndev->name, op->dev.of_node->full_name, ndev->dev_addr);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c40526c78c2..1f17ca0f220 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -104,7 +104,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
unsigned long flags;
int inc;
- inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+ inc = 1000000000 / fep->cycle_speed;
/* grab the ptp lock */
spin_lock_irqsave(&fep->tmreg_lock, flags);
@@ -363,6 +363,8 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
fep->ptp_caps.settime = fec_ptp_settime;
fep->ptp_caps.enable = fec_ptp_enable;
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+
spin_lock_init(&fep->tmreg_lock);
fec_ptp_start_cyclecounter(ndev);
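
fec_ptp_start_cyclecounter() now takes the timer increment from the cached cycle_speed instead of calling clk_get_rate() each time. The increment is just the PTP clock period in nanoseconds, using integer division as the driver does; the clock rates below are illustrative, not a claim about any particular SoC:

/* Worked example: increment (ns per tick) = 1e9 / PTP clock rate (Hz). */
#include <stdio.h>

int main(void)
{
	const unsigned long rates[] = { 25000000UL, 66000000UL, 125000000UL };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%9lu Hz -> inc = %lu ns\n",
		       rates[i], 1000000000UL / rates[i]);
	return 0;
}
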
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e9879c5af7b..46df28893c1 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -888,8 +888,8 @@ static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int fs_get_regs_len(struct net_device *dev)
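
The strcpy() to strlcpy() conversions in the ethtool drvinfo handlers (here and in the gianfar and ucc_geth hunks further down) bound the copy to the destination field and guarantee NUL termination. Since strlcpy() is a kernel/BSD helper rather than standard C, here is a minimal re-implementation for illustration:

/* Minimal strlcpy for illustration: copies at most size-1 bytes, always
 * NUL-terminates, and returns strlen(src) so callers can detect
 * truncation (return value >= size). */
#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char driver[8];

	my_strlcpy(driver, "fs_enet_long_name", sizeof(driver));
	printf("'%s'\n", driver);	/* truncated but NUL-terminated */
	return 0;
}
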
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index bffb2edd685..4b5e8a69248 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -133,8 +133,8 @@ static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
@@ -231,7 +231,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
dma_addr_t addr;
int i, j, k;
struct gfar_private *priv = netdev_priv(ndev);
- struct device *dev = &priv->ofdev->dev;
+ struct device *dev = priv->dev;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -277,14 +277,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
- tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size,
- GFP_KERNEL);
- if (!tx_queue->tx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate tx_skbuff\n");
+ tx_queue->tx_skbuff =
+ kmalloc_array(tx_queue->tx_ring_size,
+ sizeof(*tx_queue->tx_skbuff),
+ GFP_KERNEL);
+ if (!tx_queue->tx_skbuff)
goto cleanup;
- }
for (k = 0; k < tx_queue->tx_ring_size; k++)
tx_queue->tx_skbuff[k] = NULL;
@@ -292,15 +290,12 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size,
- GFP_KERNEL);
-
- if (!rx_queue->rx_skbuff) {
- netif_err(priv, ifup, ndev,
- "Could not allocate rx_skbuff\n");
+ rx_queue->rx_skbuff =
+ kmalloc_array(rx_queue->rx_ring_size,
+ sizeof(*rx_queue->rx_skbuff),
+ GFP_KERNEL);
+ if (!rx_queue->rx_skbuff)
goto cleanup;
- }
for (j = 0; j < rx_queue->rx_ring_size; j++)
rx_queue->rx_skbuff[j] = NULL;
@@ -349,14 +344,23 @@ static void gfar_init_mac(struct net_device *ndev)
/* Configure the coalescing support */
gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ /* set this when rx hw offload (TOE) functions are being used */
+ priv->uses_rxfcb = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
gfar_write(&regs->rir0, DEFAULT_RIR0);
}
- if (ndev->features & NETIF_F_RXCSUM)
+ /* Restore PROMISC mode */
+ if (ndev->flags & IFF_PROMISC)
+ rctrl |= RCTRL_PROM;
+
+ if (ndev->features & NETIF_F_RXCSUM) {
rctrl |= RCTRL_CHECKSUMMING;
+ priv->uses_rxfcb = 1;
+ }
if (priv->extended_hash) {
rctrl |= RCTRL_EXTHASH;
@@ -378,11 +382,15 @@ static void gfar_init_mac(struct net_device *ndev)
}
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en)
+ if (priv->hwts_rx_en) {
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
+ priv->uses_rxfcb = 1;
+ }
- if (ndev->features & NETIF_F_HW_VLAN_RX)
+ if (ndev->features & NETIF_F_HW_VLAN_RX) {
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 1;
+ }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
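
gfar_init_mac() now records once, in priv->uses_rxfcb, whether any receive offload (checksumming, hardware timestamping, VLAN extraction) places a frame control block in front of each received packet, instead of re-deriving that per frame through gfar_uses_fcb(). A boiled-down sketch of the decision:

/* Sketch: a frame control block (FCB) precedes the packet whenever any
 * of the rx offloads below is enabled. */
#include <stdbool.h>
#include <stdio.h>

struct rx_cfg {
	bool rxcsum;	/* NETIF_F_RXCSUM */
	bool hwts_rx;	/* hardware rx timestamping */
	bool vlan_rx;	/* NETIF_F_HW_VLAN_RX */
};

static bool uses_rxfcb(const struct rx_cfg *c)
{
	return c->rxcsum || c->hwts_rx || c->vlan_rx;
}

int main(void)
{
	struct rx_cfg c = { .rxcsum = true };

	printf("uses_rxfcb = %d\n", uses_rxfcb(&c));	/* prints 1 */
	return 0;
}
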
@@ -501,20 +509,6 @@ void unlock_tx_qs(struct gfar_private *priv)
spin_unlock(&priv->tx_queue[i]->txlock);
}
-static bool gfar_is_vlan_on(struct gfar_private *priv)
-{
- return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
- (priv->ndev->features & NETIF_F_HW_VLAN_TX);
-}
-
-/* Returns 1 if incoming frames use an FCB */
-static inline int gfar_uses_fcb(struct gfar_private *priv)
-{
- return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
-}
-
static void free_tx_pointers(struct gfar_private *priv)
{
int i;
@@ -540,6 +534,19 @@ static void unmap_group_regs(struct gfar_private *priv)
iounmap(priv->gfargrp[i].regs);
}
+static void free_gfar_dev(struct gfar_private *priv)
+{
+ int i, j;
+
+ for (i = 0; i < priv->num_grps; i++)
+ for (j = 0; j < GFAR_NUM_IRQS; j++) {
+ kfree(priv->gfargrp[i].irqinfo[j]);
+ priv->gfargrp[i].irqinfo[j] = NULL;
+ }
+
+ free_netdev(priv->ndev);
+}
+
static void disable_napi(struct gfar_private *priv)
{
int i;
@@ -559,40 +566,46 @@ static void enable_napi(struct gfar_private *priv)
static int gfar_parse_group(struct device_node *np,
struct gfar_private *priv, const char *model)
{
+ struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
u32 *queue_mask;
+ int i;
+
+ for (i = 0; i < GFAR_NUM_IRQS; i++) {
+ grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
+ GFP_KERNEL);
+ if (!grp->irqinfo[i])
+ return -ENOMEM;
+ }
- priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
- if (!priv->gfargrp[priv->num_grps].regs)
+ grp->regs = of_iomap(np, 0);
+ if (!grp->regs)
return -ENOMEM;
- priv->gfargrp[priv->num_grps].interruptTransmit =
- irq_of_parse_and_map(np, 0);
+ gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);
/* If we aren't the FEC we have multiple interrupts */
if (model && strcasecmp(model, "FEC")) {
- priv->gfargrp[priv->num_grps].interruptReceive =
- irq_of_parse_and_map(np, 1);
- priv->gfargrp[priv->num_grps].interruptError =
- irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
- priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
- priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
+ gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
+ gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
+ if (gfar_irq(grp, TX)->irq == NO_IRQ ||
+ gfar_irq(grp, RX)->irq == NO_IRQ ||
+ gfar_irq(grp, ER)->irq == NO_IRQ)
return -EINVAL;
}
- priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
- priv->gfargrp[priv->num_grps].priv = priv;
- spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
+ grp->grp_id = priv->num_grps;
+ grp->priv = priv;
+ spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ grp->rx_bit_map = queue_mask ?
*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ grp->tx_bit_map = queue_mask ?
*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
- priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
- priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
+ grp->rx_bit_map = 0xFF;
+ grp->tx_bit_map = 0xFF;
}
priv->num_grps++;
@@ -645,7 +658,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return -ENOMEM;
priv = netdev_priv(dev);
- priv->node = ofdev->dev.of_node;
priv->ndev = dev;
priv->num_tx_queues = num_tx_qs;
@@ -777,7 +789,7 @@ tx_alloc_failed:
free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
- free_netdev(dev);
+ free_gfar_dev(priv);
return err;
}
@@ -983,7 +995,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv = netdev_priv(dev);
priv->ndev = dev;
priv->ofdev = ofdev;
- priv->node = ofdev->dev.of_node;
+ priv->dev = &ofdev->dev;
SET_NETDEV_DEV(dev, &ofdev->dev);
spin_lock_init(&priv->bflock);
@@ -1020,8 +1032,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Set the dev->base_addr to the gfar reg region */
dev->base_addr = (unsigned long) regs;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
/* Fill in the dev structure */
dev->watchdog_timeo = TX_TIMEOUT;
dev->mtu = 1500;
@@ -1182,15 +1192,16 @@ static int gfar_probe(struct platform_device *ofdev)
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[i];
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- sprintf(priv->gfargrp[i].int_name_tx, "%s%s%c%s",
+ sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_tx");
- sprintf(priv->gfargrp[i].int_name_rx, "%s%s%c%s",
+ sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_rx");
- sprintf(priv->gfargrp[i].int_name_er, "%s%s%c%s",
+ sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
dev->name, "_g", '0' + i, "_er");
} else
- strcpy(priv->gfargrp[i].int_name_tx, dev->name);
+ strcpy(gfar_irq(grp, TX)->name, dev->name);
}
/* Initialize the filer table */
@@ -1223,7 +1234,7 @@ register_fail:
of_node_put(priv->phy_node);
if (priv->tbi_node)
of_node_put(priv->tbi_node);
- free_netdev(dev);
+ free_gfar_dev(priv);
return err;
}
@@ -1240,7 +1251,7 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
- free_netdev(priv->ndev);
+ free_gfar_dev(priv);
return 0;
}
@@ -1648,9 +1659,9 @@ void gfar_halt(struct net_device *dev)
static void free_grp_irqs(struct gfar_priv_grp *grp)
{
- free_irq(grp->interruptError, grp);
- free_irq(grp->interruptTransmit, grp);
- free_irq(grp->interruptReceive, grp);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
}
void stop_gfar(struct net_device *dev)
@@ -1679,7 +1690,7 @@ void stop_gfar(struct net_device *dev)
free_grp_irqs(&priv->gfargrp[i]);
} else {
for (i = 0; i < priv->num_grps; i++)
- free_irq(priv->gfargrp[i].interruptTransmit,
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
&priv->gfargrp[i]);
}
@@ -1698,13 +1709,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
if (!tx_queue->tx_skbuff[i])
continue;
- dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_single(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
j++) {
txbdp++;
- dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
+ dma_unmap_page(priv->dev, txbdp->bufPtr,
txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
@@ -1725,8 +1736,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
- dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
+ dma_unmap_single(priv->dev, rxbdp->bufPtr,
+ priv->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
@@ -1765,7 +1776,7 @@ static void free_skb_resources(struct gfar_private *priv)
free_skb_rx_queue(rx_queue);
}
- dma_free_coherent(&priv->ofdev->dev,
+ dma_free_coherent(priv->dev,
sizeof(struct txbd8) * priv->total_tx_ring_size +
sizeof(struct rxbd8) * priv->total_rx_ring_size,
priv->tx_queue[0]->tx_bd_base,
@@ -1854,32 +1865,34 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- if ((err = request_irq(grp->interruptError, gfar_error,
- 0, grp->int_name_er, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
+ gfar_irq(grp, ER)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptError);
+ gfar_irq(grp, ER)->irq);
goto err_irq_fail;
}
-
- if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptTransmit);
+ gfar_irq(grp, TX)->irq);
goto tx_irq_fail;
}
-
- if ((err = request_irq(grp->interruptReceive, gfar_receive,
- 0, grp->int_name_rx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
+ gfar_irq(grp, RX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptReceive);
+ gfar_irq(grp, RX)->irq);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
- 0, grp->int_name_tx, grp)) < 0) {
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
+ gfar_irq(grp, TX)->name, grp);
+ if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
- grp->interruptTransmit);
+ gfar_irq(grp, TX)->irq);
goto err_irq_fail;
}
}
@@ -1887,9 +1900,9 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
return 0;
rx_irq_fail:
- free_irq(grp->interruptTransmit, grp);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
tx_irq_fail:
- free_irq(grp->interruptError, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
err_irq_fail:
return err;
@@ -2143,7 +2156,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (i == nr_frags - 1)
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
- bufaddr = skb_frag_dma_map(&priv->ofdev->dev,
+ bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
length,
@@ -2195,7 +2208,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_TOE);
}
- txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
+ txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
/* If time stamping is requested one additional TxBD must be set up. The
@@ -2308,10 +2321,13 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
tempval = gfar_read(&regs->rctrl);
/* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER)
+ if (tempval & RCTRL_REQ_PARSER) {
tempval |= RCTRL_PRSDEP_INIT;
- else
+ priv->uses_rxfcb = 1;
+ } else {
tempval &= ~RCTRL_PRSDEP_INIT;
+ priv->uses_rxfcb = 0;
+ }
gfar_write(&regs->rctrl, tempval);
}
@@ -2344,6 +2360,7 @@ void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
tempval = gfar_read(&regs->rctrl);
tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
gfar_write(&regs->rctrl, tempval);
+ priv->uses_rxfcb = 1;
} else {
/* Disable VLAN tag extraction */
tempval = gfar_read(&regs->rctrl);
@@ -2367,15 +2384,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
- if (gfar_is_vlan_on(priv))
- frame_size += VLAN_HLEN;
-
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
netif_err(priv, drv, dev, "Invalid MTU setting\n");
return -EINVAL;
}
- if (gfar_uses_fcb(priv))
+ if (priv->uses_rxfcb)
frame_size += GMAC_FCB_LEN;
frame_size += priv->padding;
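
gfar_change_mtu() drops the VLAN_HLEN special case and sizes the frame as the MTU plus the Ethernet header, plus the frame control block when priv->uses_rxfcb is set, plus any configured padding. A worked example of that arithmetic; the 8-byte FCB length is an assumed value for illustration:

/* Worked example: frame_size = MTU + Ethernet header
 *                              (+ FCB if rx offloads are on) (+ padding). */
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14
#define FCB_LEN		8	/* assumed FCB size for illustration */

static int frame_size(int mtu, bool uses_rxfcb, int padding)
{
	int size = mtu + ETH_HLEN;

	if (uses_rxfcb)
		size += FCB_LEN;
	return size + padding;
}

int main(void)
{
	printf("mtu 1500, no offloads: %d\n", frame_size(1500, false, 0));
	printf("mtu 1500, rx FCB:      %d\n", frame_size(1500, true, 0));
	return 0;
}
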
@@ -2508,7 +2522,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
} else
buflen = bdp->length;
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2527,7 +2541,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_page(priv->dev, bdp->bufPtr,
bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2593,7 +2607,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
struct gfar_private *priv = netdev_priv(dev);
dma_addr_t buf;
- buf = dma_map_single(&priv->ofdev->dev, skb->data,
+ buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
gfar_init_rxbdp(rx_queue, bdp, buf);
}
@@ -2627,7 +2641,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
- estats->rx_trunc++;
+ atomic64_inc(&estats->rx_trunc);
return;
}
@@ -2636,20 +2650,20 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
stats->rx_length_errors++;
if (status & RXBD_LARGE)
- estats->rx_large++;
+ atomic64_inc(&estats->rx_large);
else
- estats->rx_short++;
+ atomic64_inc(&estats->rx_short);
}
if (status & RXBD_NONOCTET) {
stats->rx_frame_errors++;
- estats->rx_nonoctet++;
+ atomic64_inc(&estats->rx_nonoctet);
}
if (status & RXBD_CRCERR) {
- estats->rx_crcerr++;
+ atomic64_inc(&estats->rx_crcerr);
stats->rx_crc_errors++;
}
if (status & RXBD_OVERRUN) {
- estats->rx_overrun++;
+ atomic64_inc(&estats->rx_overrun);
stats->rx_crc_errors++;
}
}
@@ -2674,8 +2688,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
- int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
+ int amount_pull, struct napi_struct *napi)
{
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
@@ -2722,10 +2736,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Send the packet up the stack */
ret = napi_gro_receive(napi, skb);
- if (GRO_DROP == ret)
- priv->extra_stats.kernel_dropped++;
-
- return 0;
+ if (unlikely(GRO_DROP == ret))
+ atomic64_inc(&priv->extra_stats.kernel_dropped);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2746,7 +2758,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = rx_queue->cur_rx;
base = rx_queue->rx_bd_base;
- amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
+ amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
@@ -2758,7 +2770,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
- dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
+ dma_unmap_single(priv->dev, bdp->bufPtr,
priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
@@ -2791,7 +2803,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
rx_queue->stats.rx_dropped++;
- priv->extra_stats.rx_skbmissing++;
+ atomic64_inc(&priv->extra_stats.rx_skbmissing);
}
}
@@ -3224,7 +3236,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
netif_dbg(priv, tx_err, dev,
"TX FIFO underrun, packet dropped\n");
dev->stats.tx_dropped++;
- priv->extra_stats.tx_underrun++;
+ atomic64_inc(&priv->extra_stats.tx_underrun);
local_irq_save(flags);
lock_tx_qs(priv);
@@ -3239,7 +3251,7 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BSY) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_bsy++;
+ atomic64_inc(&priv->extra_stats.rx_bsy);
gfar_receive(irq, grp_id);
@@ -3248,19 +3260,19 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
if (events & IEVENT_BABR) {
dev->stats.rx_errors++;
- priv->extra_stats.rx_babr++;
+ atomic64_inc(&priv->extra_stats.rx_babr);
netif_dbg(priv, rx_err, dev, "babbling RX error\n");
}
if (events & IEVENT_EBERR) {
- priv->extra_stats.eberr++;
+ atomic64_inc(&priv->extra_stats.eberr);
netif_dbg(priv, rx_err, dev, "bus error\n");
}
if (events & IEVENT_RXC)
netif_dbg(priv, rx_status, dev, "control frame\n");
if (events & IEVENT_BABT) {
- priv->extra_stats.tx_babt++;
+ atomic64_inc(&priv->extra_stats.tx_babt);
netif_dbg(priv, tx_err, dev, "babbling TX error\n");
}
return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 22eabc13ca9..63a28d294e2 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -627,36 +627,29 @@ struct rmon_mib
};
struct gfar_extra_stats {
- u64 kernel_dropped;
- u64 rx_large;
- u64 rx_short;
- u64 rx_nonoctet;
- u64 rx_crcerr;
- u64 rx_overrun;
- u64 rx_bsy;
- u64 rx_babr;
- u64 rx_trunc;
- u64 eberr;
- u64 tx_babt;
- u64 tx_underrun;
- u64 rx_skbmissing;
- u64 tx_timeout;
+ atomic64_t kernel_dropped;
+ atomic64_t rx_large;
+ atomic64_t rx_short;
+ atomic64_t rx_nonoctet;
+ atomic64_t rx_crcerr;
+ atomic64_t rx_overrun;
+ atomic64_t rx_bsy;
+ atomic64_t rx_babr;
+ atomic64_t rx_trunc;
+ atomic64_t eberr;
+ atomic64_t tx_babt;
+ atomic64_t tx_underrun;
+ atomic64_t rx_skbmissing;
+ atomic64_t tx_timeout;
};
#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32))
-#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64))
+#define GFAR_EXTRA_STATS_LEN \
+ (sizeof(struct gfar_extra_stats)/sizeof(atomic64_t))
-/* Number of stats in the stats structure (ignore car and cam regs)*/
+/* Number of stats exported via ethtool */
#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN)
-#define GFAR_INFOSTR_LEN 32
-
-struct gfar_stats {
- u64 extra[GFAR_EXTRA_STATS_LEN];
- u64 rmon[GFAR_RMON_LEN];
-};
-
-
struct gfar {
u32 tsec_id; /* 0x.000 - Controller ID register */
u32 tsec_id2; /* 0x.004 - Controller ID2 register */
@@ -937,26 +930,25 @@ struct tx_q_stats {
* @txtime: coalescing value if based on time
*/
struct gfar_priv_tx_q {
+ /* cacheline 1 */
spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct sk_buff ** tx_skbuff;
- /* Buffer descriptor pointers */
- dma_addr_t tx_bd_dma_base;
struct txbd8 *tx_bd_base;
struct txbd8 *cur_tx;
- struct txbd8 *dirty_tx;
+ unsigned int num_txbdfree;
+ unsigned short skb_curtx;
+ unsigned short tx_ring_size;
struct tx_q_stats stats;
- struct net_device *dev;
struct gfar_priv_grp *grp;
- u16 skb_curtx;
- u16 skb_dirtytx;
- u16 qindex;
- unsigned int tx_ring_size;
- unsigned int num_txbdfree;
+ /* cacheline 2 */
+ struct net_device *dev;
+ struct sk_buff **tx_skbuff;
+ struct txbd8 *dirty_tx;
+ unsigned short skb_dirtytx;
+ unsigned short qindex;
/* Configuration info for the coalescing features */
- unsigned char txcoalescing;
+ unsigned int txcoalescing;
unsigned long txic;
- unsigned short txcount;
- unsigned short txtime;
+ dma_addr_t tx_bd_dma_base;
};
/*
@@ -999,18 +991,25 @@ struct gfar_priv_rx_q {
unsigned long rxic;
};
+enum gfar_irqinfo_id {
+ GFAR_TX = 0,
+ GFAR_RX = 1,
+ GFAR_ER = 2,
+ GFAR_NUM_IRQS = 3
+};
+
+struct gfar_irqinfo {
+ unsigned int irq;
+ char name[GFAR_INT_NAME_MAX];
+};
+
/**
* struct gfar_priv_grp - per group structure
* @napi: the napi poll function
* @priv: back pointer to the priv structure
* @regs: the ioremapped register space for this group
* @grp_id: group id for this group
- * @interruptTransmit: The TX interrupt number for this group
- * @interruptReceive: The RX interrupt number for this group
- * @interruptError: The ERROR interrupt number for this group
- * @int_name_tx: tx interrupt name for this group
- * @int_name_rx: rx interrupt name for this group
- * @int_name_er: er interrupt name for this group
+ * @irqinfo: TX/RX/ER irq data for this group
*/
struct gfar_priv_grp {
@@ -1019,23 +1018,20 @@ struct gfar_priv_grp {
struct gfar_private *priv;
struct gfar __iomem *regs;
unsigned int grp_id;
- unsigned long rx_bit_map;
- unsigned long tx_bit_map;
- unsigned long num_tx_queues;
unsigned long num_rx_queues;
+ unsigned long rx_bit_map;
+ /* cacheline 3 */
unsigned int rstat;
unsigned int tstat;
- unsigned int imask;
- unsigned int ievent;
- unsigned int interruptTransmit;
- unsigned int interruptReceive;
- unsigned int interruptError;
-
- char int_name_tx[GFAR_INT_NAME_MAX];
- char int_name_rx[GFAR_INT_NAME_MAX];
- char int_name_er[GFAR_INT_NAME_MAX];
+ unsigned long num_tx_queues;
+ unsigned long tx_bit_map;
+
+ struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
};
+#define gfar_irq(grp, ID) \
+ ((grp)->irqinfo[GFAR_##ID])
+
enum gfar_errata {
GFAR_ERRATA_74 = 0x01,
GFAR_ERRATA_76 = 0x02,
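
The per-group interruptTransmit/interruptReceive/interruptError fields and their name buffers collapse into an irqinfo[] array indexed by the enum above, with gfar_irq() as the accessor. A standalone sketch of the same pattern; the real driver allocates each irqinfo separately per group, while this sketch inlines them, and the IRQ number is a placeholder:

/* Sketch: per-group TX/RX/ER interrupt data kept in one indexed array. */
#include <stdio.h>

enum irq_id { IRQ_TX, IRQ_RX, IRQ_ER, NUM_IRQS };

struct irqinfo {
	unsigned int irq;
	char name[32];
};

struct group {
	struct irqinfo irqinfo[NUM_IRQS];
};

#define group_irq(grp, id)	(&(grp)->irqinfo[id])

int main(void)
{
	struct group grp = { };

	group_irq(&grp, IRQ_TX)->irq = 29;	/* placeholder IRQ number */
	snprintf(group_irq(&grp, IRQ_TX)->name,
		 sizeof(group_irq(&grp, IRQ_TX)->name), "eth0_g0_tx");
	printf("%s -> irq %u\n", group_irq(&grp, IRQ_TX)->name,
	       group_irq(&grp, IRQ_TX)->irq);
	return 0;
}
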
@@ -1053,28 +1049,65 @@ enum gfar_errata {
* the buffer descriptor determines the actual condition.
*/
struct gfar_private {
-
- /* Indicates how many tx, rx queues are enabled */
- unsigned int num_tx_queues;
unsigned int num_rx_queues;
- unsigned int num_grps;
- unsigned int mode;
- /* The total tx and rx ring size for the enabled queues */
- unsigned int total_tx_ring_size;
- unsigned int total_rx_ring_size;
-
- struct device_node *node;
+ struct device *dev;
struct net_device *ndev;
- struct platform_device *ofdev;
enum gfar_errata errata;
+ unsigned int rx_buffer_size;
+
+ u16 uses_rxfcb;
+ u16 padding;
+
+ /* HW time stamping enabled flag */
+ int hwts_rx_en;
+ int hwts_tx_en;
- struct gfar_priv_grp gfargrp[MAXGROUPS];
struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
+ struct gfar_priv_grp gfargrp[MAXGROUPS];
+
+ u32 device_flags;
+
+ unsigned int mode;
+ unsigned int num_tx_queues;
+ unsigned int num_grps;
+
+ /* Network Statistics */
+ struct gfar_extra_stats extra_stats;
+
+ /* PHY stuff */
+ phy_interface_t interface;
+ struct device_node *phy_node;
+ struct device_node *tbi_node;
+ struct phy_device *phydev;
+ struct mii_bus *mii_bus;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+
+ /* Bitfield update lock */
+ spinlock_t bflock;
+
+ uint32_t msg_enable;
+
+ struct work_struct reset_task;
+
+ struct platform_device *ofdev;
+ unsigned char
+ extended_hash:1,
+ bd_stash_en:1,
+ rx_filer_enable:1,
+ /* Wake-on-LAN enabled */
+ wol_en:1,
+ /* Enable priority-based Tx scheduling in HW */
+ prio_sched_en:1;
+
+ /* The total tx and rx ring size for the enabled queues */
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
/* RX per device parameters */
- unsigned int rx_buffer_size;
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1093,39 +1126,6 @@ struct gfar_private {
unsigned int fifo_starve;
unsigned int fifo_starve_off;
- /* Bitfield update lock */
- spinlock_t bflock;
-
- phy_interface_t interface;
- struct device_node *phy_node;
- struct device_node *tbi_node;
- u32 device_flags;
- unsigned char
- extended_hash:1,
- bd_stash_en:1,
- rx_filer_enable:1,
- wol_en:1, /* Wake-on-LAN enabled */
- prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
- unsigned short padding;
-
- /* PHY stuff */
- struct phy_device *phydev;
- struct mii_bus *mii_bus;
- int oldspeed;
- int oldduplex;
- int oldlink;
-
- uint32_t msg_enable;
-
- struct work_struct reset_task;
-
- /* Network Statistics */
- struct gfar_extra_stats extra_stats;
-
- /* HW time stamping enabled flag */
- int hwts_rx_en;
- int hwts_tx_en;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1138,16 +1138,16 @@ static inline int gfar_has_errata(struct gfar_private *priv,
return priv->errata & err;
}
-static inline u32 gfar_read(volatile unsigned __iomem *addr)
+static inline u32 gfar_read(unsigned __iomem *addr)
{
u32 val;
- val = in_be32(addr);
+ val = ioread32be(addr);
return val;
}
-static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
+static inline void gfar_write(unsigned __iomem *addr, u32 val)
{
- out_be32(addr, val);
+ iowrite32be(val, addr);
}
static inline void gfar_write_filer(struct gfar_private *priv,
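
gfar_read()/gfar_write() move from the PowerPC-only in_be32()/out_be32() to the portable ioread32be()/iowrite32be() accessors, so the helpers also build on little-endian hosts; either way the registers are treated as big-endian 32-bit words (the real accessors additionally provide I/O ordering guarantees). A user-space model of what a big-endian register read means regardless of host byte order:

/* Model: a big-endian 32-bit register read, independent of host order. */
#include <stdint.h>
#include <stdio.h>

static uint32_t read32be(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	/* assemble most-significant byte first, as the device stores it */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	uint8_t reg[4] = { 0x12, 0x34, 0x56, 0x78 };	/* fake register */

	printf("value = 0x%08x\n", read32be(reg));	/* 0x12345678 */
	return 0;
}
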
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index ab6762caa95..75e89acf491 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -149,20 +149,17 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
int i;
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u64 *extra = (u64 *) & priv->extra_stats;
+ atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
+
+ for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
+ buf[i] = atomic64_read(&extra[i]);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
- struct gfar_stats *stats = (struct gfar_stats *) buf;
-
- for (i = 0; i < GFAR_RMON_LEN; i++)
- stats->rmon[i] = (u64) gfar_read(&rmon[i]);
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- stats->extra[i] = extra[i];
- } else
- for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
- buf[i] = extra[i];
+ for (; i < GFAR_STATS_LEN; i++, rmon++)
+ buf[i] = (u64) gfar_read(rmon);
+ }
}
static int gfar_sset_count(struct net_device *dev, int sset)
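
With the extra statistics converted to atomic64_t, gfar_fill_stats() reads them with atomic64_read() into the first GFAR_EXTRA_STATS_LEN slots of the ethtool buffer and appends the RMON words after them, replacing the old struct gfar_stats overlay. A user-space analogue using C11 atomics; the array lengths and counter values are made up:

/* Sketch: copy an array of atomic 64-bit counters into a plain u64
 * ethtool-style buffer, then append device counters after them. */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

#define EXTRA_LEN	3
#define RMON_LEN	2

static _Atomic uint64_t extra_stats[EXTRA_LEN];

int main(void)
{
	uint64_t buf[EXTRA_LEN + RMON_LEN];
	uint32_t rmon[RMON_LEN] = { 7, 9 };	/* fake hardware counters */
	int i;

	atomic_fetch_add(&extra_stats[1], 5);	/* e.g. rx_large incremented */

	for (i = 0; i < EXTRA_LEN; i++)
		buf[i] = atomic_load(&extra_stats[i]);
	for (; i < EXTRA_LEN + RMON_LEN; i++)
		buf[i] = rmon[i - EXTRA_LEN];

	for (i = 0; i < EXTRA_LEN + RMON_LEN; i++)
		printf("buf[%d] = %" PRIu64 "\n", i, buf[i]);
	return 0;
}
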
@@ -184,10 +181,11 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
- strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
- strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN);
- strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, gfar_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
@@ -715,12 +713,11 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
int j = MAX_FILER_IDX, l = 0x0;
int ret = 1;
- local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
- local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
+ local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
- pr_err("Out of memory\n");
ret = 0;
goto err;
}
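
The kmalloc(size * count) calls here and in gfar_alloc_skb_resources() become kmalloc_array(count, size), whose benefit is that the multiplication is checked for overflow before any allocation happens. A user-space equivalent of that check:

/* Sketch: overflow-checked array allocation, the idea behind
 * kmalloc_array()/kcalloc(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return malloc(n * size);
}

int main(void)
{
	unsigned int *tbl = alloc_array(256, sizeof(*tbl));

	printf("alloc ok: %d\n", tbl != NULL);
	printf("overflow rejected: %d\n",
	       alloc_array(SIZE_MAX, sizeof(uint64_t)) == NULL);
	free(tbl);
	return 0;
}
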
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 37b03530601..1ebf7128ec0 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -350,10 +350,10 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "QUICC ENGINE", 32);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
drvinfo->eedump_len = 0;
drvinfo->regdump_len = uec_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index dffee9d44fd..c6a87625898 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_FUJITSU
bool "Fujitsu devices"
default y
- depends on ISA || PCMCIA || ((ISA || MCA_LEGACY) && EXPERIMENTAL)
+ depends on ISA || PCMCIA || (ISA && EXPERIMENTAL)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
@@ -17,18 +17,6 @@ config NET_VENDOR_FUJITSU
if NET_VENDOR_FUJITSU
-config AT1700
- tristate "AT1700/1720 support (EXPERIMENTAL)"
- depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
- select CRC32
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called at1700.
-
config PCMCIA_FMVJ18X
tristate "Fujitsu FMV-J18x PCMCIA support"
depends on PCMCIA
@@ -40,15 +28,4 @@ config PCMCIA_FMVJ18X
To compile this driver as a module, choose M here: the module will be
called fmvj18x_cs. If unsure, say N.
-config ETH16I
- tristate "ICL EtherTeam 16i/32 support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eth16i.
-
endif # NET_VENDOR_FUJITSU
diff --git a/drivers/net/ethernet/fujitsu/Makefile b/drivers/net/ethernet/fujitsu/Makefile
index 2730ae67d3a..21561fdcc69 100644
--- a/drivers/net/ethernet/fujitsu/Makefile
+++ b/drivers/net/ethernet/fujitsu/Makefile
@@ -2,6 +2,4 @@
# Makefile for the Fujitsu network device drivers.
#
-obj-$(CONFIG_AT1700) += at1700.o
-obj-$(CONFIG_ETH16I) += eth16i.o
obj-$(CONFIG_PCMCIA_FMVJ18X) += fmvj18x_cs.o
diff --git a/drivers/net/ethernet/fujitsu/at1700.c b/drivers/net/ethernet/fujitsu/at1700.c
deleted file mode 100644
index 4b80dc4531a..00000000000
--- a/drivers/net/ethernet/fujitsu/at1700.c
+++ /dev/null
@@ -1,791 +0,0 @@
-/* at1700.c: A network device driver for the Allied Telesis AT1700.
-
- Written 1993-98 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- This is a device driver for the Allied Telesis AT1700, and
- Fujitsu FMV-181/182/181A/182A/183/184/183A/184A, which are
- straight-forward Fujitsu MB86965 implementations.
-
- Modification for Fujitsu FMV-18X cards is done by Yutaka Tamiya
- (tamy@flab.fujitsu.co.jp).
-
- Sources:
- The Fujitsu MB86965 datasheet.
-
- After the initial version of this driver was written Gerry Sawkins of
- ATI provided their EEPROM configuration code header file.
- Thanks to NIIBE Yutaka <gniibe@mri.co.jp> for bug fixes.
-
- MCA bus (AT1720) support (now deleted) by Rene Schmit <rene@bss.lu>
-
- Bugs:
- The MB86965 has a design flaw that makes all probes unreliable. Not
- only is it difficult to detect, it also moves around in I/O space in
- response to inb()s from other device probes!
-*/
-
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/crc32.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-static char version[] __initdata =
- "at1700.c:v1.16 9/11/06 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#define DRV_NAME "at1700"
-
-/* Tunable parameters. */
-
-/* When to switch from the 64-entry multicast filter to Rx-all-multicast. */
-#define MC_FILTERBREAK 64
-
-/* These unusual address orders are used to verify the CONFIG register. */
-
-static int fmv18x_probe_list[] __initdata = {
- 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x300, 0x340, 0
-};
-
-/*
- * ISA
- */
-
-static unsigned at1700_probe_list[] __initdata = {
- 0x260, 0x280, 0x2a0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
-};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-typedef unsigned char uchar;
-
-/* Information that need to be kept for each board. */
-struct net_local {
- spinlock_t lock;
- unsigned char mc_filter[8];
- uint jumpered:1; /* Set iff the board has jumper config. */
- uint tx_started:1; /* Packets are on the Tx queue. */
- uint tx_queue_ready:1; /* Tx queue is ready to be sent. */
- uint rx_started:1; /* Packets are Rxing. */
- uchar tx_queue; /* Number of packet on the Tx queue. */
- ushort tx_queue_len; /* Current length of the Tx queue. */
-};
-
-
-/* Offsets from the base address. */
-#define STATUS 0
-#define TX_STATUS 0
-#define RX_STATUS 1
-#define TX_INTR 2 /* Bit-mapped interrupt enable registers. */
-#define RX_INTR 3
-#define TX_MODE 4
-#define RX_MODE 5
-#define CONFIG_0 6 /* Misc. configuration settings. */
-#define CONFIG_1 7
-/* Run-time register bank 2 definitions. */
-#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
-#define TX_START 10
-#define COL16CNTL 11 /* Control Reg for 16 collisions */
-#define MODE13 13
-#define RX_CTRL 14
-/* Configuration registers only on the '865A/B chips. */
-#define EEPROM_Ctrl 16
-#define EEPROM_Data 17
-#define CARDSTATUS 16 /* FMV-18x Card Status */
-#define CARDSTATUS1 17 /* FMV-18x Card Status */
-#define IOCONFIG 18 /* Either read the jumper, or move the I/O. */
-#define IOCONFIG1 19
-#define SAPROM 20 /* The station address PROM, if no EEPROM. */
-#define MODE24 24
-#define RESET 31 /* Write to reset some parts of the chip. */
-#define AT1700_IO_EXTENT 32
-#define PORT_OFFSET(o) (o)
-
-
-#define TX_TIMEOUT (HZ/10)
-
-
-/* Index to functions, as function prototypes. */
-
-static int at1700_probe1(struct net_device *dev, int ioaddr);
-static int read_eeprom(long ioaddr, int location);
-static int net_open(struct net_device *dev);
-static netdev_tx_t net_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t net_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static int net_close(struct net_device *dev);
-static void set_rx_mode(struct net_device *dev);
-static void net_tx_timeout (struct net_device *dev);
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
- */
-
-static int io = 0x260;
-
-static int irq;
-
-static void cleanup_card(struct net_device *dev)
-{
- free_irq(dev->irq, NULL);
- release_region(dev->base_addr, AT1700_IO_EXTENT);
-}
-
-struct net_device * __init at1700_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- } else {
- dev->base_addr = io;
- dev->irq = irq;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = at1700_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = at1700_probe_list; *port; port++) {
- if (at1700_probe1(dev, *port) == 0)
- break;
- dev->irq = irq;
- }
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops at1700_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_set_rx_mode = set_rx_mode,
- .ndo_tx_timeout = net_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
- "signature", the default bit pattern after a reset. This *doesn't* work --
- there is no way to reset the bus interface without a complete power-cycle!
-
- It turns out that ATI came to the same conclusion I did: the only thing
- that can be done is checking a few bits and then diving right into an
- EEPROM read. */
-
-static int __init at1700_probe1(struct net_device *dev, int ioaddr)
-{
- static const char fmv_irqmap[4] = {3, 7, 10, 15};
- static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
- static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
- unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
- int ret = -ENODEV;
- struct net_local *lp = netdev_priv(dev);
-
- if (!request_region(ioaddr, AT1700_IO_EXTENT, DRV_NAME))
- return -EBUSY;
-
- /* Resetting the chip doesn't reset the ISA interface, so don't bother.
- That means we have to be careful with the register values we probe
- for.
- */
-#ifdef notdef
- printk("at1700 probe at %#x, eeprom is %4.4x %4.4x %4.4x ctrl %4.4x.\n",
- ioaddr, read_eeprom(ioaddr, 4), read_eeprom(ioaddr, 5),
- read_eeprom(ioaddr, 6), inw(ioaddr + EEPROM_Ctrl));
-#endif
- /* We must check for the EEPROM-config boards first, else accessing
- IOCONFIG0 will move the board! */
- if (at1700_probe_list[inb(ioaddr + IOCONFIG1) & 0x07] == ioaddr &&
- read_eeprom(ioaddr, 4) == 0x0000 &&
- (read_eeprom(ioaddr, 5) & 0xff00) == 0xF400)
- is_at1700 = 1;
- else if (inb(ioaddr + SAPROM ) == 0x00 &&
- inb(ioaddr + SAPROM + 1) == 0x00 &&
- inb(ioaddr + SAPROM + 2) == 0x0e)
- is_fmv18x = 1;
- else {
- goto err_out;
- }
-
- /* Reset the internal state machines. */
- outb(0, ioaddr + RESET);
-
- if (is_at1700) {
- irq = at1700_irqmap[(read_eeprom(ioaddr, 12)&0x04)
- | (read_eeprom(ioaddr, 0)>>14)];
- } else {
- /* Check PnP mode for FMV-183/184/183A/184A. */
- /* This PnP routine is very poor. IO and IRQ should be known. */
- if (inb(ioaddr + CARDSTATUS1) & 0x20) {
- irq = dev->irq;
- for (i = 0; i < 8; i++) {
- if (irq == fmv_irqmap_pnp[i])
- break;
- }
- if (i == 8) {
- goto err_out;
- }
- } else {
- if (fmv18x_probe_list[inb(ioaddr + IOCONFIG) & 0x07] != ioaddr)
- goto err_out;
- irq = fmv_irqmap[(inb(ioaddr + IOCONFIG)>>6) & 0x03];
- }
- }
-
- printk("%s: %s found at %#3x, IRQ %d, address ", dev->name,
- is_at1700 ? "AT1700" : "FMV-18X", ioaddr, irq);
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- if (is_at1700) {
- for(i = 0; i < 3; i++) {
- unsigned short eeprom_val = read_eeprom(ioaddr, 4+i);
- ((unsigned short *)dev->dev_addr)[i] = ntohs(eeprom_val);
- }
- } else {
- for(i = 0; i < 6; i++) {
- unsigned char val = inb(ioaddr + SAPROM + i);
- dev->dev_addr[i] = val;
- }
- }
- printk("%pM", dev->dev_addr);
-
- /* The EEPROM word 12 bit 0x0400 means use regular 100 ohm 10baseT signals,
- rather than 150 ohm shielded twisted pair compensation.
- 0x0000 == auto-sense the interface
- 0x0800 == use TP interface
- 0x1800 == use coax interface
- */
- {
- const char *porttype[] = {"auto-sense", "10baseT", "auto-sense", "10base2"};
- if (is_at1700) {
- ushort setup_value = read_eeprom(ioaddr, 12);
- dev->if_port = setup_value >> 8;
- } else {
- ushort setup_value = inb(ioaddr + CARDSTATUS);
- switch (setup_value & 0x07) {
- case 0x01: /* 10base5 */
- case 0x02: /* 10base2 */
- dev->if_port = 0x18; break;
- case 0x04: /* 10baseT */
- dev->if_port = 0x08; break;
- default: /* auto-sense */
- dev->if_port = 0x00; break;
- }
- }
- printk(" %s interface.\n", porttype[(dev->if_port>>3) & 3]);
- }
-
- /* Set the configuration register 0 to 32K 100ns. byte-wide memory, 16 bit
- bus access, two 4K Tx queues, and disabled Tx and Rx. */
- outb(0xda, ioaddr + CONFIG_0);
-
- /* Set the station address in bank zero. */
- outb(0x00, ioaddr + CONFIG_1);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + PORT_OFFSET(8 + i));
-
- /* Switch to bank 1 and set the multicast table to accept none. */
- outb(0x04, ioaddr + CONFIG_1);
- for (i = 0; i < 8; i++)
- outb(0x00, ioaddr + PORT_OFFSET(8 + i));
-
-
- /* Switch to bank 2 */
- /* Lock our I/O address, and set manual processing mode for 16 collisions. */
- outb(0x08, ioaddr + CONFIG_1);
- outb(dev->if_port, ioaddr + MODE13);
- outb(0x00, ioaddr + COL16CNTL);
-
- if (net_debug)
- printk(version);
-
- dev->netdev_ops = &at1700_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
-
- spin_lock_init(&lp->lock);
-
- lp->jumpered = is_fmv18x;
- /* Snarf the interrupt vector now. */
- ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
- if (ret) {
- printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
- "conflict on IRQ %d.\n",
- ioaddr, irq);
- goto err_out;
- }
-
- return 0;
-
-err_out:
- release_region(ioaddr, AT1700_IO_EXTENT);
- return ret;
-}
-
-
-/* EEPROM_Ctrl bits. */
-#define EE_SHIFT_CLK 0x40 /* EEPROM shift clock, in reg. 16. */
-#define EE_CS 0x20 /* EEPROM chip select, in reg. 16. */
-#define EE_DATA_WRITE 0x80 /* EEPROM chip data in, in reg. 17. */
-#define EE_DATA_READ 0x80 /* EEPROM chip data out, in reg. 17. */
-
-/* The EEPROM commands include the alway-set leading bit. */
-#define EE_WRITE_CMD (5 << 6)
-#define EE_READ_CMD (6 << 6)
-#define EE_ERASE_CMD (7 << 6)
-
-static int __init read_eeprom(long ioaddr, int location)
-{
- int i;
- unsigned short retval = 0;
- long ee_addr = ioaddr + EEPROM_Ctrl;
- long ee_daddr = ioaddr + EEPROM_Data;
- int read_cmd = location | EE_READ_CMD;
-
- /* Shift the read command bits out. */
- for (i = 9; i >= 0; i--) {
- short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
- outb(EE_CS, ee_addr);
- outb(dataval, ee_daddr);
- outb(EE_CS | EE_SHIFT_CLK, ee_addr); /* EEPROM clock tick. */
- }
- outb(EE_DATA_WRITE, ee_daddr);
- for (i = 16; i > 0; i--) {
- outb(EE_CS, ee_addr);
- outb(EE_CS | EE_SHIFT_CLK, ee_addr);
- retval = (retval << 1) | ((inb(ee_daddr) & EE_DATA_READ) ? 1 : 0);
- }
-
- /* Terminate the EEPROM access. */
- outb(EE_CS, ee_addr);
- outb(EE_SHIFT_CLK, ee_addr);
- outb(0, ee_addr);
- return retval;
-}
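-
-/* A minimal sketch (not called by the driver; the helper name is invented
- * for this illustration) of the frame that read_eeprom() above clocks out:
- * the 6-bit word address OR'd with EE_READ_CMD (the always-set start bit
- * plus the "10" read opcode), presented MSB-first on EE_DATA_WRITE over
- * ten clock ticks.  For example, word 4 gives the bit string 0110000100:
- * a harmless leading 0, the start bit, the read opcode 10, and the
- * address 000100.
- */
-static void dump_ee_read_frame(long ioaddr, int location)
-{
-	int read_cmd = (location & 0x3f) | EE_READ_CMD;
-	char bits[11];
-	int i;
-
-	for (i = 9; i >= 0; i--)	/* MSB first, exactly as read_eeprom() */
-		bits[9 - i] = (read_cmd & (1 << i)) ? '1' : '0';
-	bits[10] = '\0';
-	printk("EEPROM read frame for word %d on port %#lx: %s\n",
-	       location, ioaddr + EEPROM_Data, bits);
-}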
-
-
-
-static int net_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
-	/* Set the configuration register 0 to 32K 100ns byte-wide memory, 16 bit
-	   bus access, and two 4K Tx queues. */
- outb(0x5a, ioaddr + CONFIG_0);
-
- /* Powerup, switch to register bank 2, and enable the Rx and Tx. */
- outb(0xe8, ioaddr + CONFIG_1);
-
- lp->tx_started = 0;
- lp->tx_queue_ready = 1;
- lp->rx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- /* Turn on hardware Tx and Rx interrupts. */
- outb(0x82, ioaddr + TX_INTR);
- outb(0x81, ioaddr + RX_INTR);
-
-	/* Enable the IRQ on boards of fmv18x where it is feasible. */
- if (lp->jumpered) {
- outb(0x80, ioaddr + IOCONFIG1);
- }
-
- netif_start_queue(dev);
- return 0;
-}
-
-static void net_tx_timeout (struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- printk ("%s: transmit timed out with status %04x, %s?\n", dev->name,
- inw (ioaddr + STATUS), inb (ioaddr + TX_STATUS) & 0x80
- ? "IRQ conflict" : "network cable problem");
- printk ("%s: timeout registers: %04x %04x %04x %04x %04x %04x %04x %04x.\n",
- dev->name, inw(ioaddr + TX_STATUS), inw(ioaddr + TX_INTR), inw(ioaddr + TX_MODE),
- inw(ioaddr + CONFIG_0), inw(ioaddr + DATAPORT), inw(ioaddr + TX_START),
- inw(ioaddr + MODE13 - 1), inw(ioaddr + RX_CTRL));
- dev->stats.tx_errors++;
- /* ToDo: We should try to restart the adaptor... */
- outw(0xffff, ioaddr + MODE24);
- outw (0xffff, ioaddr + TX_STATUS);
- outb (0x5a, ioaddr + CONFIG_0);
- outb (0xe8, ioaddr + CONFIG_1);
- outw (0x8182, ioaddr + TX_INTR);
- outb (0x00, ioaddr + TX_START);
- outb (0x03, ioaddr + COL16CNTL);
-
- dev->trans_start = jiffies; /* prevent tx timeout */
-
- lp->tx_started = 0;
- lp->tx_queue_ready = 1;
- lp->rx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- netif_wake_queue(dev);
-}
-
-
-static netdev_tx_t net_send_packet (struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- short len = skb->len;
- unsigned char *buf = skb->data;
- static u8 pad[ETH_ZLEN];
-
- netif_stop_queue (dev);
-
-	/* We must not start transmitting until we have finished transferring
-	   the packet into the Tx queue. While executing the following code we
-	   may catch a Tx interrupt, so we clear tx_queue_ready to prevent the
-	   interrupt routine (net_interrupt) from starting a transmission. */
- lp->tx_queue_ready = 0;
- {
- outw (length, ioaddr + DATAPORT);
- /* Packet data */
- outsw (ioaddr + DATAPORT, buf, len >> 1);
- /* Check for dribble byte */
- if (len & 1) {
- outw(skb->data[skb->len-1], ioaddr + DATAPORT);
- len++;
- }
- /* Check for packet padding */
- if (length != skb->len)
- outsw(ioaddr + DATAPORT, pad, (length - len + 1) >> 1);
-
- lp->tx_queue++;
- lp->tx_queue_len += length + 2;
- }
- lp->tx_queue_ready = 1;
-
- if (lp->tx_started == 0) {
- /* If the Tx is idle, always trigger a transmit. */
- outb (0x80 | lp->tx_queue, ioaddr + TX_START);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- netif_start_queue (dev);
- } else if (lp->tx_queue_len < 4096 - 1502)
- /* Yes, there is room for one more packet. */
- netif_start_queue (dev);
- dev_kfree_skb (skb);
-
- return NETDEV_TX_OK;
-}
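-
-/* A minimal sketch (not called by the driver; the helper name is invented
- * for illustration) of the 16-bit word accounting done by net_send_packet()
- * above: whole payload words, the extra word carrying an odd trailing
- * ("dribble") byte, and the zero words that pad a short frame up to
- * ETH_ZLEN.  E.g. an skb of 17 bytes gives 8 payload words, 1 dribble word
- * and 21 pad words: 16 + 2 + 42 = 60 bytes = ETH_ZLEN.
- */
-static void count_tx_words(int skb_len, int *data_words,
-			   int *dribble_words, int *pad_words)
-{
-	int length = skb_len < ETH_ZLEN ? ETH_ZLEN : skb_len;
-	int len = skb_len;
-
-	*data_words = len >> 1;		/* outsw(..., buf, len >> 1) */
-	*dribble_words = 0;
-	if (len & 1) {			/* odd length: one extra word */
-		*dribble_words = 1;
-		len++;
-	}
-	*pad_words = (length != skb_len) ? (length - len + 1) >> 1 : 0;
-}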
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t net_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status;
- int handled = 0;
-
- if (dev == NULL) {
- printk ("at1700_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock (&lp->lock);
-
- status = inw(ioaddr + TX_STATUS);
- outw(status, ioaddr + TX_STATUS);
-
- if (net_debug > 4)
- printk("%s: Interrupt with status %04x.\n", dev->name, status);
- if (lp->rx_started == 0 &&
- (status & 0xff00 || (inb(ioaddr + RX_MODE) & 0x40) == 0)) {
-		/* Got a packet (or several).
-		   net_rx must not run more than once at a time for the same
-		   device. While executing net_rx we may catch a Tx interrupt,
-		   so we set rx_started to prevent the interrupt routine
-		   (net_interrupt) from diving into net_rx again. */
- handled = 1;
- lp->rx_started = 1;
- outb(0x00, ioaddr + RX_INTR); /* Disable RX intr. */
- net_rx(dev);
- outb(0x81, ioaddr + RX_INTR); /* Enable RX intr. */
- lp->rx_started = 0;
- }
- if (status & 0x00ff) {
- handled = 1;
- if (status & 0x02) {
- /* More than 16 collisions occurred */
- if (net_debug > 4)
-				printk("%s: 16 collisions occurred during Tx.\n", dev->name);
- /* Cancel sending a packet. */
- outb(0x03, ioaddr + COL16CNTL);
- dev->stats.collisions++;
- }
- if (status & 0x82) {
- dev->stats.tx_packets++;
-			/* If the Tx queue holds packets and no packet is
-			   currently being transferred from the host, start
-			   transmitting. */
- if (lp->tx_queue && lp->tx_queue_ready) {
- outb(0x80 | lp->tx_queue, ioaddr + TX_START);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
- netif_wake_queue (dev);
- } else {
- lp->tx_started = 0;
- netif_wake_queue (dev);
- }
- }
- }
-
- spin_unlock (&lp->lock);
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void
-net_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int boguscount = 5;
-
- while ((inb(ioaddr + RX_MODE) & 0x40) == 0) {
- ushort status = inw(ioaddr + DATAPORT);
- ushort pkt_len = inw(ioaddr + DATAPORT);
-
- if (net_debug > 4)
- printk("%s: Rxing packet mode %02x status %04x.\n",
- dev->name, inb(ioaddr + RX_MODE), status);
-#ifndef final_version
- if (status == 0) {
- outb(0x05, ioaddr + RX_CTRL);
- break;
- }
-#endif
-
- if ((status & 0xF0) != 0x20) { /* There was an error. */
- dev->stats.rx_errors++;
- if (status & 0x08) dev->stats.rx_length_errors++;
- if (status & 0x04) dev->stats.rx_frame_errors++;
- if (status & 0x02) dev->stats.rx_crc_errors++;
- if (status & 0x01) dev->stats.rx_over_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- if (pkt_len > 1550) {
- printk("%s: The AT1700 claimed a very large packet, size %d.\n",
- dev->name, pkt_len);
- /* Prime the FIFO and then flush the packet. */
- inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
- outb(0x05, ioaddr + RX_CTRL);
- dev->stats.rx_errors++;
- break;
- }
- skb = netdev_alloc_skb(dev, pkt_len + 3);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet (len %d).\n",
- dev->name, pkt_len);
- /* Prime the FIFO and then flush the packet. */
- inw(ioaddr + DATAPORT); inw(ioaddr + DATAPORT);
- outb(0x05, ioaddr + RX_CTRL);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb,2);
-
- insw(ioaddr + DATAPORT, skb_put(skb,pkt_len), (pkt_len + 1) >> 1);
- skb->protocol=eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- if (--boguscount <= 0)
- break;
- }
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(NET_BH) for us and will work on them
- when we get to the bottom-half routine. */
- {
- int i;
- for (i = 0; i < 20; i++) {
- if ((inb(ioaddr + RX_MODE) & 0x40) == 0x40)
- break;
- inw(ioaddr + DATAPORT); /* dummy status read */
- outb(0x05, ioaddr + RX_CTRL);
- }
-
- if (net_debug > 5)
- printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
- dev->name, inb(ioaddr + RX_MODE), i);
- }
-}
-
-/* The inverse routine to net_open(). */
-static int net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- netif_stop_queue(dev);
-
- /* Set configuration register 0 to disable Tx and Rx. */
- outb(0xda, ioaddr + CONFIG_0);
-
- /* No statistic counters on the chip to update. */
-
- /* Disable the IRQ on boards of fmv18x where it is feasible. */
- if (lp->jumpered)
- outb(0x00, ioaddr + IOCONFIG1);
-
- /* Power-down the chip. Green, green, green! */
- outb(0x00, ioaddr + CONFIG_1);
- return 0;
-}
-
-/*
- Set the multicast/promiscuous mode for this adaptor.
-*/
-
-static void
-set_rx_mode(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- unsigned char mc_filter[8]; /* Multicast hash filter */
- unsigned long flags;
-
- if (dev->flags & IFF_PROMISC) {
- memset(mc_filter, 0xff, sizeof(mc_filter));
- outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
- (dev->flags & IFF_ALLMULTI)) {
- /* Too many to filter perfectly -- accept all multicasts. */
- memset(mc_filter, 0xff, sizeof(mc_filter));
- outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (netdev_mc_empty(dev)) {
- memset(mc_filter, 0x00, sizeof(mc_filter));
- outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
- } else {
- struct netdev_hw_addr *ha;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, dev) {
- unsigned int bit =
- ether_crc_le(ETH_ALEN, ha->addr) >> 26;
-			mc_filter[bit >> 3] |= 1 << (bit & 7);
- }
- outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
- }
-
- spin_lock_irqsave (&lp->lock, flags);
- if (memcmp(mc_filter, lp->mc_filter, sizeof(mc_filter))) {
- int i;
- int saved_bank = inw(ioaddr + CONFIG_0);
- /* Switch to bank 1 and set the multicast table. */
- outw((saved_bank & ~0x0C00) | 0x0480, ioaddr + CONFIG_0);
- for (i = 0; i < 8; i++)
- outb(mc_filter[i], ioaddr + PORT_OFFSET(8 + i));
- memcpy(lp->mc_filter, mc_filter, sizeof(mc_filter));
- outw(saved_bank, ioaddr + CONFIG_0);
- }
- spin_unlock_irqrestore (&lp->lock, flags);
-}
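-
-/* A standalone sketch of the hash used above.  The assumption here is that
- * ether_crc_le() is the reflected CRC-32 (polynomial 0xedb88320, initial
- * value 0xffffffff, no final inversion) of the 6-byte address, whose top
- * six bits then select one of the 64 filter bits.  This local
- * reimplementation (name invented, not called by the driver) is for
- * illustration only; set_rx_mode() keeps using ether_crc_le().
- */
-static unsigned int mc_hash_bit_sketch(const unsigned char *addr)
-{
-	unsigned int crc = 0xffffffff;
-	int i, bit;
-
-	for (i = 0; i < ETH_ALEN; i++) {
-		unsigned char octet = addr[i];
-		for (bit = 0; bit < 8; bit++, octet >>= 1)
-			crc = (crc >> 1) ^ (((crc ^ octet) & 1) ? 0xedb88320 : 0);
-	}
-	return crc >> 26;	/* 0..63; byte = bit >> 3, mask = 1 << (bit & 7) */
-}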
-
-#ifdef MODULE
-static struct net_device *dev_at1700;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(net_debug, int, 0);
-MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
-MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
-MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
-
-static int __init at1700_module_init(void)
-{
- if (io == 0)
- printk("at1700: You should not use auto-probing with insmod!\n");
- dev_at1700 = at1700_probe(-1);
- if (IS_ERR(dev_at1700))
- return PTR_ERR(dev_at1700);
- return 0;
-}
-
-static void __exit at1700_module_exit(void)
-{
- unregister_netdev(dev_at1700);
- cleanup_card(dev_at1700);
- free_netdev(dev_at1700);
-}
-module_init(at1700_module_init);
-module_exit(at1700_module_exit);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/fujitsu/eth16i.c b/drivers/net/ethernet/fujitsu/eth16i.c
deleted file mode 100644
index a992d1f7e0d..00000000000
--- a/drivers/net/ethernet/fujitsu/eth16i.c
+++ /dev/null
@@ -1,1483 +0,0 @@
-/* eth16i.c An ICL EtherTeam 16i and 32 EISA ethernet driver for Linux
-
- Written 1994-1999 by Mika Kuoppala
-
- Copyright (C) 1994-1999 by Mika Kuoppala
- Based on skeleton.c and heavily on at1700.c by Donald Becker
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as miku@iki.fi
-
-   This driver supports the following cards:
- - ICL EtherTeam 16i
- - ICL EtherTeam 32 EISA
- (Uses true 32 bit transfers rather than 16i compatibility mode)
-
- Example Module usage:
- insmod eth16i.o io=0x2a0 mediatype=bnc
-
- mediatype can be one of the following: bnc,tp,dix,auto,eprom
-
- 'auto' will try to autoprobe mediatype.
- 'eprom' will use whatever type defined in eprom.
-
-   I have benchmarked the driver with a PII/300MHz as an ftp client
-   and a 486/33MHz as an ftp server. Top speed was 1128.37 kilobytes/sec.
-
- Sources:
- - skeleton.c a sample network driver core for linux,
- written by Donald Becker <becker@scyld.com>
- - at1700.c a driver for Allied Telesis AT1700, written
- by Donald Becker.
- - e16iSRV.asm a Netware 3.X Server Driver for ICL EtherTeam16i
- written by Markku Viima
- - The Fujitsu MB86965 databook.
-
-   The author thanks the following persons for their valuable assistance:
- Markku Viima (ICL)
- Ari Valve (ICL)
- Donald Becker
- Kurt Huwig <kurt@huwig.de>
-
- Revision history:
-
- Version Date Description
-
- 0.01 15.12-94 Initial version (card detection)
- 0.02 23.01-95 Interrupt is now hooked correctly
- 0.03 01.02-95 Rewrote initialization part
- 0.04 07.02-95 Base skeleton done...
- Made a few changes to signature checking
- to make it a bit reliable.
- - fixed bug in tx_buf mapping
- - fixed bug in initialization (DLC_EN
- wasn't enabled when initialization
- was done.)
- 0.05 08.02-95 If there were more than one packet to send,
- transmit was jammed due to invalid
- register write...now fixed
- 0.06 19.02-95 Rewrote interrupt handling
- 0.07 13.04-95 Wrote EEPROM read routines
- Card configuration now set according to
- data read from EEPROM
- 0.08 23.06-95 Wrote part that tries to probe used interface
- port if AUTO is selected
-
- 0.09 01.09-95 Added module support
-
- 0.10 04.09-95 Fixed receive packet allocation to work
- with kernels > 1.3.x
-
- 0.20 20.09-95 Added support for EtherTeam32 EISA
-
- 0.21 17.10-95 Removed the unnecessary extern
- init_etherdev() declaration. Some
- other cleanups.
-
- 0.22 22.02-96 Receive buffer was not flushed
- correctly when faulty packet was
- received. Now fixed.
-
- 0.23 26.02-96 Made resetting the adapter
- more reliable.
-
- 0.24 27.02-96 Rewrote faulty packet handling in eth16i_rx
-
- 0.25 22.05-96 kfree() was missing from cleanup_module.
-
- 0.26 11.06-96 Sometimes card was not found by
- check_signature(). Now made more reliable.
-
- 0.27 23.06-96 Oops. 16 consecutive collisions halted
- adapter. Now will try to retransmit
- MAX_COL_16 times before finally giving up.
-
- 0.28 28.10-97 Added dev_id parameter (NULL) for free_irq
-
- 0.29 29.10-97 Multiple card support for module users
-
- 0.30 30.10-97 Fixed irq allocation bug.
- (request_irq moved from probe to open)
-
-	0.30a	21.08-98   Card detection made more relaxed. The driver
-			   had problems finding the card with some
-			   TCP/IP-PROM boots. Suggested by
-			   Kurt Huwig <kurt@huwig.de>
-
- 0.31 28.08-98 Media interface port can now be selected
- with module parameters or kernel
- boot parameters.
-
- 0.32 31.08-98 IRQ was never freed if open/close
- pair wasn't called. Now fixed.
-
- 0.33 10.09-98 When eth16i_open() was called after
- eth16i_close() chip never recovered.
- Now more shallow reset is made on
- close.
-
- 0.34 29.06-99 Fixed one bad #ifdef.
- Changed ioaddr -> io for consistency
-
-	0.35    01.07-99   transmit/receive bytes were never
-			   updated in stats.
-
- Bugs:
-	In some cases the media interface autoprobing code doesn't find
-	the correct interface type. In that case you can choose the
-	interface type manually in DOS with E16IC.EXE, the configuration
-	software for EtherTeam16i and EtherTeam32 cards. The same applies
-	to the IRQ setting: you cannot use a module parameter to configure
-	the card's IRQ (yet).
-
- To do:
- - Real multicast support
-	- Rewrite the media interface autoprobing code. It's _horrible_!
- - Possibly merge all the MB86965 specific code to external
- module for use by eth16.c and Donald's at1700.c
-	- IRQ configuration with a module parameter. I will do
-	  this when I get enough information about setting the
-	  IRQ without the configuration utility.
-*/
-
-static char *version =
- "eth16i.c: v0.35 01-Jul-1999 Mika Kuoppala (miku@iki.fi)\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-
-
-
-/* Few macros */
-#define BITSET(ioaddr, bnum) ((outb(((inb(ioaddr)) | (bnum)), ioaddr)))
-#define BITCLR(ioaddr, bnum) ((outb(((inb(ioaddr)) & (~(bnum))), ioaddr)))
-
-/* This is the I/O address space for Etherteam 16i adapter. */
-#define ETH16I_IO_EXTENT 32
-
-/* Ticks before deciding that transmit has timed out */
-#define TX_TIMEOUT (400*HZ/1000)
-
-/* Maximum loop count when receiving packets */
-#define MAX_RX_LOOP 20
-
-/* Some interrupt masks */
-#define ETH16I_INTR_ON 0xef8a /* Higher is receive mask */
-#define ETH16I_INTR_OFF 0x0000
-
-/* Buffers header status byte meanings */
-#define PKT_GOOD BIT(5)
-#define PKT_GOOD_RMT BIT(4)
-#define PKT_SHORT BIT(3)
-#define PKT_ALIGN_ERR BIT(2)
-#define PKT_CRC_ERR BIT(1)
-#define PKT_RX_BUF_OVERFLOW BIT(0)
-
-/* Transmit status register (DLCR0) */
-#define TX_STATUS_REG 0
-#define TX_DONE BIT(7)
-#define NET_BUSY BIT(6)
-#define TX_PKT_RCD BIT(5)
-#define CR_LOST BIT(4)
-#define TX_JABBER_ERR BIT(3)
-#define COLLISION BIT(2)
-#define COLLISIONS_16 BIT(1)
-
-/* Receive status register (DLCR1) */
-#define RX_STATUS_REG 1
-#define RX_PKT BIT(7) /* Packet received */
-#define BUS_RD_ERR BIT(6)
-#define SHORT_PKT_ERR BIT(3)
-#define ALIGN_ERR BIT(2)
-#define CRC_ERR BIT(1)
-#define RX_BUF_OVERFLOW BIT(0)
-
-/* Transmit Interrupt Enable Register (DLCR2) */
-#define TX_INTR_REG 2
-#define TX_INTR_DONE BIT(7)
-#define TX_INTR_COL BIT(2)
-#define TX_INTR_16_COL BIT(1)
-
-/* Receive Interrupt Enable Register (DLCR3) */
-#define RX_INTR_REG 3
-#define RX_INTR_RECEIVE BIT(7)
-#define RX_INTR_SHORT_PKT BIT(3)
-#define RX_INTR_CRC_ERR BIT(1)
-#define RX_INTR_BUF_OVERFLOW BIT(0)
-
-/* Transmit Mode Register (DLCR4) */
-#define TRANSMIT_MODE_REG 4
-#define LOOPBACK_CONTROL BIT(1)
-#define CONTROL_OUTPUT BIT(2)
-
-/* Receive Mode Register (DLCR5) */
-#define RECEIVE_MODE_REG 5
-#define RX_BUFFER_EMPTY BIT(6)
-#define ACCEPT_BAD_PACKETS BIT(5)
-#define RECEIVE_SHORT_ADDR BIT(4)
-#define ACCEPT_SHORT_PACKETS BIT(3)
-#define REMOTE_RESET BIT(2)
-
-#define ADDRESS_FILTER_MODE BIT(1) | BIT(0)
-#define REJECT_ALL 0
-#define ACCEPT_ALL 3
-#define MODE_1 1 /* NODE ID, BC, MC, 2-24th bit */
-#define MODE_2 2 /* NODE ID, BC, MC, Hash Table */
-
-/* Configuration Register 0 (DLCR6) */
-#define CONFIG_REG_0 6
-#define DLC_EN BIT(7)
-#define SRAM_CYCLE_TIME_100NS BIT(6)
-#define SYSTEM_BUS_WIDTH_8 BIT(5) /* 1 = 8bit, 0 = 16bit */
-#define BUFFER_WIDTH_8 BIT(4) /* 1 = 8bit, 0 = 16bit */
-#define TBS1 BIT(3)
-#define TBS0 BIT(2)
-#define SRAM_BS1 BIT(1) /* 00=8kb, 01=16kb */
-#define SRAM_BS0 BIT(0) /* 10=32kb, 11=64kb */
-
-#ifndef ETH16I_TX_BUF_SIZE /* 0 = 2kb, 1 = 4kb */
-#define ETH16I_TX_BUF_SIZE 3 /* 2 = 8kb, 3 = 16kb */
-#endif
-#define TX_BUF_1x2048 0
-#define TX_BUF_2x2048 1
-#define TX_BUF_2x4098 2
-#define TX_BUF_2x8192 3
-
-/* Configuration Register 1 (DLCR7) */
-#define CONFIG_REG_1 7
-#define POWERUP BIT(5)
-
-/* Transmit start register */
-#define TRANSMIT_START_REG 10
-#define TRANSMIT_START_RB 2
-#define TX_START           BIT(7)	/* The remaining register bits give */
-					/* the number of packets in the tx buffer */
-/* Node ID registers (DLCR8-13) */
-#define NODE_ID_0 8
-#define NODE_ID_RB 0
-
-/* Hash Table registers (HT8-15) */
-#define HASH_TABLE_0 8
-#define HASH_TABLE_RB 1
-
-/* Buffer memory ports */
-#define BUFFER_MEM_PORT_LB 8
-#define DATAPORT BUFFER_MEM_PORT_LB
-#define BUFFER_MEM_PORT_HB 9
-
-/* 16 Collision control register (BMPR11) */
-#define COL_16_REG 11
-#define HALT_ON_16 0x00
-#define RETRANS_AND_HALT_ON_16 0x02
-
-/* Maximum number of attempts to send after 16 consecutive collisions */
-#define MAX_COL_16 10
-
-/* DMA Burst and Transceiver Mode Register (BMPR13) */
-#define TRANSCEIVER_MODE_REG 13
-#define TRANSCEIVER_MODE_RB 2
-#define IO_BASE_UNLOCK BIT(7)
-#define LOWER_SQUELCH_TRESH BIT(6)
-#define LINK_TEST_DISABLE BIT(5)
-#define AUI_SELECT BIT(4)
-#define DIS_AUTO_PORT_SEL BIT(3)
-
-/* Filter Self Receive Register (BMPR14) */
-#define FILTER_SELF_RX_REG 14
-#define SKIP_RX_PACKET BIT(2)
-#define FILTER_SELF_RECEIVE BIT(0)
-
-/* EEPROM Control Register (BMPR 16) */
-#define EEPROM_CTRL_REG 16
-
-/* EEPROM Data Register (BMPR 17) */
-#define EEPROM_DATA_REG 17
-
-/* NMC93CSx6 EEPROM Control Bits */
-#define CS_0 0x00
-#define CS_1 0x20
-#define SK_0 0x00
-#define SK_1 0x40
-#define DI_0 0x00
-#define DI_1 0x80
-
-/* NMC93CSx6 EEPROM Instructions */
-#define EEPROM_READ 0x80
-
-/* NMC93CSx6 EEPROM Addresses */
-#define E_NODEID_0 0x02
-#define E_NODEID_1 0x03
-#define E_NODEID_2 0x04
-#define E_PORT_SELECT 0x14
- #define E_PORT_BNC 0x00
- #define E_PORT_DIX 0x01
- #define E_PORT_TP 0x02
- #define E_PORT_AUTO 0x03
- #define E_PORT_FROM_EPROM 0x04
-#define E_PRODUCT_CFG 0x30
-
-
-/* Macro to slow down io between EEPROM clock transitions */
-#define eeprom_slow_io() do { int _i = 40; while(--_i > 0) { inb(0x80); }}while(0)
-
-/* Jumperless Configuration Register (BMPR19) */
-#define JUMPERLESS_CONFIG 19
-
-/* ID ROM registers, writing to them also resets some parts of chip */
-#define ID_ROM_0 24
-#define ID_ROM_7 31
-#define RESET ID_ROM_0
-
-/* This is the I/O address list to be probed when seeking the card */
-static unsigned int eth16i_portlist[] __initdata = {
- 0x260, 0x280, 0x2A0, 0x240, 0x340, 0x320, 0x380, 0x300, 0
-};
-
-static unsigned int eth32i_portlist[] __initdata = {
- 0x1000, 0x2000, 0x3000, 0x4000, 0x5000, 0x6000, 0x7000, 0x8000,
- 0x9000, 0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000, 0
-};
-
-/* This is the Interrupt lookup table for Eth16i card */
-static unsigned int eth16i_irqmap[] __initdata = { 9, 10, 5, 15, 0 };
-#define NUM_OF_ISA_IRQS 4
-
-/* This is the Interrupt lookup table for Eth32i card */
-static unsigned int eth32i_irqmap[] __initdata = { 3, 5, 7, 9, 10, 11, 12, 15, 0 };
-#define EISA_IRQ_REG 0xc89
-#define NUM_OF_EISA_IRQS 8
-
-static unsigned int eth16i_tx_buf_map[] = { 2048, 2048, 4096, 8192 };
-
-/* Use 0 for production, 1 for verification, >2 for debug */
-#ifndef ETH16I_DEBUG
-#define ETH16I_DEBUG 0
-#endif
-static unsigned int eth16i_debug = ETH16I_DEBUG;
-
-/* Information for each board */
-
-struct eth16i_local {
- unsigned char tx_started;
- unsigned char tx_buf_busy;
- unsigned short tx_queue; /* Number of packets in transmit buffer */
- unsigned short tx_queue_len;
- unsigned int tx_buf_size;
- unsigned long open_time;
- unsigned long tx_buffered_packets;
- unsigned long tx_buffered_bytes;
- unsigned long col_16;
- spinlock_t lock;
-};
-
-/* Function prototypes */
-
-static int eth16i_probe1(struct net_device *dev, int ioaddr);
-static int eth16i_check_signature(int ioaddr);
-static int eth16i_probe_port(int ioaddr);
-static void eth16i_set_port(int ioaddr, int porttype);
-static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l);
-static int eth16i_receive_probe_packet(int ioaddr);
-static int eth16i_get_irq(int ioaddr);
-static int eth16i_read_eeprom(int ioaddr, int offset);
-static int eth16i_read_eeprom_word(int ioaddr);
-static void eth16i_eeprom_cmd(int ioaddr, unsigned char command);
-static int eth16i_open(struct net_device *dev);
-static int eth16i_close(struct net_device *dev);
-static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev);
-static void eth16i_rx(struct net_device *dev);
-static void eth16i_timeout(struct net_device *dev);
-static irqreturn_t eth16i_interrupt(int irq, void *dev_id);
-static void eth16i_reset(struct net_device *dev);
-static void eth16i_timeout(struct net_device *dev);
-static void eth16i_skip_packet(struct net_device *dev);
-static void eth16i_multicast(struct net_device *dev);
-static void eth16i_select_regbank(unsigned char regbank, int ioaddr);
-static void eth16i_initialize(struct net_device *dev, int boot);
-
-#if 0
-static int eth16i_set_irq(struct net_device *dev);
-#endif
-
-#ifdef MODULE
-static ushort eth16i_parse_mediatype(const char* s);
-#endif
-
-static char cardname[] __initdata = "ICL EtherTeam 16i/32";
-
-static int __init do_eth16i_probe(struct net_device *dev)
-{
- int i;
- int ioaddr;
- int base_addr = dev->base_addr;
-
- if(eth16i_debug > 4)
- printk(KERN_DEBUG "Probing started for %s\n", cardname);
-
- if(base_addr > 0x1ff) /* Check only single location */
- return eth16i_probe1(dev, base_addr);
- else if(base_addr != 0) /* Don't probe at all */
- return -ENXIO;
-
- /* Seek card from the ISA io address space */
- for(i = 0; (ioaddr = eth16i_portlist[i]) ; i++)
- if(eth16i_probe1(dev, ioaddr) == 0)
- return 0;
-
- /* Seek card from the EISA io address space */
- for(i = 0; (ioaddr = eth32i_portlist[i]) ; i++)
- if(eth16i_probe1(dev, ioaddr) == 0)
- return 0;
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init eth16i_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct eth16i_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_eth16i_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops eth16i_netdev_ops = {
- .ndo_open = eth16i_open,
- .ndo_stop = eth16i_close,
- .ndo_start_xmit = eth16i_tx,
- .ndo_set_rx_mode = eth16i_multicast,
- .ndo_tx_timeout = eth16i_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- static unsigned version_printed;
- int retval;
-
- /* Let's grab the region */
- if (!request_region(ioaddr, ETH16I_IO_EXTENT, cardname))
- return -EBUSY;
-
- /*
-	   The MB86985 chip has one register which holds the io address at
-	   which the chip lies. First read this register and compare it to
-	   our current io address; if they match, this could be our chip.
- */
-
- if(ioaddr < 0x1000) {
- if(eth16i_portlist[(inb(ioaddr + JUMPERLESS_CONFIG) & 0x07)]
- != ioaddr) {
- retval = -ENODEV;
- goto out;
- }
- }
-
- /* Now we will go a bit deeper and try to find the chip's signature */
-
- if(eth16i_check_signature(ioaddr) != 0) {
- retval = -ENODEV;
- goto out;
- }
-
- /*
-	   Now it seems that we have found an ethernet chip at this particular
-	   ioaddr. The MB86985 chip has a feature whereby reading a certain
-	   register makes it increase its io base address to the next
-	   configurable slot. So, now that we have found the chip, the first
-	   thing to do is make sure that its ioaddr stays put.
- */
-
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(0x00, ioaddr + TRANSCEIVER_MODE_REG);
-
- outb(0x00, ioaddr + RESET); /* Reset some parts of chip */
- BITSET(ioaddr + CONFIG_REG_0, BIT(7)); /* Disable the data link */
-
- if( (eth16i_debug & version_printed++) == 0)
- printk(KERN_INFO "%s", version);
-
- dev->base_addr = ioaddr;
- dev->irq = eth16i_get_irq(ioaddr);
-
- /* Try to obtain interrupt vector */
-
- if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, cardname, dev))) {
- printk(KERN_WARNING "%s at %#3x, but is unusable due to conflicting IRQ %d.\n",
- cardname, ioaddr, dev->irq);
- goto out;
- }
-
- printk(KERN_INFO "%s: %s at %#3x, IRQ %d, ",
- dev->name, cardname, ioaddr, dev->irq);
-
-
- /* Now we will have to lock the chip's io address */
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(0x38, ioaddr + TRANSCEIVER_MODE_REG);
-
- eth16i_initialize(dev, 1); /* Initialize rest of the chip's registers */
-
-	/* Now let's save some energy by shutting down the chip ;) */
- BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
-
- /* Initialize the device structure */
- dev->netdev_ops = &eth16i_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- spin_lock_init(&lp->lock);
-
- retval = register_netdev(dev);
- if (retval)
- goto out1;
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, ETH16I_IO_EXTENT);
- return retval;
-}
-
-
-static void eth16i_initialize(struct net_device *dev, int boot)
-{
- int ioaddr = dev->base_addr;
- int i, node_w = 0;
- unsigned char node_byte = 0;
-
- /* Setup station address */
- eth16i_select_regbank(NODE_ID_RB, ioaddr);
- for(i = 0 ; i < 3 ; i++) {
- unsigned short node_val = eth16i_read_eeprom(ioaddr, E_NODEID_0 + i);
- ((unsigned short *)dev->dev_addr)[i] = ntohs(node_val);
- }
-
- for(i = 0; i < 6; i++) {
- outb( ((unsigned char *)dev->dev_addr)[i], ioaddr + NODE_ID_0 + i);
- if(boot) {
- printk("%02x", inb(ioaddr + NODE_ID_0 + i));
- if(i != 5)
- printk(":");
- }
- }
-
- /* Now we will set multicast addresses to accept none */
- eth16i_select_regbank(HASH_TABLE_RB, ioaddr);
- for(i = 0; i < 8; i++)
- outb(0x00, ioaddr + HASH_TABLE_0 + i);
-
- /*
- Now let's disable the transmitter and receiver, set the buffer ram
- cycle time, bus width and buffer data path width. Also we shall
- set transmit buffer size and total buffer size.
- */
-
- eth16i_select_regbank(2, ioaddr);
-
- node_byte = 0;
- node_w = eth16i_read_eeprom(ioaddr, E_PRODUCT_CFG);
-
- if( (node_w & 0xFF00) == 0x0800)
- node_byte |= BUFFER_WIDTH_8;
-
- node_byte |= SRAM_BS1;
-
- if( (node_w & 0x00FF) == 64)
- node_byte |= SRAM_BS0;
-
- node_byte |= DLC_EN | SRAM_CYCLE_TIME_100NS | (ETH16I_TX_BUF_SIZE << 2);
-
- outb(node_byte, ioaddr + CONFIG_REG_0);
-
- /* We shall halt the transmitting, if 16 collisions are detected */
- outb(HALT_ON_16, ioaddr + COL_16_REG);
-
-#ifdef MODULE
- /* if_port already set by init_module() */
-#else
- dev->if_port = (dev->mem_start < E_PORT_FROM_EPROM) ?
- dev->mem_start : E_PORT_FROM_EPROM;
-#endif
-
- /* Set interface port type */
- if(boot) {
- static const char * const porttype[] = {
- "BNC", "DIX", "TP", "AUTO", "FROM_EPROM"
- };
-
- switch(dev->if_port)
- {
-
- case E_PORT_FROM_EPROM:
- dev->if_port = eth16i_read_eeprom(ioaddr, E_PORT_SELECT);
- break;
-
- case E_PORT_AUTO:
- dev->if_port = eth16i_probe_port(ioaddr);
- break;
-
- case E_PORT_BNC:
- case E_PORT_TP:
- case E_PORT_DIX:
- break;
- }
-
- printk(" %s interface.\n", porttype[dev->if_port]);
-
- eth16i_set_port(ioaddr, dev->if_port);
- }
-
- /* Set Receive Mode to normal operation */
- outb(MODE_2, ioaddr + RECEIVE_MODE_REG);
-}
-
-static int eth16i_probe_port(int ioaddr)
-{
- int i;
- int retcode;
- unsigned char dummy_packet[64];
-
- /* Powerup the chip */
- outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
-
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
-
- eth16i_select_regbank(NODE_ID_RB, ioaddr);
-
- for(i = 0; i < 6; i++) {
- dummy_packet[i] = inb(ioaddr + NODE_ID_0 + i);
- dummy_packet[i+6] = inb(ioaddr + NODE_ID_0 + i);
- }
-
- dummy_packet[12] = 0x00;
- dummy_packet[13] = 0x04;
- memset(dummy_packet + 14, 0, sizeof(dummy_packet) - 14);
-
- eth16i_select_regbank(2, ioaddr);
-
- for(i = 0; i < 3; i++) {
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
- eth16i_set_port(ioaddr, i);
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Set port number %d\n", i);
-
- retcode = eth16i_send_probe_packet(ioaddr, dummy_packet, 64);
- if(retcode == 0) {
- retcode = eth16i_receive_probe_packet(ioaddr);
- if(retcode != -1) {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "Eth16i interface port found at %d\n", i);
- return i;
- }
- }
- else {
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "TRANSMIT_DONE timeout when probing interface port\n");
- }
- }
-
- if( eth16i_debug > 1)
- printk(KERN_DEBUG "Using default port\n");
-
- return E_PORT_BNC;
-}
-
-static void eth16i_set_port(int ioaddr, int porttype)
-{
- unsigned short temp = 0;
-
- eth16i_select_regbank(TRANSCEIVER_MODE_RB, ioaddr);
- outb(LOOPBACK_CONTROL, ioaddr + TRANSMIT_MODE_REG);
-
- temp |= DIS_AUTO_PORT_SEL;
-
- switch(porttype) {
-
- case E_PORT_BNC :
- temp |= AUI_SELECT;
- break;
-
- case E_PORT_TP :
- break;
-
- case E_PORT_DIX :
- temp |= AUI_SELECT;
- BITSET(ioaddr + TRANSMIT_MODE_REG, CONTROL_OUTPUT);
- break;
- }
-
- outb(temp, ioaddr + TRANSCEIVER_MODE_REG);
-
- if(eth16i_debug > 1) {
- printk(KERN_DEBUG "TRANSMIT_MODE_REG = %x\n", inb(ioaddr + TRANSMIT_MODE_REG));
- printk(KERN_DEBUG "TRANSCEIVER_MODE_REG = %x\n",
- inb(ioaddr+TRANSCEIVER_MODE_REG));
- }
-}
-
-static int eth16i_send_probe_packet(int ioaddr, unsigned char *b, int l)
-{
- unsigned long starttime;
-
- outb(0xff, ioaddr + TX_STATUS_REG);
-
- outw(l, ioaddr + DATAPORT);
- outsw(ioaddr + DATAPORT, (unsigned short *)b, (l + 1) >> 1);
-
- starttime = jiffies;
- outb(TX_START | 1, ioaddr + TRANSMIT_START_REG);
-
- while( (inb(ioaddr + TX_STATUS_REG) & 0x80) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
- return -1;
- }
- }
-
- return 0;
-}
-
-static int eth16i_receive_probe_packet(int ioaddr)
-{
- unsigned long starttime;
-
- starttime = jiffies;
-
- while((inb(ioaddr + TX_STATUS_REG) & 0x20) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
-
- if(eth16i_debug > 1)
-				printk(KERN_DEBUG "Timeout while waiting for the transmitted packet to be received\n");
- starttime = jiffies;
- while((inb(ioaddr + RX_STATUS_REG) & 0x80) == 0) {
- if( time_after(jiffies, starttime + TX_TIMEOUT)) {
- if(eth16i_debug > 1)
-						printk(KERN_DEBUG "Timeout while waiting for a receive packet\n");
- return -1;
- }
- }
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "RECEIVE_PACKET\n");
- return 0; /* Found receive packet */
- }
- }
-
- if(eth16i_debug > 1) {
- printk(KERN_DEBUG "TRANSMIT_PACKET_RECEIVED %x\n", inb(ioaddr + TX_STATUS_REG));
- printk(KERN_DEBUG "RX_STATUS_REG = %x\n", inb(ioaddr + RX_STATUS_REG));
- }
-
- return 0; /* Return success */
-}
-
-#if 0
-static int eth16i_set_irq(struct net_device* dev)
-{
- const int ioaddr = dev->base_addr;
- const int irq = dev->irq;
- int i = 0;
-
- if(ioaddr < 0x1000) {
- while(eth16i_irqmap[i] && eth16i_irqmap[i] != irq)
- i++;
-
- if(i < NUM_OF_ISA_IRQS) {
- u8 cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- cbyte = (cbyte & 0x3F) | (i << 6);
- outb(cbyte, ioaddr + JUMPERLESS_CONFIG);
- return 0;
- }
- }
- else {
- printk(KERN_NOTICE "%s: EISA Interrupt cannot be set. Use EISA Configuration utility.\n", dev->name);
- }
-
- return -1;
-
-}
-#endif
-
-static int __init eth16i_get_irq(int ioaddr)
-{
- unsigned char cbyte;
-
- if( ioaddr < 0x1000) {
- cbyte = inb(ioaddr + JUMPERLESS_CONFIG);
- return eth16i_irqmap[((cbyte & 0xC0) >> 6)];
-	} else {	/* The card is EISA, so the method of getting the IRQ is different */
- unsigned short index = 0;
- cbyte = inb(ioaddr + EISA_IRQ_REG);
- while( (cbyte & 0x01) == 0) {
- cbyte = cbyte >> 1;
- index++;
- }
- return eth32i_irqmap[index];
- }
-}
-
-static int __init eth16i_check_signature(int ioaddr)
-{
- int i;
- unsigned char creg[4] = { 0 };
-
- for(i = 0; i < 4 ; i++) {
-
- creg[i] = inb(ioaddr + TRANSMIT_MODE_REG + i);
-
- if(eth16i_debug > 1)
- printk("eth16i: read signature byte %x at %x\n",
- creg[i],
- ioaddr + TRANSMIT_MODE_REG + i);
- }
-
- creg[0] &= 0x0F; /* Mask collision cnr */
- creg[2] &= 0x7F; /* Mask DCLEN bit */
-
-#if 0
- /*
-	   This was removed because the card was sometimes left in a state
-	   in which it could no longer be found. If a stricter check is
-	   needed, this still has to be fixed.
- */
- if( ! ((creg[0] == 0x06) && (creg[1] == 0x41)) ) {
- if(creg[1] != 0x42)
- return -1;
- }
-#endif
-
- if( !((creg[2] == 0x36) && (creg[3] == 0xE0)) ) {
- creg[2] &= 0x40;
- creg[3] &= 0x03;
-
- if( !((creg[2] == 0x40) && (creg[3] == 0x00)) )
- return -1;
- }
-
- if(eth16i_read_eeprom(ioaddr, E_NODEID_0) != 0)
- return -1;
-
- if((eth16i_read_eeprom(ioaddr, E_NODEID_1) & 0xFF00) != 0x4B00)
- return -1;
-
- return 0;
-}
-
-static int eth16i_read_eeprom(int ioaddr, int offset)
-{
- int data = 0;
-
- eth16i_eeprom_cmd(ioaddr, EEPROM_READ | offset);
- outb(CS_1, ioaddr + EEPROM_CTRL_REG);
- data = eth16i_read_eeprom_word(ioaddr);
- outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
-
- return data;
-}
-
-static int eth16i_read_eeprom_word(int ioaddr)
-{
- int i;
- int data = 0;
-
- for(i = 16; i > 0; i--) {
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- data = (data << 1) |
- ((inb(ioaddr + EEPROM_DATA_REG) & DI_1) ? 1 : 0);
-
- eeprom_slow_io();
- }
-
- return data;
-}
-
-static void eth16i_eeprom_cmd(int ioaddr, unsigned char command)
-{
- int i;
-
- outb(CS_0 | SK_0, ioaddr + EEPROM_CTRL_REG);
- outb(DI_0, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- outb(DI_1, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
-
- for(i = 7; i >= 0; i--) {
- short cmd = ( (command & (1 << i)) ? DI_1 : DI_0 );
- outb(cmd, ioaddr + EEPROM_DATA_REG);
- outb(CS_1 | SK_0, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- outb(CS_1 | SK_1, ioaddr + EEPROM_CTRL_REG);
- eeprom_slow_io();
- }
-}
-
-static int eth16i_open(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Powerup the chip */
- outb(0xc0 | POWERUP, ioaddr + CONFIG_REG_1);
-
- /* Initialize the chip */
- eth16i_initialize(dev, 0);
-
- /* Set the transmit buffer size */
- lp->tx_buf_size = eth16i_tx_buf_map[ETH16I_TX_BUF_SIZE & 0x03];
-
- if(eth16i_debug > 0)
- printk(KERN_DEBUG "%s: transmit buffer size %d\n",
- dev->name, lp->tx_buf_size);
-
- /* Now enable Transmitter and Receiver sections */
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
-
- /* Now switch to register bank 2, for run time operation */
- eth16i_select_regbank(2, ioaddr);
-
- lp->open_time = jiffies;
- lp->tx_started = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
-
- /* Turn on interrupts*/
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
-
- netif_start_queue(dev);
- return 0;
-}
-
-static int eth16i_close(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- eth16i_reset(dev);
-
- /* Turn off interrupts*/
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- netif_stop_queue(dev);
-
- lp->open_time = 0;
-
- /* Disable transmit and receive */
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
-
- /* Reset the chip */
- /* outb(0xff, ioaddr + RESET); */
- /* outw(0xffff, ioaddr + TX_STATUS_REG); */
-
- outb(0x00, ioaddr + CONFIG_REG_1);
-
- return 0;
-}
-
-static void eth16i_timeout(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- /*
- If we get here, some higher level has decided that
- we are broken. There should really be a "kick me"
- function call instead.
- */
-
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
- printk(KERN_WARNING "%s: transmit timed out with status %04x, %s ?\n",
- dev->name,
- inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
- "IRQ conflict" : "network cable problem");
-
- dev->trans_start = jiffies; /* prevent tx timeout */
-
- /* Let's dump all registers */
- if(eth16i_debug > 0) {
- printk(KERN_DEBUG "%s: timeout: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
- dev->name, inb(ioaddr + 0),
- inb(ioaddr + 1), inb(ioaddr + 2),
- inb(ioaddr + 3), inb(ioaddr + 4),
- inb(ioaddr + 5),
- inb(ioaddr + 6), inb(ioaddr + 7));
-
- printk(KERN_DEBUG "%s: transmit start reg: %02x. collision reg %02x\n",
- dev->name, inb(ioaddr + TRANSMIT_START_REG),
- inb(ioaddr + COL_16_REG));
- printk(KERN_DEBUG "lp->tx_queue = %d\n", lp->tx_queue);
- printk(KERN_DEBUG "lp->tx_queue_len = %d\n", lp->tx_queue_len);
- printk(KERN_DEBUG "lp->tx_started = %d\n", lp->tx_started);
- }
- dev->stats.tx_errors++;
- eth16i_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int status = 0;
- ushort length = skb->len;
- unsigned char *buf;
- unsigned long flags;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- buf = skb->data;
-
- netif_stop_queue(dev);
-
- /* Turn off TX interrupts */
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
-	/* We would be better off doing the disable_irq tricks the 3c509 does;
-	   that would make this suck a lot less */
-
- spin_lock_irqsave(&lp->lock, flags);
-
- if( (length + 2) > (lp->tx_buf_size - lp->tx_queue_len)) {
- if(eth16i_debug > 0)
- printk(KERN_WARNING "%s: Transmit buffer full.\n", dev->name);
- }
- else {
- outw(length, ioaddr + DATAPORT);
-
- if( ioaddr < 0x1000 )
- outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
- else {
- unsigned char frag = length % 4;
- outsl(ioaddr + DATAPORT, buf, length >> 2);
- if( frag != 0 ) {
- outsw(ioaddr + DATAPORT, (buf + (length & 0xFFFC)), 1);
- if( frag == 3 )
- outsw(ioaddr + DATAPORT,
- (buf + (length & 0xFFFC) + 2), 1);
- }
- }
- lp->tx_buffered_packets++;
- lp->tx_buffered_bytes = length;
- lp->tx_queue++;
- lp->tx_queue_len += length + 2;
- }
- lp->tx_buf_busy = 0;
-
- if(lp->tx_started == 0) {
-		/* If the transmitter is idle, always trigger a transmit */
- outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- netif_wake_queue(dev);
- }
- else if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
-		/* There is still room for one more packet in the tx buffer */
- netif_wake_queue(dev);
- }
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
- /* Turn TX interrupts back on */
- /* outb(TX_INTR_DONE | TX_INTR_16_COL, ioaddr + TX_INTR_REG); */
- status = 0;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
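-
-/* A minimal sketch (not called by the driver; the helper name is invented)
- * of how the 32-bit EISA path in eth16i_tx() above splits an arbitrary
- * length into bus transfers: whole dwords via outsl(), then one or two
- * trailing 16-bit writes covering the remaining 1-3 bytes.  E.g. length
- * 1514 gives 378 dwords plus one trailing word (4*378 + 2 = 1514).
- */
-static void count_eisa_tx_io(int length, int *dwords, int *tail_words)
-{
-	int frag = length % 4;
-
-	*dwords = length >> 2;		/* outsl(..., buf, length >> 2) */
-	*tail_words = 0;
-	if (frag != 0) {
-		*tail_words = 1;	/* first trailing outsw() */
-		if (frag == 3)
-			*tail_words = 2; /* second outsw() for the third byte */
-	}
-}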
-
-static void eth16i_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int boguscount = MAX_RX_LOOP;
-
- /* Loop until all packets have been read */
- while( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) {
-
- /* Read status byte from receive buffer */
- ushort status = inw(ioaddr + DATAPORT);
-
- /* Get the size of the packet from receive buffer */
- ushort pkt_len = inw(ioaddr + DATAPORT);
-
- if(eth16i_debug > 4)
- printk(KERN_DEBUG "%s: Receiving packet mode %02x status %04x.\n",
- dev->name,
- inb(ioaddr + RECEIVE_MODE_REG), status);
-
- if( !(status & PKT_GOOD) ) {
- dev->stats.rx_errors++;
-
- if( (pkt_len < ETH_ZLEN) || (pkt_len > ETH_FRAME_LEN) ) {
- dev->stats.rx_length_errors++;
- eth16i_reset(dev);
- return;
- }
- else {
- eth16i_skip_packet(dev);
- dev->stats.rx_dropped++;
- }
- }
- else { /* Ok so now we should have a good packet */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 3);
- if( skb == NULL ) {
-				printk(KERN_WARNING "%s: Couldn't allocate memory for packet (len %d)\n",
- dev->name, pkt_len);
- eth16i_skip_packet(dev);
- dev->stats.rx_dropped++;
- break;
- }
-
- skb_reserve(skb,2);
-
- /*
-			   Now let's get the packet out of the buffer.
-			   The size is (pkt_len + 1) >> 1 because we are reading
-			   words, so the count has to be rounded up to an even
-			   number of bytes.
- */
-
- if(ioaddr < 0x1000)
- insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
- (pkt_len + 1) >> 1);
- else {
- unsigned char *buf = skb_put(skb, pkt_len);
- unsigned char frag = pkt_len % 4;
-
- insl(ioaddr + DATAPORT, buf, pkt_len >> 2);
-
- if(frag != 0) {
- unsigned short rest[2];
- rest[0] = inw( ioaddr + DATAPORT );
- if(frag == 3)
- rest[1] = inw( ioaddr + DATAPORT );
-
- memcpy(buf + (pkt_len & 0xfffc), (char *)rest, frag);
- }
- }
-
- skb->protocol=eth_type_trans(skb, dev);
-
- if( eth16i_debug > 5 ) {
- int i;
- printk(KERN_DEBUG "%s: Received packet of length %d.\n",
- dev->name, pkt_len);
- for(i = 0; i < 14; i++)
- printk(KERN_DEBUG " %02x", skb->data[i]);
- printk(KERN_DEBUG ".\n");
- }
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
-
- } /* else */
-
- if(--boguscount <= 0)
- break;
-
- } /* while */
-}
-
-static irqreturn_t eth16i_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct eth16i_local *lp;
- int ioaddr = 0, status;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- /* Turn off all interrupts from adapter */
- outw(ETH16I_INTR_OFF, ioaddr + TX_INTR_REG);
-
- /* eth16i_tx won't be called */
- spin_lock(&lp->lock);
-
- status = inw(ioaddr + TX_STATUS_REG); /* Get the status */
- outw(status, ioaddr + TX_STATUS_REG); /* Clear status bits */
-
- if (status)
- handled = 1;
-
- if(eth16i_debug > 3)
- printk(KERN_DEBUG "%s: Interrupt with status %04x.\n", dev->name, status);
-
- if( status & 0x7f00 ) {
-
- dev->stats.rx_errors++;
-
- if(status & (BUS_RD_ERR << 8) )
- printk(KERN_WARNING "%s: Bus read error.\n",dev->name);
- if(status & (SHORT_PKT_ERR << 8) ) dev->stats.rx_length_errors++;
- if(status & (ALIGN_ERR << 8) ) dev->stats.rx_frame_errors++;
- if(status & (CRC_ERR << 8) ) dev->stats.rx_crc_errors++;
- if(status & (RX_BUF_OVERFLOW << 8) ) dev->stats.rx_over_errors++;
- }
- if( status & 0x001a) {
-
- dev->stats.tx_errors++;
-
- if(status & CR_LOST) dev->stats.tx_carrier_errors++;
- if(status & TX_JABBER_ERR) dev->stats.tx_window_errors++;
-
-#if 0
- if(status & COLLISION) {
- dev->stats.collisions +=
- ((inb(ioaddr+TRANSMIT_MODE_REG) & 0xF0) >> 4);
- }
-#endif
- if(status & COLLISIONS_16) {
- if(lp->col_16 < MAX_COL_16) {
- lp->col_16++;
- dev->stats.collisions++;
- /* Resume transmitting, skip failed packet */
- outb(0x02, ioaddr + COL_16_REG);
- }
- else {
-				printk(KERN_WARNING "%s: bailing out after too many consecutive 16-collision runs. Network cable problem?\n", dev->name);
- }
- }
- }
-
- if( status & 0x00ff ) { /* Let's check the transmit status reg */
-
- if(status & TX_DONE) { /* The transmit has been done */
- dev->stats.tx_packets = lp->tx_buffered_packets;
- dev->stats.tx_bytes += lp->tx_buffered_bytes;
- lp->col_16 = 0;
-
-			if(lp->tx_queue) { /* Are there still packets? */
-				/* There were packet(s), so start transmitting and also
-				   write how many packets are to be sent */
- outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- lp->tx_started = 1;
- }
- else {
- lp->tx_started = 0;
- }
- netif_wake_queue(dev);
- }
- }
-
- if( ( status & 0x8000 ) ||
- ( (inb(ioaddr + RECEIVE_MODE_REG) & RX_BUFFER_EMPTY) == 0) ) {
- eth16i_rx(dev); /* We have packet in receive buffer */
- }
-
- /* Turn interrupts back on */
- outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
-
- if(lp->tx_queue_len < lp->tx_buf_size - (ETH_FRAME_LEN + 2)) {
-		/* There is still room for one more packet in the tx buffer */
- netif_wake_queue(dev);
- }
-
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static void eth16i_skip_packet(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- inw(ioaddr + DATAPORT);
- inw(ioaddr + DATAPORT);
- inw(ioaddr + DATAPORT);
-
- outb(SKIP_RX_PACKET, ioaddr + FILTER_SELF_RX_REG);
- while( inb( ioaddr + FILTER_SELF_RX_REG ) != 0);
-}
-
-static void eth16i_reset(struct net_device *dev)
-{
- struct eth16i_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- if(eth16i_debug > 1)
- printk(KERN_DEBUG "%s: Resetting device.\n", dev->name);
-
- BITSET(ioaddr + CONFIG_REG_0, DLC_EN);
- outw(0xffff, ioaddr + TX_STATUS_REG);
- eth16i_select_regbank(2, ioaddr);
-
- lp->tx_started = 0;
- lp->tx_buf_busy = 0;
- lp->tx_queue = 0;
- lp->tx_queue_len = 0;
- BITCLR(ioaddr + CONFIG_REG_0, DLC_EN);
-}
-
-static void eth16i_multicast(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
- {
- outb(3, ioaddr + RECEIVE_MODE_REG);
- } else {
- outb(2, ioaddr + RECEIVE_MODE_REG);
- }
-}
-
-static void eth16i_select_regbank(unsigned char banknbr, int ioaddr)
-{
- unsigned char data;
-
- data = inb(ioaddr + CONFIG_REG_1);
- outb( ((data & 0xF3) | ( (banknbr & 0x03) << 2)), ioaddr + CONFIG_REG_1);
-}
-
-#ifdef MODULE
-
-static ushort eth16i_parse_mediatype(const char* s)
-{
- if(!s)
- return E_PORT_FROM_EPROM;
-
- if (!strncmp(s, "bnc", 3))
- return E_PORT_BNC;
- else if (!strncmp(s, "tp", 2))
- return E_PORT_TP;
- else if (!strncmp(s, "dix", 3))
- return E_PORT_DIX;
- else if (!strncmp(s, "auto", 4))
- return E_PORT_AUTO;
- else
- return E_PORT_FROM_EPROM;
-}
-
-#define MAX_ETH16I_CARDS 4 /* Max number of Eth16i cards per module */
-
-static struct net_device *dev_eth16i[MAX_ETH16I_CARDS];
-static int io[MAX_ETH16I_CARDS];
-#if 0
-static int irq[MAX_ETH16I_CARDS];
-#endif
-static char* mediatype[MAX_ETH16I_CARDS];
-static int debug = -1;
-
-MODULE_AUTHOR("Mika Kuoppala <miku@iki.fi>");
-MODULE_DESCRIPTION("ICL EtherTeam 16i/32 driver");
-MODULE_LICENSE("GPL");
-
-
-module_param_array(io, int, NULL, 0);
-MODULE_PARM_DESC(io, "eth16i I/O base address(es)");
-
-#if 0
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(irq, "eth16i interrupt request number");
-#endif
-
-module_param_array(mediatype, charp, NULL, 0);
-MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,eprom)");
-
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
- struct net_device *dev;
-
- for (this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
- dev = alloc_etherdev(sizeof(struct eth16i_local));
- if (!dev)
- break;
-
- dev->base_addr = io[this_dev];
-
- if(debug != -1)
- eth16i_debug = debug;
-
- if(eth16i_debug > 1)
- printk(KERN_NOTICE "eth16i(%d): interface type %s\n", this_dev, mediatype[this_dev] ? mediatype[this_dev] : "none" );
-
- dev->if_port = eth16i_parse_mediatype(mediatype[this_dev]);
-
- if(io[this_dev] == 0) {
- if (this_dev != 0) { /* Only autoprobe 1st one */
- free_netdev(dev);
- break;
- }
-
- printk(KERN_NOTICE "eth16i.c: Presently autoprobing (not recommended) for a single card.\n");
- }
-
- if (do_eth16i_probe(dev) == 0) {
- dev_eth16i[found++] = dev;
- continue;
- }
-		printk(KERN_WARNING "eth16i.c: No Eth16i card found (i/o = 0x%x).\n",
- io[this_dev]);
- free_netdev(dev);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for(this_dev = 0; this_dev < MAX_ETH16I_CARDS; this_dev++) {
- struct net_device *dev = dev_eth16i[this_dev];
-
- if (netdev_priv(dev)) {
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- release_region(dev->base_addr, ETH16I_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/3c505.c b/drivers/net/ethernet/i825xx/3c505.c
deleted file mode 100644
index 6a5c21b82c5..00000000000
--- a/drivers/net/ethernet/i825xx/3c505.c
+++ /dev/null
@@ -1,1671 +0,0 @@
-/*
- * Linux Ethernet device driver for the 3Com Etherlink Plus (3C505)
- * By Craig Southeren, Juha Laiho and Philip Blundell
- *
- * 3c505.c This module implements an interface to the 3Com
- * Etherlink Plus (3c505) Ethernet card. Linux device
- * driver interface reverse engineered from the Linux 3C509
- * device drivers. Some 3C505 information gleaned from
- *		the Crynwr packet driver. Still, this driver would not
- *		be here without the 3C505 technical reference provided by
- * 3Com.
- *
- * $Id: 3c505.c,v 1.10 1996/04/16 13:06:27 phil Exp $
- *
- * Authors: Linux 3c505 device driver by
- * Craig Southeren, <craigs@ineluki.apana.org.au>
- * Final debugging by
- * Andrew Tridgell, <tridge@nimbus.anu.edu.au>
- * Auto irq/address, tuning, cleanup and v1.1.4+ kernel mods by
- * Juha Laiho, <jlaiho@ichaos.nullnet.fi>
- * Linux 3C509 driver by
- * Donald Becker, <becker@super.org>
- * (Now at <becker@scyld.com>)
- * Crynwr packet driver by
- * Krishnan Gopalan and Gregg Stefancik,
- * Clemson University Engineering Computer Operations.
- * Portions of the code have been adapted from the 3c505
- * driver for NCSA Telnet by Bruce Orchard and later
- * modified by Warren Van Houten and krus@diku.dk.
- * 3C505 technical information provided by
- * Terry Murphy, of 3Com Network Adapter Division
- * Linux 1.3.0 changes by
- * Alan Cox <Alan.Cox@linux.org>
- * More debugging, DMA support, currently maintained by
- * Philip Blundell <philb@gnu.org>
- * Multicard/soft configurable dma channel/rev 2 hardware support
- * by Christopher Collins <ccollins@pcug.org.au>
- * Ethtool support (jgarzik), 11/17/2001
- */
-
-#define DRV_NAME "3c505"
-#define DRV_VERSION "1.10a"
-
-
-/* Theory of operation:
- *
- * The 3c505 is quite an intelligent board. All communication with it is done
- * by means of Primary Command Blocks (PCBs); these are transferred using PIO
- * through the command register. The card has 256k of on-board RAM, which is
- * used to buffer received packets. It might seem at first that more buffers
- * are better, but in fact this isn't true. From my tests, it seems that
- * more than about 10 buffers are unnecessary, and there is a noticeable
- * performance hit in having more active on the card. So the majority of the
- * card's memory isn't, in fact, used. Sadly, the card only has one transmit
- * buffer and, short of loading our own firmware into it (which is what some
- * drivers resort to) there's nothing we can do about this.
- *
- * We keep up to 4 "receive packet" commands active on the board at a time.
- * When a packet comes in, so long as there is a receive command active, the
- * board will send us a "packet received" PCB and then add the data for that
- * packet to the DMA queue. If a DMA transfer is not already in progress, we
- * set one up to start uploading the data. We have to maintain a list of
- * backlogged receive packets, because the card may decide to tell us about
- * a newly-arrived packet at any time, and we may not be able to start a DMA
- * transfer immediately (ie one may already be going on). We can't NAK the
- * PCB, because then it would throw the packet away.
- *
- * Trying to send a PCB to the card at the wrong moment seems to have bad
- * effects. If we send it a transmit PCB while a receive DMA is happening,
- * it will just NAK the PCB and so we will have wasted our time. Worse, it
- * sometimes seems to interrupt the transfer. The majority of the low-level
- * code is protected by one huge semaphore -- "busy" -- which is set whenever
- * it probably isn't safe to do anything to the card. The receive routine
- * must gain a lock on "busy" before it can start a DMA transfer, and the
- * transmit routine must gain a lock before it sends the first PCB to the card.
- * The send_pcb() routine also has an internal semaphore to protect it against
- * being re-entered (which would be disastrous) -- this is needed because
- * several things can happen asynchronously (re-priming the receiver and
- * asking the card for statistics, for example). send_pcb() will also refuse
- * to talk to the card at all if a DMA upload is happening. The higher-level
- * networking code will reschedule a later retry if some part of the driver
- * is blocked. In practice, this doesn't seem to happen very often.
- */
-
-/* This driver may now work with revision 2.x hardware, since all the read
- * operations on the HCR have been removed (we now keep our own softcopy).
- * But I don't have an old card to test it on.
- *
- * This has had the bad effect that the autoprobe routine is now a bit
- * less friendly to other devices. However, it was never very good
- * before, so I doubt it will hurt anybody.
- */
-
-/* The driver is a mess. I took Craig's and Juha's code, and hacked it firstly
- * to make it more reliable, and secondly to add DMA mode. Many things could
- * probably be done better; the concurrency protection is particularly awful.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-
-#include "3c505.h"
-
-/*********************************************************
- *
- * define debug messages here as common strings to reduce space
- *
- *********************************************************/
-
-#define timeout_msg "*** timeout at %s:%s (line %d) ***\n"
-#define TIMEOUT_MSG(lineno) \
- pr_notice(timeout_msg, __FILE__, __func__, (lineno))
-
-#define invalid_pcb_msg "*** invalid pcb length %d at %s:%s (line %d) ***\n"
-#define INVALID_PCB_MSG(len) \
- pr_notice(invalid_pcb_msg, (len), __FILE__, __func__, __LINE__)
-
-#define search_msg "%s: Looking for 3c505 adapter at address %#x..."
-
-#define stilllooking_msg "still looking..."
-
-#define found_msg "found.\n"
-
-#define notfound_msg "not found (reason = %d)\n"
-
-#define couldnot_msg "%s: 3c505 not found\n"
-
-/*********************************************************
- *
- * various other debug stuff
- *
- *********************************************************/
-
-#ifdef ELP_DEBUG
-static int elp_debug = ELP_DEBUG;
-#else
-static int elp_debug;
-#endif
-#define debug elp_debug
-
-/*
- * 0 = no messages (well, some)
- * 1 = messages when high level commands performed
- * 2 = messages when low level commands performed
- * 3 = messages when interrupts received
- */
-
-/*****************************************************************
- *
- * List of I/O-addresses we try to auto-sense
- * Last element MUST BE 0!
- *****************************************************************/
-
-static int addr_list[] __initdata = {0x300, 0x280, 0x310, 0};
-
-/* Dma Memory related stuff */
-
-static unsigned long dma_mem_alloc(int size)
-{
- int order = get_order(size);
- return __get_dma_pages(GFP_KERNEL, order);
-}
-
-
-/*****************************************************************
- *
- * Functions for I/O (note the inline !)
- *
- *****************************************************************/
-
-static inline unsigned char inb_status(unsigned int base_addr)
-{
- return inb(base_addr + PORT_STATUS);
-}
-
-static inline int inb_command(unsigned int base_addr)
-{
- return inb(base_addr + PORT_COMMAND);
-}
-
-static inline void outb_control(unsigned char val, struct net_device *dev)
-{
- outb(val, dev->base_addr + PORT_CONTROL);
- ((elp_device *)(netdev_priv(dev)))->hcr_val = val;
-}
-
-#define HCR_VAL(x) (((elp_device *)(netdev_priv(x)))->hcr_val)
-
-static inline void outb_command(unsigned char val, unsigned int base_addr)
-{
- outb(val, base_addr + PORT_COMMAND);
-}
-
-static inline unsigned int backlog_next(unsigned int n)
-{
- return (n + 1) % BACKLOG_SIZE;
-}
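-
-/* Editor's note -- an illustrative sketch, not part of the original driver:
- * the receive backlog described in the comment at the top of this file is a
- * small ring of pending packet lengths, advanced with the same wrap-around
- * as backlog_next() above.  A minimal enqueue step (the helper name is
- * invented for the illustration) could look like this:
- */
-#if 0 /* illustration only */
-static int backlog_push(elp_device *adapter, unsigned int len)
-{
-	unsigned int next = backlog_next(adapter->rx_backlog.in);
-
-	if (next == adapter->rx_backlog.out)
-		return -1;	/* ring full: the PCB must be NAKed instead */
-	adapter->rx_backlog.length[adapter->rx_backlog.in] = len;
-	adapter->rx_backlog.in = next;
-	return 0;
-}
-#endif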
-
-/*****************************************************************
- *
- * useful functions for accessing the adapter
- *
- *****************************************************************/
-
-/*
- * use this routine when accessing the ASF bits as they are
- * changed asynchronously by the adapter
- */
-
-/* get adapter PCB status */
-#define GET_ASF(addr) \
- (get_status(addr)&ASF_PCB_MASK)
-
-static inline int get_status(unsigned int base_addr)
-{
- unsigned long timeout = jiffies + 10*HZ/100;
- register int stat1;
- do {
- stat1 = inb_status(base_addr);
- } while (stat1 != inb_status(base_addr) && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- return stat1;
-}
-
-static inline void set_hsf(struct net_device *dev, int hsf)
-{
- elp_device *adapter = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&adapter->lock, flags);
- outb_control((HCR_VAL(dev) & ~HSF_PCB_MASK) | hsf, dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
-}
-
-static bool start_receive(struct net_device *, pcb_struct *);
-
-static inline void adapter_reset(struct net_device *dev)
-{
- unsigned long timeout;
- elp_device *adapter = netdev_priv(dev);
- unsigned char orig_hcr = adapter->hcr_val;
-
- outb_control(0, dev);
-
- if (inb_status(dev->base_addr) & ACRF) {
- do {
- inb_command(dev->base_addr);
- timeout = jiffies + 2*HZ/100;
- while (time_before_eq(jiffies, timeout) && !(inb_status(dev->base_addr) & ACRF));
- } while (inb_status(dev->base_addr) & ACRF);
- set_hsf(dev, HSF_PCB_NAK);
- }
- outb_control(adapter->hcr_val | ATTN | DIR, dev);
- mdelay(10);
- outb_control(adapter->hcr_val & ~ATTN, dev);
- mdelay(10);
- outb_control(adapter->hcr_val | FLSH, dev);
- mdelay(10);
- outb_control(adapter->hcr_val & ~FLSH, dev);
- mdelay(10);
-
- outb_control(orig_hcr, dev);
- if (!start_receive(dev, &adapter->tx_pcb))
- pr_err("%s: start receive command failed\n", dev->name);
-}
-
-/* Check to make sure that a DMA transfer hasn't timed out. This should
- * never happen in theory, but seems to occur occasionally if the card gets
- * prodded at the wrong time.
- */
-static inline void check_3c505_dma(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- if (adapter->dmaing && time_after(jiffies, adapter->current_dma.start_time + 10)) {
- unsigned long flags, f;
- pr_err("%s: DMA %s timed out, %d bytes left\n", dev->name,
- adapter->current_dma.direction ? "download" : "upload",
- get_dma_residue(dev->dma));
- spin_lock_irqsave(&adapter->lock, flags);
- adapter->dmaing = 0;
- adapter->busy = 0;
-
- f=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(f);
-
- if (adapter->rx_active)
- adapter->rx_active--;
- outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
- }
-}
-
-/* Primitive functions used by send_pcb() */
-static inline bool send_pcb_slow(unsigned int base_addr, unsigned char byte)
-{
- unsigned long timeout;
- outb_command(byte, base_addr);
- for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
- if (inb_status(base_addr) & HCRE)
- return false;
- }
- pr_warning("3c505: send_pcb_slow timed out\n");
- return true;
-}
-
-static inline bool send_pcb_fast(unsigned int base_addr, unsigned char byte)
-{
- unsigned int timeout;
- outb_command(byte, base_addr);
- for (timeout = 0; timeout < 40000; timeout++) {
- if (inb_status(base_addr) & HCRE)
- return false;
- }
- pr_warning("3c505: send_pcb_fast timed out\n");
- return true;
-}
-
-/* Check to see if the receiver needs restarting, and kick it if so */
-static inline void prime_rx(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- while (adapter->rx_active < ELP_RX_PCBS && netif_running(dev)) {
- if (!start_receive(dev, &adapter->itx_pcb))
- break;
- }
-}
-
-/*****************************************************************
- *
- * send_pcb
- * Send a PCB to the adapter.
- *
- * output byte to command reg --<--+
- * wait until HCRE is non zero |
- * loop until all bytes sent -->--+
- * set HSF1 and HSF2 to 1
- * output pcb length
- * wait until ASF gives ACK or NAK
- * set HSF1 and HSF2 to 0
- *
- *****************************************************************/
-
-/* This can be quite slow -- the adapter is allowed to take up to 40ms
- * to respond to the initial interrupt.
- *
- * We run initially with interrupts turned on, but with a semaphore set
- * so that nobody tries to re-enter this code. Once the first byte has
- * gone through, we turn interrupts off and then send the others (the
- * timeout is reduced to 500us).
- */
-
-static bool send_pcb(struct net_device *dev, pcb_struct * pcb)
-{
- int i;
- unsigned long timeout;
- elp_device *adapter = netdev_priv(dev);
- unsigned long flags;
-
- check_3c505_dma(dev);
-
- if (adapter->dmaing && adapter->current_dma.direction == 0)
- return false;
-
- /* Avoid contention */
- if (test_and_set_bit(1, &adapter->send_pcb_semaphore)) {
- if (elp_debug >= 3) {
- pr_debug("%s: send_pcb entered while threaded\n", dev->name);
- }
- return false;
- }
- /*
- * load each byte into the command register and
- * wait for the HCRE bit to indicate the adapter
- * had read the byte
- */
- set_hsf(dev, 0);
-
- if (send_pcb_slow(dev->base_addr, pcb->command))
- goto abort;
-
- spin_lock_irqsave(&adapter->lock, flags);
-
- if (send_pcb_fast(dev->base_addr, pcb->length))
- goto sti_abort;
-
- for (i = 0; i < pcb->length; i++) {
- if (send_pcb_fast(dev->base_addr, pcb->data.raw[i]))
- goto sti_abort;
- }
-
- outb_control(adapter->hcr_val | 3, dev); /* signal end of PCB */
- outb_command(2 + pcb->length, dev->base_addr);
-
- /* now wait for the acknowledgement */
- spin_unlock_irqrestore(&adapter->lock, flags);
-
- for (timeout = jiffies + 5*HZ/100; time_before(jiffies, timeout);) {
- switch (GET_ASF(dev->base_addr)) {
- case ASF_PCB_ACK:
- adapter->send_pcb_semaphore = 0;
- return true;
-
- case ASF_PCB_NAK:
-#ifdef ELP_DEBUG
- pr_debug("%s: send_pcb got NAK\n", dev->name);
-#endif
- goto abort;
- }
- }
-
- if (elp_debug >= 1)
- pr_debug("%s: timeout waiting for PCB acknowledge (status %02x)\n",
- dev->name, inb_status(dev->base_addr));
- goto abort;
-
- sti_abort:
- spin_unlock_irqrestore(&adapter->lock, flags);
- abort:
- adapter->send_pcb_semaphore = 0;
- return false;
-}
-
-
-/*****************************************************************
- *
- * receive_pcb
- * Read a PCB from the adapter
- *
- * wait for ACRF to be non-zero ---<---+
- * input a byte |
- * if ASF1 and ASF2 were not both one |
- * before byte was read, loop --->---+
- * set HSF1 and HSF2 for ack
- *
- *****************************************************************/
-
-static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
-{
- int i, j;
- int total_length;
- int stat;
- unsigned long timeout;
- unsigned long flags;
-
- elp_device *adapter = netdev_priv(dev);
-
- set_hsf(dev, 0);
-
- /* get the command code */
- timeout = jiffies + 2*HZ/100;
- while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- return false;
- }
- pcb->command = inb_command(dev->base_addr);
-
- /* read the data length */
- timeout = jiffies + 3*HZ/100;
- while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- pr_info("%s: status %02x\n", dev->name, stat);
- return false;
- }
- pcb->length = inb_command(dev->base_addr);
-
- if (pcb->length > MAX_PCB_DATA) {
- INVALID_PCB_MSG(pcb->length);
- adapter_reset(dev);
- return false;
- }
- /* read the data */
- spin_lock_irqsave(&adapter->lock, flags);
- for (i = 0; i < MAX_PCB_DATA; i++) {
- for (j = 0; j < 20000; j++) {
- stat = get_status(dev->base_addr);
- if (stat & ACRF)
- break;
- }
- pcb->data.raw[i] = inb_command(dev->base_addr);
- if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
- break;
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
- if (i >= MAX_PCB_DATA) {
- INVALID_PCB_MSG(i);
- return false;
- }
- if (j >= 20000) {
- TIMEOUT_MSG(__LINE__);
- return false;
- }
- /* the last "data" byte was really the length! */
- total_length = pcb->data.raw[i];
-
- /* safety check total length vs data length */
- if (total_length != (pcb->length + 2)) {
- if (elp_debug >= 2)
- pr_warning("%s: mangled PCB received\n", dev->name);
- set_hsf(dev, HSF_PCB_NAK);
- return false;
- }
-
- if (pcb->command == CMD_RECEIVE_PACKET_COMPLETE) {
- if (test_and_set_bit(0, (void *) &adapter->busy)) {
- if (backlog_next(adapter->rx_backlog.in) == adapter->rx_backlog.out) {
- set_hsf(dev, HSF_PCB_NAK);
- pr_warning("%s: PCB rejected, transfer in progress and backlog full\n", dev->name);
- pcb->command = 0;
- return true;
- } else {
- pcb->command = 0xff;
- }
- }
- }
- set_hsf(dev, HSF_PCB_ACK);
- return true;
-}
-
-/******************************************************
- *
- * queue a receive command on the adapter so we will get an
- * interrupt when a packet is received.
- *
- ******************************************************/
-
-static bool start_receive(struct net_device *dev, pcb_struct * tx_pcb)
-{
- bool status;
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: restarting receiver\n", dev->name);
- tx_pcb->command = CMD_RECEIVE_PACKET;
- tx_pcb->length = sizeof(struct Rcv_pkt);
- tx_pcb->data.rcv_pkt.buf_seg
- = tx_pcb->data.rcv_pkt.buf_ofs = 0; /* Unused */
- tx_pcb->data.rcv_pkt.buf_len = 1600;
- tx_pcb->data.rcv_pkt.timeout = 0; /* set timeout to zero */
- status = send_pcb(dev, tx_pcb);
- if (status)
- adapter->rx_active++;
- return status;
-}
-
-/******************************************************
- *
- * extract a packet from the adapter
- * this routine is only called from within the interrupt
- * service routine, so no cli/sti calls are needed
- * note that the length is always assumed to be even
- *
- ******************************************************/
-
-static void receive_packet(struct net_device *dev, int len)
-{
- int rlen;
- elp_device *adapter = netdev_priv(dev);
- void *target;
- struct sk_buff *skb;
- unsigned long flags;
-
- rlen = (len + 1) & ~1;
- skb = netdev_alloc_skb(dev, rlen + 2);
-
- if (!skb) {
- pr_warning("%s: memory squeeze, dropping packet\n", dev->name);
- target = adapter->dma_buffer;
- adapter->current_dma.target = NULL;
- /* FIXME: stats */
- return;
- }
-
- skb_reserve(skb, 2);
- target = skb_put(skb, rlen);
- if ((unsigned long)(target + rlen) >= MAX_DMA_ADDRESS) {
- adapter->current_dma.target = target;
- target = adapter->dma_buffer;
- } else {
- adapter->current_dma.target = NULL;
- }
-
- /* if this happens, we die */
- if (test_and_set_bit(0, (void *) &adapter->dmaing))
- pr_err("%s: rx blocked, DMA in progress, dir %d\n",
- dev->name, adapter->current_dma.direction);
-
- adapter->current_dma.direction = 0;
- adapter->current_dma.length = rlen;
- adapter->current_dma.skb = skb;
- adapter->current_dma.start_time = jiffies;
-
- outb_control(adapter->hcr_val | DIR | TCEN | DMAE, dev);
-
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- clear_dma_ff(dev->dma);
- set_dma_mode(dev->dma, 0x04); /* dma read */
- set_dma_addr(dev->dma, isa_virt_to_bus(target));
- set_dma_count(dev->dma, rlen);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- if (elp_debug >= 3) {
- pr_debug("%s: rx DMA transfer started\n", dev->name);
- }
-
- if (adapter->rx_active)
- adapter->rx_active--;
-
- if (!adapter->busy)
- pr_warning("%s: receive_packet called, busy not set.\n", dev->name);
-}
-
-/******************************************************
- *
- * interrupt handler
- *
- ******************************************************/
-
-static irqreturn_t elp_interrupt(int irq, void *dev_id)
-{
- int len;
- int dlen;
- int icount = 0;
- struct net_device *dev = dev_id;
- elp_device *adapter = netdev_priv(dev);
- unsigned long timeout;
-
- spin_lock(&adapter->lock);
-
- do {
- /*
- * has a DMA transfer finished?
- */
- if (inb_status(dev->base_addr) & DONE) {
- if (!adapter->dmaing)
- pr_warning("%s: phantom DMA completed\n", dev->name);
-
- if (elp_debug >= 3)
- pr_debug("%s: %s DMA complete, status %02x\n", dev->name,
- adapter->current_dma.direction ? "tx" : "rx",
- inb_status(dev->base_addr));
-
- outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
- if (adapter->current_dma.direction) {
- dev_kfree_skb_irq(adapter->current_dma.skb);
- } else {
- struct sk_buff *skb = adapter->current_dma.skb;
- if (skb) {
- if (adapter->current_dma.target) {
- /* have already done the skb_put() */
- memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
- }
- skb->protocol = eth_type_trans(skb,dev);
- dev->stats.rx_bytes += skb->len;
- netif_rx(skb);
- }
- }
- adapter->dmaing = 0;
- if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
- int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
- adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
- if (elp_debug >= 2)
- pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t);
- receive_packet(dev, t);
- } else {
- adapter->busy = 0;
- }
- } else {
- /* has one timed out? */
- check_3c505_dma(dev);
- }
-
- /*
- * receive a PCB from the adapter
- */
- timeout = jiffies + 3*HZ/100;
- while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
- if (receive_pcb(dev, &adapter->irx_pcb)) {
- switch (adapter->irx_pcb.command)
- {
- case 0:
- break;
- /*
- * received a packet - this must be handled fast
- */
- case 0xff:
- case CMD_RECEIVE_PACKET_COMPLETE:
- /* if the device isn't open, don't pass packets up the stack */
- if (!netif_running(dev))
- break;
- len = adapter->irx_pcb.data.rcv_resp.pkt_len;
- dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
- if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
- pr_err("%s: interrupt - packet not received correctly\n", dev->name);
- } else {
- if (elp_debug >= 3) {
- pr_debug("%s: interrupt - packet received of length %i (%i)\n",
- dev->name, len, dlen);
- }
- if (adapter->irx_pcb.command == 0xff) {
- if (elp_debug >= 2)
- pr_debug("%s: adding packet to backlog (len = %d)\n",
- dev->name, dlen);
- adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
- adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
- } else {
- receive_packet(dev, dlen);
- }
- if (elp_debug >= 3)
- pr_debug("%s: packet received\n", dev->name);
- }
- break;
-
- /*
- * 82586 configured correctly
- */
- case CMD_CONFIGURE_82586_RESPONSE:
- adapter->got[CMD_CONFIGURE_82586] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - configure response received\n", dev->name);
- break;
-
- /*
- * Adapter memory configuration
- */
- case CMD_CONFIGURE_ADAPTER_RESPONSE:
- adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Adapter memory configuration %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
- /*
- * Multicast list loading
- */
- case CMD_LOAD_MULTICAST_RESPONSE:
- adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Multicast address list loading %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
- /*
- * Station address setting
- */
- case CMD_SET_ADDRESS_RESPONSE:
- adapter->got[CMD_SET_STATION_ADDRESS] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: Ethernet address setting %s.\n", dev->name,
- adapter->irx_pcb.data.failed ? "failed" : "succeeded");
- break;
-
-
- /*
- * received board statistics
- */
- case CMD_NETWORK_STATISTICS_RESPONSE:
- dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
- dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
- dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
- dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
- dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
- dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
- adapter->got[CMD_NETWORK_STATISTICS] = 1;
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - statistics response received\n", dev->name);
- break;
-
- /*
- * sent a packet
- */
- case CMD_TRANSMIT_PACKET_COMPLETE:
- if (elp_debug >= 3)
- pr_debug("%s: interrupt - packet sent\n", dev->name);
- if (!netif_running(dev))
- break;
- switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
- case 0xffff:
- dev->stats.tx_aborted_errors++;
- pr_info("%s: transmit timed out, network cable problem?\n", dev->name);
- break;
- case 0xfffe:
- dev->stats.tx_fifo_errors++;
- pr_info("%s: transmit timed out, FIFO underrun\n", dev->name);
- break;
- }
- netif_wake_queue(dev);
- break;
-
- /*
- * some unknown PCB
- */
- default:
- pr_debug("%s: unknown PCB received - %2.2x\n",
- dev->name, adapter->irx_pcb.command);
- break;
- }
- } else {
- pr_warning("%s: failed to read PCB on interrupt\n", dev->name);
- adapter_reset(dev);
- }
- }
-
- } while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));
-
- prime_rx(dev);
-
- /*
- * indicate no longer in interrupt routine
- */
- spin_unlock(&adapter->lock);
- return IRQ_HANDLED;
-}
-
-
-/******************************************************
- *
- * open the board
- *
- ******************************************************/
-
-static int elp_open(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- int retval;
-
- if (elp_debug >= 3)
- pr_debug("%s: request to open device\n", dev->name);
-
- /*
- * make sure we actually found the device
- */
- if (adapter == NULL) {
- pr_err("%s: Opening a non-existent physical device\n", dev->name);
- return -EAGAIN;
- }
- /*
- * disable interrupts on the board
- */
- outb_control(0, dev);
-
- /*
- * clear any pending interrupts
- */
- inb_command(dev->base_addr);
- adapter_reset(dev);
-
- /*
- * no receive PCBs active
- */
- adapter->rx_active = 0;
-
- adapter->busy = 0;
- adapter->send_pcb_semaphore = 0;
- adapter->rx_backlog.in = 0;
- adapter->rx_backlog.out = 0;
-
- spin_lock_init(&adapter->lock);
-
- /*
- * install our interrupt service routine
- */
- if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) {
- pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
- return retval;
- }
- if ((retval = request_dma(dev->dma, dev->name))) {
- free_irq(dev->irq, dev);
- pr_err("%s: could not allocate DMA%d channel\n", dev->name, dev->dma);
- return retval;
- }
- adapter->dma_buffer = (void *) dma_mem_alloc(DMA_BUFFER_SIZE);
- if (!adapter->dma_buffer) {
- pr_err("%s: could not allocate DMA buffer\n", dev->name);
- free_dma(dev->dma);
- free_irq(dev->irq, dev);
- return -ENOMEM;
- }
- adapter->dmaing = 0;
-
- /*
- * enable interrupts on the board
- */
- outb_control(CMDE, dev);
-
- /*
- * configure adapter memory: we need 10 multicast addresses, default==0
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 3c505 memory configuration command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
- adapter->tx_pcb.data.memconf.cmd_q = 10;
- adapter->tx_pcb.data.memconf.rcv_q = 20;
- adapter->tx_pcb.data.memconf.mcast = 10;
- adapter->tx_pcb.data.memconf.frame = 20;
- adapter->tx_pcb.data.memconf.rcv_b = 20;
- adapter->tx_pcb.data.memconf.progs = 0;
- adapter->tx_pcb.length = sizeof(struct Memconf);
- adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send memory configuration command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-
-
- /*
- * configure adapter to receive broadcast messages and wait for response
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 82586 configure command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_82586;
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
- adapter->tx_pcb.length = 2;
- adapter->got[CMD_CONFIGURE_82586] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send 82586 configure command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-
- /* enable burst-mode DMA */
- /* outb(0x1, dev->base_addr + PORT_AUXDMA); */
-
- /*
- * queue receive commands to provide buffering
- */
- prime_rx(dev);
- if (elp_debug >= 3)
- pr_debug("%s: %d receive PCBs active\n", dev->name, adapter->rx_active);
-
- /*
- * device is now officially open!
- */
-
- netif_start_queue(dev);
- return 0;
-}
-
-
-/******************************************************
- *
- * send a packet to the adapter
- *
- ******************************************************/
-
-static bool send_packet(struct net_device *dev, struct sk_buff *skb)
-{
- elp_device *adapter = netdev_priv(dev);
- unsigned long target;
- unsigned long flags;
-
- /*
- * make sure the length is even and no shorter than 60 bytes
- */
- unsigned int nlen = (((skb->len < 60) ? 60 : skb->len) + 1) & (~1);
-
- if (test_and_set_bit(0, (void *) &adapter->busy)) {
- if (elp_debug >= 2)
- pr_debug("%s: transmit blocked\n", dev->name);
- return false;
- }
-
- dev->stats.tx_bytes += nlen;
-
- /*
- * send the adapter a transmit packet command. Ignore segment and offset
- * and make sure the length is even
- */
- adapter->tx_pcb.command = CMD_TRANSMIT_PACKET;
- adapter->tx_pcb.length = sizeof(struct Xmit_pkt);
- adapter->tx_pcb.data.xmit_pkt.buf_ofs
- = adapter->tx_pcb.data.xmit_pkt.buf_seg = 0; /* Unused */
- adapter->tx_pcb.data.xmit_pkt.pkt_len = nlen;
-
- if (!send_pcb(dev, &adapter->tx_pcb)) {
- adapter->busy = 0;
- return false;
- }
- /* if this happens, we die */
- if (test_and_set_bit(0, (void *) &adapter->dmaing))
- pr_debug("%s: tx: DMA %d in progress\n", dev->name, adapter->current_dma.direction);
-
- adapter->current_dma.direction = 1;
- adapter->current_dma.start_time = jiffies;
-
- if ((unsigned long)(skb->data + nlen) >= MAX_DMA_ADDRESS || nlen != skb->len) {
- skb_copy_from_linear_data(skb, adapter->dma_buffer, nlen);
- memset(adapter->dma_buffer+skb->len, 0, nlen-skb->len);
- target = isa_virt_to_bus(adapter->dma_buffer);
- }
- else {
- target = isa_virt_to_bus(skb->data);
- }
- adapter->current_dma.skb = skb;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- clear_dma_ff(dev->dma);
- set_dma_mode(dev->dma, 0x48); /* dma memory -> io */
- set_dma_addr(dev->dma, target);
- set_dma_count(dev->dma, nlen);
- outb_control(adapter->hcr_val | DMAE | TCEN, dev);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- if (elp_debug >= 3)
- pr_debug("%s: DMA transfer started\n", dev->name);
-
- return true;
-}
-
-/*
- * The upper layer thinks we timed out
- */
-
-static void elp_timeout(struct net_device *dev)
-{
- int stat;
-
- stat = inb_status(dev->base_addr);
- pr_warning("%s: transmit timed out, lost %s?\n", dev->name,
- (stat & ACRF) ? "interrupt" : "command");
- if (elp_debug >= 1)
- pr_debug("%s: status %#02x\n", dev->name, stat);
- dev->trans_start = jiffies; /* prevent tx timeout */
- dev->stats.tx_dropped++;
- netif_wake_queue(dev);
-}
-
-/******************************************************
- *
- * start the transmitter
- * return NETDEV_TX_OK if sent OK, else NETDEV_TX_BUSY
- *
- ******************************************************/
-
-static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- unsigned long flags;
- elp_device *adapter = netdev_priv(dev);
-
- spin_lock_irqsave(&adapter->lock, flags);
- check_3c505_dma(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request to send packet of length %d\n", dev->name, (int) skb->len);
-
- netif_stop_queue(dev);
-
- /*
- * send the packet at skb->data for skb->len
- */
- if (!send_packet(dev, skb)) {
- if (elp_debug >= 2) {
- pr_debug("%s: failed to transmit packet\n", dev->name);
- }
- spin_unlock_irqrestore(&adapter->lock, flags);
- return NETDEV_TX_BUSY;
- }
- if (elp_debug >= 3)
- pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
-
- prime_rx(dev);
- spin_unlock_irqrestore(&adapter->lock, flags);
- netif_start_queue(dev);
- return NETDEV_TX_OK;
-}
-
-/******************************************************
- *
- * return statistics on the board
- *
- ******************************************************/
-
-static struct net_device_stats *elp_get_stats(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request for stats\n", dev->name);
-
- /* If the device is closed, just return the latest stats we have,
- - we cannot query the adapter without interrupts */
- if (!netif_running(dev))
- return &dev->stats;
-
- /* send a get statistics command to the board */
- adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
- adapter->tx_pcb.length = 0;
- adapter->got[CMD_NETWORK_STATISTICS] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send get statistics command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- return &dev->stats;
- }
- }
-
- /* statistics are now up to date */
- return &dev->stats;
-}
-
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-/******************************************************
- *
- * close the board
- *
- ******************************************************/
-
-static int elp_close(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
-
- if (elp_debug >= 3)
- pr_debug("%s: request to close device\n", dev->name);
-
- netif_stop_queue(dev);
-
- /* Someone may request the device statistic information even when
- * the interface is closed. The following will update the statistics
- * structure in the driver, so we'll be able to give current statistics.
- */
- (void) elp_get_stats(dev);
-
- /*
- * disable interrupts on the board
- */
- outb_control(0, dev);
-
- /*
- * release the IRQ
- */
- free_irq(dev->irq, dev);
-
- free_dma(dev->dma);
- free_pages((unsigned long) adapter->dma_buffer, get_order(DMA_BUFFER_SIZE));
-
- return 0;
-}
-
-
-/************************************************************
- *
- * Set multicast list
- * num_addrs==0: clear mc_list
- * num_addrs==-1: set promiscuous mode
- * num_addrs>0: set mc_list
- *
- ************************************************************/
-
-static void elp_set_mc_list(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i;
- unsigned long flags;
-
- if (elp_debug >= 3)
- pr_debug("%s: request to set multicast list\n", dev->name);
-
- spin_lock_irqsave(&adapter->lock, flags);
-
- if (!(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- /* send a "load multicast list" command to the board, max 10 addrs/cmd */
- /* if num_addrs==0 the list will be cleared */
- adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(adapter->tx_pcb.data.multicast[i++],
- ha->addr, 6);
- adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- pr_err("%s: couldn't send set_multicast command\n", dev->name);
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- while (adapter->got[CMD_LOAD_MULTICAST_LIST] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout)) {
- TIMEOUT_MSG(__LINE__);
- }
- }
- if (!netdev_mc_empty(dev))
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
- else /* num_addrs == 0 */
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
- } else
- adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_PROMISC;
- /*
- * configure adapter to receive messages (as specified above)
- * and wait for response
- */
- if (elp_debug >= 3)
- pr_debug("%s: sending 82586 configure command\n", dev->name);
- adapter->tx_pcb.command = CMD_CONFIGURE_82586;
- adapter->tx_pcb.length = 2;
- adapter->got[CMD_CONFIGURE_82586] = 0;
- if (!send_pcb(dev, &adapter->tx_pcb))
- {
- spin_unlock_irqrestore(&adapter->lock, flags);
- pr_err("%s: couldn't send 82586 configure command\n", dev->name);
- }
- else {
- unsigned long timeout = jiffies + TIMEOUT;
- spin_unlock_irqrestore(&adapter->lock, flags);
- while (adapter->got[CMD_CONFIGURE_82586] == 0 && time_before(jiffies, timeout));
- if (time_after_eq(jiffies, timeout))
- TIMEOUT_MSG(__LINE__);
- }
-}
-
-/************************************************************
- *
- * A couple of tests to see if there's a 3C505 or not
- * Called only by elp_autodetect
- ************************************************************/
-
-static int __init elp_sense(struct net_device *dev)
-{
- int addr = dev->base_addr;
- const char *name = dev->name;
- byte orig_HSR;
-
- if (!request_region(addr, ELP_IO_EXTENT, "3c505"))
- return -ENODEV;
-
- orig_HSR = inb_status(addr);
-
- if (elp_debug > 0)
- pr_debug(search_msg, name, addr);
-
- if (orig_HSR == 0xff) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 1);
- goto out;
- }
-
- /* Wait for a while; the adapter may still be booting up */
- if (elp_debug > 0)
- pr_cont(stilllooking_msg);
-
- if (orig_HSR & DIR) {
- /* If HCR.DIR is up, we pull it down. HSR.DIR should follow. */
- outb(0, dev->base_addr + PORT_CONTROL);
- msleep(300);
- if (inb_status(addr) & DIR) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 2);
- goto out;
- }
- } else {
- /* If HCR.DIR is down, we pull it up. HSR.DIR should follow. */
- outb(DIR, dev->base_addr + PORT_CONTROL);
- msleep(300);
- if (!(inb_status(addr) & DIR)) {
- if (elp_debug > 0)
- pr_cont(notfound_msg, 3);
- goto out;
- }
- }
- /*
- * It certainly looks like a 3c505.
- */
- if (elp_debug > 0)
- pr_cont(found_msg);
-
- return 0;
-out:
- release_region(addr, ELP_IO_EXTENT);
- return -ENODEV;
-}
-
-/*************************************************************
- *
- * Search through addr_list[] and try to find a 3C505
- * Called only by eplus_probe
- *************************************************************/
-
-static int __init elp_autodetect(struct net_device *dev)
-{
- int idx = 0;
-
- /* if the base address is set, then only check that address;
- otherwise, run through the table */
- if (dev->base_addr != 0) { /* dev->base_addr == 0 ==> plain autodetect */
- if (elp_sense(dev) == 0)
- return dev->base_addr;
- } else
- while ((dev->base_addr = addr_list[idx++])) {
- if (elp_sense(dev) == 0)
- return dev->base_addr;
- }
-
- /* could not find an adapter */
- if (elp_debug > 0)
- pr_debug(couldnot_msg, dev->name);
-
- return 0; /* Because of this, the layer above will return -ENODEV */
-}
-
-static const struct net_device_ops elp_netdev_ops = {
- .ndo_open = elp_open,
- .ndo_stop = elp_close,
- .ndo_get_stats = elp_get_stats,
- .ndo_start_xmit = elp_start_xmit,
- .ndo_tx_timeout = elp_timeout,
- .ndo_set_rx_mode = elp_set_mc_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/******************************************************
- *
- * probe for an Etherlink Plus board at the specified address
- *
- ******************************************************/
-
-/* There are three situations we need to be able to detect here:
-
- * a) the card is idle
- * b) the card is still booting up
- * c) the card is stuck in a strange state (some DOS drivers do this)
- *
- * In case (a), all is well. In case (b), we wait 10 seconds to see if the
- * card finishes booting, and carry on if so. In case (c), we do a hard reset,
- * loop round, and hope for the best.
- *
- * This is all very unpleasant, but hopefully avoids the problems with the old
- * probe code (which had a 15-second delay if the card was idle, and didn't
- * work at all if it was in a weird state).
- */
-
-static int __init elplus_setup(struct net_device *dev)
-{
- elp_device *adapter = netdev_priv(dev);
- int i, tries, tries1, okay;
- unsigned long timeout;
- unsigned long cookie = 0;
- int err = -ENODEV;
-
- /*
- * setup adapter structure
- */
-
- dev->base_addr = elp_autodetect(dev);
- if (!dev->base_addr)
- return -ENODEV;
-
- adapter->send_pcb_semaphore = 0;
-
- for (tries1 = 0; tries1 < 3; tries1++) {
- outb_control((adapter->hcr_val | CMDE) & ~DIR, dev);
- /* First try to write just one byte, to see if the card is
- * responding at all normally.
- */
- timeout = jiffies + 5*HZ/100;
- okay = 0;
- while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
- if ((inb_status(dev->base_addr) & HCRE)) {
- outb_command(0, dev->base_addr); /* send a spurious byte */
- timeout = jiffies + 5*HZ/100;
- while (time_before(jiffies, timeout) && !(inb_status(dev->base_addr) & HCRE));
- if (inb_status(dev->base_addr) & HCRE)
- okay = 1;
- }
- if (!okay) {
- /* Nope, it's ignoring the command register. This means that
- * either it's still booting up, or it's died.
- */
- pr_err("%s: command register wouldn't drain, ", dev->name);
- if ((inb_status(dev->base_addr) & 7) == 3) {
- /* If the adapter status is 3, it *could* still be booting.
- * Give it the benefit of the doubt for 10 seconds.
- */
- pr_cont("assuming 3c505 still starting\n");
- timeout = jiffies + 10*HZ;
- while (time_before(jiffies, timeout) && (inb_status(dev->base_addr) & 7));
- if (inb_status(dev->base_addr) & 7) {
- pr_err("%s: 3c505 failed to start\n", dev->name);
- } else {
- okay = 1; /* It started */
- }
- } else {
- /* Otherwise, it must just be in a strange
- * state. We probably need to kick it.
- */
- pr_cont("3c505 is sulking\n");
- }
- }
- for (tries = 0; tries < 5 && okay; tries++) {
-
- /*
- * Try to set the Ethernet address, to make sure that the board
- * is working.
- */
- adapter->tx_pcb.command = CMD_STATION_ADDRESS;
- adapter->tx_pcb.length = 0;
- cookie = probe_irq_on();
- if (!send_pcb(dev, &adapter->tx_pcb)) {
- pr_err("%s: could not send first PCB\n", dev->name);
- probe_irq_off(cookie);
- continue;
- }
- if (!receive_pcb(dev, &adapter->rx_pcb)) {
- pr_err("%s: could not read first PCB\n", dev->name);
- probe_irq_off(cookie);
- continue;
- }
- if ((adapter->rx_pcb.command != CMD_ADDRESS_RESPONSE) ||
- (adapter->rx_pcb.length != 6)) {
- pr_err("%s: first PCB wrong (%d, %d)\n", dev->name,
- adapter->rx_pcb.command, adapter->rx_pcb.length);
- probe_irq_off(cookie);
- continue;
- }
- goto okay;
- }
- /* It's broken. Do a hard reset to re-initialise the board,
- * and try again.
- */
- pr_info("%s: resetting adapter\n", dev->name);
- outb_control(adapter->hcr_val | FLSH | ATTN, dev);
- outb_control(adapter->hcr_val & ~(FLSH | ATTN), dev);
- }
- pr_err("%s: failed to initialise 3c505\n", dev->name);
- goto out;
-
- okay:
- if (dev->irq) { /* Is there a preset IRQ? */
- int rpt = probe_irq_off(cookie);
- if (dev->irq != rpt) {
- pr_warning("%s: warning, irq %d configured but %d detected\n", dev->name, dev->irq, rpt);
- }
- /* if dev->irq == probe_irq_off(cookie), all is well */
- } else /* No preset IRQ; just use what we can detect */
- dev->irq = probe_irq_off(cookie);
- switch (dev->irq) { /* Legal, sane? */
- case 0:
- pr_err("%s: IRQ probe failed: check 3c505 jumpers.\n",
- dev->name);
- goto out;
- case 1:
- case 6:
- case 8:
- case 13:
- pr_err("%s: Impossible IRQ %d reported by probe_irq_off().\n",
- dev->name, dev->irq);
- goto out;
- }
- /*
- * Now we have the IRQ number so we can disable the interrupts from
- * the board until the board is opened.
- */
- outb_control(adapter->hcr_val & ~CMDE, dev);
-
- /*
- * copy Ethernet address into structure
- */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = adapter->rx_pcb.data.eth_addr[i];
-
- /* find a DMA channel */
- if (!dev->dma) {
- if (dev->mem_start) {
- dev->dma = dev->mem_start & 7;
- }
- else {
- pr_warning("%s: warning, DMA channel not specified, using default\n", dev->name);
- dev->dma = ELP_DMA;
- }
- }
-
- /*
- * print remainder of startup message
- */
- pr_info("%s: 3c505 at %#lx, irq %d, dma %d, addr %pM, ",
- dev->name, dev->base_addr, dev->irq, dev->dma, dev->dev_addr);
- /*
- * read more information from the adapter
- */
-
- adapter->tx_pcb.command = CMD_ADAPTER_INFO;
- adapter->tx_pcb.length = 0;
- if (!send_pcb(dev, &adapter->tx_pcb) ||
- !receive_pcb(dev, &adapter->rx_pcb) ||
- (adapter->rx_pcb.command != CMD_ADAPTER_INFO_RESPONSE) ||
- (adapter->rx_pcb.length != 10)) {
- pr_cont("not responding to second PCB\n");
- }
- pr_cont("rev %d.%d, %dk\n", adapter->rx_pcb.data.info.major_vers,
- adapter->rx_pcb.data.info.minor_vers, adapter->rx_pcb.data.info.RAM_sz);
-
- /*
- * reconfigure the adapter memory to better suit our purposes
- */
- adapter->tx_pcb.command = CMD_CONFIGURE_ADAPTER_MEMORY;
- adapter->tx_pcb.length = 12;
- adapter->tx_pcb.data.memconf.cmd_q = 8;
- adapter->tx_pcb.data.memconf.rcv_q = 8;
- adapter->tx_pcb.data.memconf.mcast = 10;
- adapter->tx_pcb.data.memconf.frame = 10;
- adapter->tx_pcb.data.memconf.rcv_b = 10;
- adapter->tx_pcb.data.memconf.progs = 0;
- if (!send_pcb(dev, &adapter->tx_pcb) ||
- !receive_pcb(dev, &adapter->rx_pcb) ||
- (adapter->rx_pcb.command != CMD_CONFIGURE_ADAPTER_RESPONSE) ||
- (adapter->rx_pcb.length != 2)) {
- pr_err("%s: could not configure adapter memory\n", dev->name);
- }
- if (adapter->rx_pcb.data.configure) {
- pr_err("%s: adapter configuration failed\n", dev->name);
- }
-
- dev->netdev_ops = &elp_netdev_ops;
- dev->watchdog_timeo = 10*HZ;
- dev->ethtool_ops = &netdev_ethtool_ops; /* local */
-
- dev->mem_start = dev->mem_end = 0;
-
- err = register_netdev(dev);
- if (err)
- goto out;
-
- return 0;
-out:
- release_region(dev->base_addr, ELP_IO_EXTENT);
- return err;
-}
-
-#ifndef MODULE
-struct net_device * __init elplus_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(elp_device));
- int err;
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = elplus_setup(dev);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
- return dev;
-}
-
-#else
-static struct net_device *dev_3c505[ELP_MAX_CARDS];
-static int io[ELP_MAX_CARDS];
-static int irq[ELP_MAX_CARDS];
-static int dma[ELP_MAX_CARDS];
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(dma, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherLink Plus I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherLink Plus IRQ number(s) (assigned)");
-MODULE_PARM_DESC(dma, "EtherLink Plus DMA channel(s)");
-
-int __init init_module(void)
-{
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
- struct net_device *dev = alloc_etherdev(sizeof(elp_device));
- if (!dev)
- break;
-
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (dma[this_dev]) {
- dev->dma = dma[this_dev];
- } else {
- dev->dma = ELP_DMA;
- pr_warning("3c505.c: warning, using default DMA channel,\n");
- }
- if (io[this_dev] == 0) {
- if (this_dev) {
- free_netdev(dev);
- break;
- }
- pr_notice("3c505.c: module autoprobe not recommended, give io=xx.\n");
- }
- if (elplus_setup(dev) != 0) {
- pr_warning("3c505.c: Failed to register card at 0x%x.\n", io[this_dev]);
- free_netdev(dev);
- break;
- }
- dev_3c505[this_dev] = dev;
- found++;
- }
- if (!found)
- return -ENODEV;
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < ELP_MAX_CARDS; this_dev++) {
- struct net_device *dev = dev_3c505[this_dev];
- if (dev) {
- unregister_netdev(dev);
- release_region(dev->base_addr, ELP_IO_EXTENT);
- free_netdev(dev);
- }
- }
-}
-
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/3c505.h b/drivers/net/ethernet/i825xx/3c505.h
deleted file mode 100644
index 04df2a9002b..00000000000
--- a/drivers/net/ethernet/i825xx/3c505.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/*****************************************************************
- *
- * defines for 3Com Etherlink Plus adapter
- *
- *****************************************************************/
-
-#define ELP_DMA 6
-#define ELP_RX_PCBS 4
-#define ELP_MAX_CARDS 4
-
-/*
- * I/O register offsets
- */
-#define PORT_COMMAND 0x00 /* read/write, 8-bit */
-#define PORT_STATUS 0x02 /* read only, 8-bit */
-#define PORT_AUXDMA 0x02 /* write only, 8-bit */
-#define PORT_DATA 0x04 /* read/write, 16-bit */
-#define PORT_CONTROL 0x06 /* read/write, 8-bit */
-
-#define ELP_IO_EXTENT 0x10 /* size of used IO registers */
-
-/*
- * host control registers bits
- */
-#define ATTN 0x80 /* attention */
-#define FLSH 0x40 /* flush data register */
-#define DMAE 0x20 /* DMA enable */
-#define DIR 0x10 /* direction */
-#define TCEN 0x08 /* terminal count interrupt enable */
-#define CMDE 0x04 /* command register interrupt enable */
-#define HSF2 0x02 /* host status flag 2 */
-#define HSF1 0x01 /* host status flag 1 */
-
-/*
- * combinations of HSF flags used for PCB transmission
- */
-#define HSF_PCB_ACK HSF1
-#define HSF_PCB_NAK HSF2
-#define HSF_PCB_END (HSF2|HSF1)
-#define HSF_PCB_MASK (HSF2|HSF1)
-
-/*
- * host status register bits
- */
-#define HRDY 0x80 /* data register ready */
-#define HCRE 0x40 /* command register empty */
-#define ACRF 0x20 /* adapter command register full */
-/* #define DIR 0x10 direction - same as in control register */
-#define DONE 0x08 /* DMA done */
-#define ASF3 0x04 /* adapter status flag 3 */
-#define ASF2 0x02 /* adapter status flag 2 */
-#define ASF1 0x01 /* adapter status flag 1 */
-
-/*
- * combinations of ASF flags used for PCB reception
- */
-#define ASF_PCB_ACK ASF1
-#define ASF_PCB_NAK ASF2
-#define ASF_PCB_END (ASF2|ASF1)
-#define ASF_PCB_MASK (ASF2|ASF1)
-
-/*
- * host aux DMA register bits
- */
-#define DMA_BRST 0x01 /* DMA burst */
-
-/*
- * maximum amount of data allowed in a PCB
- */
-#define MAX_PCB_DATA 62
-
-/*****************************************************************
- *
- * timeout value
- * this is a rough value used for loops to stop them from
- * locking up the whole machine in the case of failure or
- * error conditions
- *
- *****************************************************************/
-
-#define TIMEOUT 300
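-
-/* Editor's note: TIMEOUT is expressed in jiffies (it is always added to the
- * current jiffies value), so 300 corresponds to roughly 3 seconds on a
- * HZ=100 kernel and proportionally less wall-clock time at higher HZ.
- */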
-
-/*****************************************************************
- *
- * PCB commands
- *
- *****************************************************************/
-
-enum {
- /*
- * host PCB commands
- */
- CMD_CONFIGURE_ADAPTER_MEMORY = 0x01,
- CMD_CONFIGURE_82586 = 0x02,
- CMD_STATION_ADDRESS = 0x03,
- CMD_DMA_DOWNLOAD = 0x04,
- CMD_DMA_UPLOAD = 0x05,
- CMD_PIO_DOWNLOAD = 0x06,
- CMD_PIO_UPLOAD = 0x07,
- CMD_RECEIVE_PACKET = 0x08,
- CMD_TRANSMIT_PACKET = 0x09,
- CMD_NETWORK_STATISTICS = 0x0a,
- CMD_LOAD_MULTICAST_LIST = 0x0b,
- CMD_CLEAR_PROGRAM = 0x0c,
- CMD_DOWNLOAD_PROGRAM = 0x0d,
- CMD_EXECUTE_PROGRAM = 0x0e,
- CMD_SELF_TEST = 0x0f,
- CMD_SET_STATION_ADDRESS = 0x10,
- CMD_ADAPTER_INFO = 0x11,
- NUM_TRANSMIT_CMDS,
-
- /*
- * adapter PCB commands
- */
- CMD_CONFIGURE_ADAPTER_RESPONSE = 0x31,
- CMD_CONFIGURE_82586_RESPONSE = 0x32,
- CMD_ADDRESS_RESPONSE = 0x33,
- CMD_DOWNLOAD_DATA_REQUEST = 0x34,
- CMD_UPLOAD_DATA_REQUEST = 0x35,
- CMD_RECEIVE_PACKET_COMPLETE = 0x38,
- CMD_TRANSMIT_PACKET_COMPLETE = 0x39,
- CMD_NETWORK_STATISTICS_RESPONSE = 0x3a,
- CMD_LOAD_MULTICAST_RESPONSE = 0x3b,
- CMD_CLEAR_PROGRAM_RESPONSE = 0x3c,
- CMD_DOWNLOAD_PROGRAM_RESPONSE = 0x3d,
- CMD_EXECUTE_RESPONSE = 0x3e,
- CMD_SELF_TEST_RESPONSE = 0x3f,
- CMD_SET_ADDRESS_RESPONSE = 0x40,
- CMD_ADAPTER_INFO_RESPONSE = 0x41
-};
-
-/* Definitions for the PCB data structure */
-
-/* Data units */
-typedef unsigned char byte;
-typedef unsigned short int word;
-typedef unsigned long int dword;
-
-/* Data structures */
-struct Memconf {
- word cmd_q,
- rcv_q,
- mcast,
- frame,
- rcv_b,
- progs;
-};
-
-struct Rcv_pkt {
- word buf_ofs,
- buf_seg,
- buf_len,
- timeout;
-};
-
-struct Xmit_pkt {
- word buf_ofs,
- buf_seg,
- pkt_len;
-};
-
-struct Rcv_resp {
- word buf_ofs,
- buf_seg,
- buf_len,
- pkt_len,
- timeout,
- status;
- dword timetag;
-};
-
-struct Xmit_resp {
- word buf_ofs,
- buf_seg,
- c_stat,
- status;
-};
-
-
-struct Netstat {
- dword tot_recv,
- tot_xmit;
- word err_CRC,
- err_align,
- err_res,
- err_ovrrun;
-};
-
-
-struct Selftest {
- word error;
- union {
- word ROM_cksum;
- struct {
- word ofs, seg;
- } RAM;
- word i82586;
- } failure;
-};
-
-struct Info {
- byte minor_vers,
- major_vers;
- word ROM_cksum,
- RAM_sz,
- free_ofs,
- free_seg;
-};
-
-struct Memdump {
- word size,
- off,
- seg;
-};
-
-/*
-Primary Command Block. The most important data structure. All communication
-between the host and the adapter is done with these. (Except for the actual
-Ethernet data, which has different packaging.)
-*/
-typedef struct {
- byte command;
- byte length;
- union {
- struct Memconf memconf;
- word configure;
- struct Rcv_pkt rcv_pkt;
- struct Xmit_pkt xmit_pkt;
- byte multicast[10][6];
- byte eth_addr[6];
- byte failed;
- struct Rcv_resp rcv_resp;
- struct Xmit_resp xmit_resp;
- struct Netstat netstat;
- struct Selftest selftest;
- struct Info info;
- struct Memdump memdump;
- byte raw[62];
- } data;
-} pcb_struct;
-
-/* These defines for 'configure' */
-#define RECV_STATION 0x00
-#define RECV_BROAD 0x01
-#define RECV_MULTI 0x02
-#define RECV_PROMISC 0x04
-#define NO_LOOPBACK 0x00
-#define INT_LOOPBACK 0x08
-#define EXT_LOOPBACK 0x10
-
-/*****************************************************************
- *
- * structure to hold context information for adapter
- *
- *****************************************************************/
-
-#define DMA_BUFFER_SIZE 1600
-#define BACKLOG_SIZE 4
-
-typedef struct {
- volatile short got[NUM_TRANSMIT_CMDS]; /* flags for
- command completion */
- pcb_struct tx_pcb; /* PCB for foreground sending */
- pcb_struct rx_pcb; /* PCB for foreground receiving */
- pcb_struct itx_pcb; /* PCB for background sending */
- pcb_struct irx_pcb; /* PCB for background receiving */
-
- void *dma_buffer;
-
- struct {
- unsigned int length[BACKLOG_SIZE];
- unsigned int in;
- unsigned int out;
- } rx_backlog;
-
- struct {
- unsigned int direction;
- unsigned int length;
- struct sk_buff *skb;
- void *target;
- unsigned long start_time;
- } current_dma;
-
- /* flags */
- unsigned long send_pcb_semaphore;
- unsigned long dmaing;
- unsigned long busy;
-
- unsigned int rx_active; /* number of receive PCBs */
- volatile unsigned char hcr_val; /* what we think the HCR contains */
- spinlock_t lock; /* Interrupt v tx lock */
-} elp_device;
diff --git a/drivers/net/ethernet/i825xx/3c507.c b/drivers/net/ethernet/i825xx/3c507.c
deleted file mode 100644
index e8984b05990..00000000000
--- a/drivers/net/ethernet/i825xx/3c507.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/* 3c507.c: An EtherLink16 device driver for Linux. */
-/*
- Written 1993,1994 by Donald Becker.
-
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
-
- Thanks go to jennings@Montrouge.SMR.slb.com (Patrick Jennings)
- and jrs@world.std.com (Rick Sladkey) for testing and bugfixes.
- Mark Salazar <leslie@access.digex.net> made the changes for cards with
- only 16K packet buffers.
-
- Things remaining to do:
- Verify that the tx and rx buffers don't have fencepost errors.
- Move the theory of operation and memory map documentation.
- The statistics need to be updated correctly.
-*/
-
-#define DRV_NAME "3c507"
-#define DRV_VERSION "1.10a"
-#define DRV_RELDATE "11/17/2001"
-
-static const char version[] =
- DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Donald Becker (becker@scyld.com)\n";
-
-/*
- Sources:
- This driver wouldn't have been written without the availability of the
- Crynwr driver source code. It provided a known-working implementation
- that filled in the gaping holes of the Intel documentation. Three cheers
- for Russ Nelson.
-
- Intel Microcommunications Databook, Vol. 1, 1990. It provides just enough
- info that the casual reader might think that it documents the i82586 :-<.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/ethtool.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-/* use 0 for production, 1 for verification, 2..7 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-#define debug net_debug
-
-
-/*
- Details of the i82586.
-
- You'll really need the databook to understand the details of this part,
- but the outline is that the i82586 has two separate processing units.
- Both are started from a list of three configuration tables, of which only
- the last, the System Control Block (SCB), is used after reset-time. The SCB
- has the following fields:
- Status word
- Command word
- Tx/Command block addr.
- Rx block addr.
- The command word accepts the following controls for the Tx and Rx units:
- */
-
-#define CUC_START 0x0100
-#define CUC_RESUME 0x0200
-#define CUC_SUSPEND 0x0300
-#define RX_START 0x0010
-#define RX_RESUME 0x0020
-#define RX_SUSPEND 0x0030
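-
-/* Editor's note -- an illustrative sketch, not part of the original driver:
- * the SCB layout described above, expressed as a C struct.  The driver
- * itself reaches these fields through raw 16-bit offsets (iSCB_STATUS,
- * iSCB_CMD, iSCB_CBL and iSCB_RFA, defined further down) rather than
- * through a struct.
- */
-#if 0 /* illustration only */
-struct i82586_scb {
-	ushort status;		/* status word, written by the 82586 */
-	ushort command;		/* command word: CUC_xxx | RX_xxx values above */
-	ushort cbl_offset;	/* Tx/command block list offset */
-	ushort rfa_offset;	/* Rx frame area offset */
-};
-#endif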
-
-/* The Rx unit uses a list of frame descriptors and a list of data buffer
- descriptors. We use full-sized (1518 byte) data buffers, so there is
- a one-to-one pairing of frame descriptors to buffer descriptors.
-
- The Tx ("command") unit executes a list of commands that look like:
- Status word Written by the 82586 when the command is done.
- Command word Command in lower 3 bits, post-command action in upper 3
- Link word The address of the next command.
- Parameters (as needed).
-
- Some definitions related to the Command Word are:
- */
-#define CMD_EOL 0x8000 /* The last command of the list, stop. */
-#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
-#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
-
-enum commands {
- CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
- CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7};
-
-/* Information that needs to be kept for each board. */
-struct net_local {
- int last_restart;
- ushort rx_head;
- ushort rx_tail;
- ushort tx_head;
- ushort tx_cmd_link;
- ushort tx_reap;
- ushort tx_pkts_in_ring;
- spinlock_t lock;
- void __iomem *base;
-};
-
-/*
- Details of the EtherLink16 Implementation
- The 3c507 is a generic shared-memory i82586 implementation.
- The host can map 16K, 32K, 48K, or 64K of the 64K memory into
- 0x0[CD][08]0000, or all 64K into 0xF[02468]0000.
- */
-
-/* Offsets from the base I/O address. */
-#define SA_DATA 0 /* Station address data, or 3Com signature. */
-#define MISC_CTRL 6 /* Switch the SA_DATA banks, and bus config bits. */
-#define RESET_IRQ 10 /* Reset the latched IRQ line. */
-#define SIGNAL_CA 11 /* Frob the 82586 Channel Attention line. */
-#define ROM_CONFIG 13
-#define MEM_CONFIG 14
-#define IRQ_CONFIG 15
-#define EL16_IO_EXTENT 16
-
-/* The ID port is used at boot-time to locate the ethercard. */
-#define ID_PORT 0x100
-
-/* Offsets to registers in the mailbox (SCB). */
-#define iSCB_STATUS 0x8
-#define iSCB_CMD 0xA
-#define iSCB_CBL 0xC /* Command BLock offset. */
-#define iSCB_RFA 0xE /* Rx Frame Area offset. */
-
-/* Since the 3c507 maps the shared memory window so that the last byte is
- at 82586 address FFFF, the first byte is at 82586 address 0, 16K, 32K, or
- 48K corresponding to window sizes of 64K, 48K, 32K and 16K respectively.
- We can account for this by setting the 'SCB Base' entry in the ISCP table
- below for all the 16 bit offset addresses, and also adding the 'SCB Base'
- value to all 24 bit physical addresses (in the SCP table and the TX and RX
- Buffer Descriptors).
- -Mark
- */
-#define SCB_BASE ((unsigned)64*1024 - (dev->mem_end - dev->mem_start))
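-
-/* Editor's note -- worked example of the above, assuming a 48K window:
- * dev->mem_end - dev->mem_start == 0xC000, so SCB_BASE == 0x4000, and an
- * object at host-window offset X lives at i82586 offset X + 0x4000.
- */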
-
-/*
- What follows in 'init_words[]' is the "program" that is downloaded to the
- 82586 memory. It's mostly tables and command blocks, and starts at the
- reset address 0xfffff6. This is designed to be similar to the EtherExpress,
- thus the unusual location of the SCB at 0x0008.
-
- Even with the additional "don't care" values, doing it this way takes less
- program space than initializing the individual tables, and I feel it's much
- cleaner.
-
- The databook is particularly useless for the first two structures; I had
- to use the Crynwr driver as an example.
-
- The memory setup is as follows:
- */
-
-#define CONFIG_CMD 0x0018
-#define SET_SA_CMD 0x0024
-#define SA_OFFSET 0x002A
-#define IDLELOOP 0x30
-#define TDR_CMD 0x38
-#define TDR_TIME 0x3C
-#define DUMP_CMD 0x40
-#define DIAG_CMD 0x48
-#define SET_MC_CMD 0x4E
-#define DUMP_DATA 0x56 /* A 170 byte buffer shared by the Dump and Set-MC commands. */
-
-#define TX_BUF_START 0x0100
-#define NUM_TX_BUFS 5
-#define TX_BUF_SIZE (1518+14+20+16) /* packet+header+TBD */
-
-#define RX_BUF_START 0x2000
-#define RX_BUF_SIZE (1518+14+18) /* packet+header+RBD */
-#define RX_BUF_END (dev->mem_end - dev->mem_start)
-
-#define TX_TIMEOUT (HZ/20)
-
-/*
- That's it: only 86 bytes to set up the beast, including every extra
- command available. The 170 byte buffer at DUMP_DATA is shared between the
- Dump command (called only by the diagnostic program) and the SetMulticastList
- command.
-
- To complete the memory setup you only have to write the station address at
- SA_OFFSET and create the Tx & Rx buffer lists.
-
- The Tx command chain and buffer list is setup as follows:
- A Tx command table, with the data buffer pointing to...
- A Tx data buffer descriptor. The packet is in a single buffer, rather than
- chaining together several smaller buffers.
- A NoOp command, which initially points to itself,
- And the packet data.
-
- A transmit is done by filling in the Tx command table and data buffer,
- re-writing the NoOp command, and finally changing the offset of the last
- command to point to the current Tx command. When the Tx command is finished,
- it jumps to the NoOp, where it loops until the next Tx command changes the
- "link offset" in the NoOp. This way the 82586 never has to go through the
- slow restart sequence.
-
- The Rx buffer list is set up in the obvious ring structure. We have enough
- memory (and low enough interrupt latency) that we can avoid the complicated
- Rx buffer linked lists by always associating a full-size Rx data buffer with
- each Rx data frame.
-
- I currently use NUM_TX_BUFS transmit buffers starting at TX_BUF_START (0x0100), and
- use the rest of memory, from RX_BUF_START to RX_BUF_END, for Rx buffers.
-
- */
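
A minimal sketch of the slot arithmetic this layout implies, for illustration only (the helper name next_tx_slot is made up; the same wrap test appears later in the interrupt reap loop and in hardware_send_packet()):

static unsigned short next_tx_slot(unsigned short slot)
{
	/* Tx slots are fixed-size blocks packed between TX_BUF_START and
	   RX_BUF_START; wrap before a slot would reach the Rx area. */
	slot += TX_BUF_SIZE;
	if (slot > RX_BUF_START - TX_BUF_SIZE)
		slot = TX_BUF_START;
	return slot;
}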
-
-static unsigned short init_words[] = {
- /* System Configuration Pointer (SCP). */
- 0x0000, /* Set bus size to 16 bits. */
- 0,0, /* pad words. */
- 0x0000,0x0000, /* ISCP phys addr, set in init_82586_mem(). */
-
- /* Intermediate System Configuration Pointer (ISCP). */
- 0x0001, /* Status word that's cleared when init is done. */
- 0x0008,0,0, /* SCB offset, (skip, skip) */
-
- /* System Control Block (SCB). */
- 0,0xf000|RX_START|CUC_START, /* SCB status and cmd. */
- CONFIG_CMD, /* Command list pointer, points to Configure. */
- RX_BUF_START, /* Rx block list. */
- 0,0,0,0, /* Error count: CRC, align, buffer, overrun. */
-
- /* 0x0018: Configure command. Change to put MAC data with packet. */
- 0, CmdConfigure, /* Status, command. */
- SET_SA_CMD, /* Next command is Set Station Addr. */
- 0x0804, /* "4" bytes of config data, 8 byte FIFO. */
- 0x2e40, /* Magic values, including MAC data location. */
- 0, /* Unused pad word. */
-
- /* 0x0024: Setup station address command. */
- 0, CmdSASetup,
- SET_MC_CMD, /* Next command. */
- 0xaa00,0xb000,0x0bad, /* Station address (to be filled in) */
-
- /* 0x0030: NOP, looping back to itself. Point to first Tx buffer to Tx. */
- 0, CmdNOp, IDLELOOP, 0 /* pad */,
-
- /* 0x0038: An unused Time-Domain Reflectometer command. */
- 0, CmdTDR, IDLELOOP, 0,
-
- /* 0x0040: An unused Dump State command. */
- 0, CmdDump, IDLELOOP, DUMP_DATA,
-
- /* 0x0048: An unused Diagnose command. */
- 0, CmdDiagnose, IDLELOOP,
-
- /* 0x004E: An empty set-multicast-list command. */
- 0, CmdMulticastList, IDLELOOP, 0,
-};
-
-/* Index to functions, as function prototypes. */
-
-static int el16_probe1(struct net_device *dev, int ioaddr);
-static int el16_open(struct net_device *dev);
-static netdev_tx_t el16_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t el16_interrupt(int irq, void *dev_id);
-static void el16_rx(struct net_device *dev);
-static int el16_close(struct net_device *dev);
-static void el16_tx_timeout (struct net_device *dev);
-
-static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad);
-static void init_82586_mem(struct net_device *dev);
-static const struct ethtool_ops netdev_ethtool_ops;
-static void init_rx_bufs(struct net_device *);
-
-static int io = 0x300;
-static int irq;
-static int mem_start;
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, (detachable devices only) allocate space for the
- device and return success.
- */
-
-struct net_device * __init el16_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
- const unsigned *port;
- int err = -ENODEV;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- mem_start = dev->mem_start & 15;
- }
-
- if (io > 0x1ff) /* Check a single specified location. */
- err = el16_probe1(dev, io);
- else if (io != 0)
- err = -ENXIO; /* Don't probe at all. */
- else {
- for (port = ports; *port; port++) {
- err = el16_probe1(dev, *port);
- if (!err)
- break;
- }
- }
-
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- free_irq(dev->irq, dev);
- iounmap(((struct net_local *)netdev_priv(dev))->base);
- release_region(dev->base_addr, EL16_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = el16_open,
- .ndo_stop = el16_close,
- .ndo_start_xmit = el16_send_packet,
- .ndo_tx_timeout = el16_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init el16_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned char init_ID_done;
- int i, irq, irqval, retval;
- struct net_local *lp;
-
- if (init_ID_done == 0) {
- ushort lrs_state = 0xff;
- /* Send the ID sequence to the ID_PORT to enable the board(s). */
- outb(0x00, ID_PORT);
- for(i = 0; i < 255; i++) {
- outb(lrs_state, ID_PORT);
- lrs_state <<= 1;
- if (lrs_state & 0x100)
- lrs_state ^= 0xe7;
- }
- outb(0x00, ID_PORT);
- init_ID_done = 1;
- }
-
- if (!request_region(ioaddr, EL16_IO_EXTENT, DRV_NAME))
- return -ENODEV;
-
- if ((inb(ioaddr) != '*') || (inb(ioaddr + 1) != '3') ||
- (inb(ioaddr + 2) != 'C') || (inb(ioaddr + 3) != 'O')) {
- retval = -ENODEV;
- goto out;
- }
-
- pr_info("%s: 3c507 at %#x,", dev->name, ioaddr);
-
- /* We should make a few more checks here, like the first three octets of
- the S.A. for the manufacturer's code. */
-
- irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
-
- irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev);
- if (irqval) {
- pr_cont("\n");
- pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
- retval = -EAGAIN;
- goto out;
- }
-
- /* We've committed to using the board, and can start filling in *dev. */
- dev->base_addr = ioaddr;
-
- outb(0x01, ioaddr + MISC_CTRL);
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
- pr_cont(" %pM", dev->dev_addr);
-
- if (mem_start)
- net_debug = mem_start & 7;
-
-#ifdef MEM_BASE
- dev->mem_start = MEM_BASE;
- dev->mem_end = dev->mem_start + 0x10000;
-#else
- {
- int base;
- int size;
- char mem_config = inb(ioaddr + MEM_CONFIG);
- if (mem_config & 0x20) {
- size = 64*1024;
- base = 0xf00000 + (mem_config & 0x08 ? 0x080000
- : ((mem_config & 3) << 17));
- } else {
- size = ((mem_config & 3) + 1) << 14;
- base = 0x0c0000 + ( (mem_config & 0x18) << 12);
- }
- dev->mem_start = base;
- dev->mem_end = base + size;
- }
-#endif
-
- dev->if_port = (inb(ioaddr + ROM_CONFIG) & 0x80) ? 1 : 0;
- dev->irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
-
- pr_cont(", IRQ %d, %sternal xcvr, memory %#lx-%#lx.\n", dev->irq,
- dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
-
- if (net_debug)
- pr_debug("%s", version);
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->lock);
- lp->base = ioremap(dev->mem_start, RX_BUF_END);
- if (!lp->base) {
- pr_err("3c507: unable to remap memory\n");
- retval = -EAGAIN;
- goto out1;
- }
-
- dev->netdev_ops = &netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->ethtool_ops = &netdev_ethtool_ops;
- dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
- return 0;
-out1:
- free_irq(dev->irq, dev);
-out:
- release_region(ioaddr, EL16_IO_EXTENT);
- return retval;
-}
-
-static int el16_open(struct net_device *dev)
-{
- /* Initialize the 82586 memory and start it. */
- init_82586_mem(dev);
-
- netif_start_queue(dev);
- return 0;
-}
-
-
-static void el16_tx_timeout (struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- if (net_debug > 1)
- pr_debug("%s: transmit timed out, %s? ", dev->name,
- readw(shmem + iSCB_STATUS) & 0x8000 ? "IRQ conflict" :
- "network cable problem");
- /* Try to restart the adaptor. */
- if (lp->last_restart == dev->stats.tx_packets) {
- if (net_debug > 1)
- pr_cont("Resetting board.\n");
- /* Completely reset the adaptor. */
- init_82586_mem (dev);
- lp->tx_pkts_in_ring = 0;
- } else {
- /* Issue the channel attention signal and hope it "gets better". */
- if (net_debug > 1)
- pr_cont("Kicking board.\n");
- writew(0xf000 | CUC_START | RX_START, shmem + iSCB_CMD);
- outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
- lp->last_restart = dev->stats.tx_packets;
- }
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue (dev);
-}
-
-
-static netdev_tx_t el16_send_packet (struct sk_buff *skb,
- struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned char *buf = skb->data;
-
- netif_stop_queue (dev);
-
- spin_lock_irqsave (&lp->lock, flags);
-
- dev->stats.tx_bytes += length;
- /* Disable the 82586's input to the interrupt line. */
- outb (0x80, ioaddr + MISC_CTRL);
-
- hardware_send_packet (dev, buf, skb->len, length - skb->len);
-
- /* Enable the 82586 interrupt input. */
- outb (0x84, ioaddr + MISC_CTRL);
-
- spin_unlock_irqrestore (&lp->lock, flags);
-
- dev_kfree_skb (skb);
-
- /* You might need to clean up and record Tx statistics here. */
-
- return NETDEV_TX_OK;
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t el16_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status, boguscount = 0;
- ushort ack_cmd = 0;
- void __iomem *shmem;
-
- if (dev == NULL) {
- pr_err("net_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
- shmem = lp->base;
-
- spin_lock(&lp->lock);
-
- status = readw(shmem+iSCB_STATUS);
-
- if (net_debug > 4) {
- pr_debug("%s: 3c507 interrupt, status %4.4x.\n", dev->name, status);
- }
-
- /* Disable the 82586's input to the interrupt line. */
- outb(0x80, ioaddr + MISC_CTRL);
-
- /* Reap the Tx packet buffers. */
- while (lp->tx_pkts_in_ring) {
- unsigned short tx_status = readw(shmem+lp->tx_reap);
- if (!(tx_status & 0x8000)) {
- if (net_debug > 5)
- pr_debug("Tx command incomplete (%#x).\n", lp->tx_reap);
- break;
- }
- /* Tx unsuccessful or some interesting status bit set. */
- if (!(tx_status & 0x2000) || (tx_status & 0x0f3f)) {
- dev->stats.tx_errors++;
- if (tx_status & 0x0600) dev->stats.tx_carrier_errors++;
- if (tx_status & 0x0100) dev->stats.tx_fifo_errors++;
- if (!(tx_status & 0x0040)) dev->stats.tx_heartbeat_errors++;
- if (tx_status & 0x0020) dev->stats.tx_aborted_errors++;
- dev->stats.collisions += tx_status & 0xf;
- }
- dev->stats.tx_packets++;
- if (net_debug > 5)
- pr_debug("Reaped %x, Tx status %04x.\n" , lp->tx_reap, tx_status);
- lp->tx_reap += TX_BUF_SIZE;
- if (lp->tx_reap > RX_BUF_START - TX_BUF_SIZE)
- lp->tx_reap = TX_BUF_START;
-
- lp->tx_pkts_in_ring--;
- /* There is always more space in the Tx ring buffer now. */
- netif_wake_queue(dev);
-
- if (++boguscount > 10)
- break;
- }
-
- if (status & 0x4000) { /* Packet received. */
- if (net_debug > 5)
- pr_debug("Received packet, rx_head %04x.\n", lp->rx_head);
- el16_rx(dev);
- }
-
- /* Acknowledge the interrupt sources. */
- ack_cmd = status & 0xf000;
-
- if ((status & 0x0700) != 0x0200 && netif_running(dev)) {
- if (net_debug)
- pr_debug("%s: Command unit stopped, status %04x, restarting.\n",
- dev->name, status);
- /* If this ever occurs we should really re-write the idle loop, reset
- the Tx list, and do a complete restart of the command unit.
- For now we rely on the Tx timeout if the resume doesn't work. */
- ack_cmd |= CUC_RESUME;
- }
-
- if ((status & 0x0070) != 0x0040 && netif_running(dev)) {
- /* The Rx unit is not ready, it must be hung. Restart the receiver by
- initializing the rx buffers, and issuing an Rx start command. */
- if (net_debug)
- pr_debug("%s: Rx unit stopped, status %04x, restarting.\n",
- dev->name, status);
- init_rx_bufs(dev);
- writew(RX_BUF_START,shmem+iSCB_RFA);
- ack_cmd |= RX_START;
- }
-
- writew(ack_cmd,shmem+iSCB_CMD);
- outb(0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
-
- /* Clear the latched interrupt. */
- outb(0, ioaddr + RESET_IRQ);
-
- /* Enable the 82586's interrupt input. */
- outb(0x84, ioaddr + MISC_CTRL);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-static int el16_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx. */
- writew(RX_SUSPEND | CUC_SUSPEND,shmem+iSCB_CMD);
- outb(0, ioaddr + SIGNAL_CA);
-
- /* Disable the 82586's input to the interrupt line. */
- outb(0x80, ioaddr + MISC_CTRL);
-
- /* We always physically use the IRQ line, so we don't do free_irq(). */
-
- /* Update the statistics here. */
-
- return 0;
-}
-
-/* Initialize the Rx-block list. */
-static void init_rx_bufs(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- void __iomem *write_ptr;
- unsigned short SCB_base = SCB_BASE;
-
- int cur_rxbuf = lp->rx_head = RX_BUF_START;
-
- /* Initialize each Rx frame + data buffer. */
- do { /* While there is room for one more. */
-
- write_ptr = lp->base + cur_rxbuf;
-
- writew(0x0000,write_ptr); /* Status */
- writew(0x0000,write_ptr+=2); /* Command */
- writew(cur_rxbuf + RX_BUF_SIZE,write_ptr+=2); /* Link */
- writew(cur_rxbuf + 22,write_ptr+=2); /* Buffer offset */
- writew(0x0000,write_ptr+=2); /* Pad for dest addr. */
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2); /* Pad for source addr. */
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2);
- writew(0x0000,write_ptr+=2); /* Pad for protocol. */
-
- writew(0x0000,write_ptr+=2); /* Buffer: Actual count */
- writew(-1,write_ptr+=2); /* Buffer: Next (none). */
- writew(cur_rxbuf + 0x20 + SCB_base,write_ptr+=2);/* Buffer: Address low */
- writew(0x0000,write_ptr+=2);
- /* Finally, the number of bytes in the buffer. */
- writew(0x8000 + RX_BUF_SIZE-0x20,write_ptr+=2);
-
- lp->rx_tail = cur_rxbuf;
- cur_rxbuf += RX_BUF_SIZE;
- } while (cur_rxbuf <= RX_BUF_END - RX_BUF_SIZE);
-
- /* Terminate the list by setting the EOL bit, and wrap the pointer to make
- the list a ring. */
- write_ptr = lp->base + lp->rx_tail + 2;
- writew(0xC000,write_ptr); /* Command, mark as last. */
- writew(lp->rx_head,write_ptr+2); /* Link */
-}
-
-static void init_82586_mem(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- void __iomem *shmem = lp->base;
-
- /* Enable loopback to protect the wire while starting up,
- and hold the 586 in reset during the memory initialization. */
- outb(0x20, ioaddr + MISC_CTRL);
-
- /* Fix the ISCP address and base. */
- init_words[3] = SCB_BASE;
- init_words[7] = SCB_BASE;
-
- /* Write the words at 0xfff6 (address-aliased to 0xfffff6). */
- memcpy_toio(lp->base + RX_BUF_END - 10, init_words, 10);
-
- /* Write the words at 0x0000. */
- memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
-
- /* Fill in the station address. */
- memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
-
- /* The Tx-block list is written as needed. We just set up the values. */
- lp->tx_cmd_link = IDLELOOP + 4;
- lp->tx_head = lp->tx_reap = TX_BUF_START;
-
- init_rx_bufs(dev);
-
- /* Start the 586 by releasing the reset line, but leave loopback. */
- outb(0xA0, ioaddr + MISC_CTRL);
-
- /* This was time consuming to track down: you need to give two channel
- attention signals to reliably start up the i82586. */
- outb(0, ioaddr + SIGNAL_CA);
-
- {
- int boguscnt = 50;
- while (readw(shmem+iSCB_STATUS) == 0)
- if (--boguscnt == 0) {
- pr_warning("%s: i82586 initialization timed out with status %04x, cmd %04x.\n",
- dev->name, readw(shmem+iSCB_STATUS), readw(shmem+iSCB_CMD));
- break;
- }
- /* Issue channel-attn -- the 82586 won't start without a second one. */
- outb(0, ioaddr + SIGNAL_CA);
- }
-
- /* Disable loopback and enable interrupts. */
- outb(0x84, ioaddr + MISC_CTRL);
- if (net_debug > 4)
- pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
- readw(shmem+iSCB_STATUS));
-}
-
-static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- ushort tx_block = lp->tx_head;
- void __iomem *write_ptr = lp->base + tx_block;
- static char padding[ETH_ZLEN];
-
- /* Set the write pointer to the Tx block, and put out the header. */
- writew(0x0000,write_ptr); /* Tx status */
- writew(CMD_INTR|CmdTx,write_ptr+=2); /* Tx command */
- writew(tx_block+16,write_ptr+=2); /* Next command is a NoOp. */
- writew(tx_block+8,write_ptr+=2); /* Data Buffer offset. */
-
- /* Output the data buffer descriptor. */
- writew((pad + length) | 0x8000,write_ptr+=2); /* Byte count parameter. */
- writew(-1,write_ptr+=2); /* No next data buffer. */
- writew(tx_block+22+SCB_BASE,write_ptr+=2); /* Buffer follows the NoOp command. */
- writew(0x0000,write_ptr+=2); /* Buffer address high bits (always zero). */
-
- /* Output the Loop-back NoOp command. */
- writew(0x0000,write_ptr+=2); /* Tx status */
- writew(CmdNOp,write_ptr+=2); /* Tx command */
- writew(tx_block+16,write_ptr+=2); /* Next is myself. */
-
- /* Output the packet at the write pointer. */
- memcpy_toio(write_ptr+2, buf, length);
- if (pad)
- memcpy_toio(write_ptr+length+2, padding, pad);
-
- /* Set the old command link pointing to this send packet. */
- writew(tx_block,lp->base + lp->tx_cmd_link);
- lp->tx_cmd_link = tx_block + 20;
-
- /* Set the next free tx region. */
- lp->tx_head = tx_block + TX_BUF_SIZE;
- if (lp->tx_head > RX_BUF_START - TX_BUF_SIZE)
- lp->tx_head = TX_BUF_START;
-
- if (net_debug > 4) {
- pr_debug("%s: 3c507 @%x send length = %d, tx_block %3x, next %3x.\n",
- dev->name, ioaddr, length, tx_block, lp->tx_head);
- }
-
- /* Grimly block further packets if there has been insufficient reaping. */
- if (++lp->tx_pkts_in_ring < NUM_TX_BUFS)
- netif_wake_queue(dev);
-}
-
-static void el16_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- void __iomem *shmem = lp->base;
- ushort rx_head = lp->rx_head;
- ushort rx_tail = lp->rx_tail;
- ushort boguscount = 10;
- short frame_status;
-
- while ((frame_status = readw(shmem+rx_head)) < 0) { /* Command complete */
- void __iomem *read_frame = lp->base + rx_head;
- ushort rfd_cmd = readw(read_frame+2);
- ushort next_rx_frame = readw(read_frame+4);
- ushort data_buffer_addr = readw(read_frame+6);
- void __iomem *data_frame = lp->base + data_buffer_addr;
- ushort pkt_len = readw(data_frame);
-
- if (rfd_cmd != 0 || data_buffer_addr != rx_head + 22 ||
- (pkt_len & 0xC000) != 0xC000) {
- pr_err("%s: Rx frame at %#x corrupted, "
- "status %04x cmd %04x next %04x "
- "data-buf @%04x %04x.\n",
- dev->name, rx_head, frame_status, rfd_cmd,
- next_rx_frame, data_buffer_addr, pkt_len);
- } else if ((frame_status & 0x2000) == 0) {
- /* Frame Rxed, but with error. */
- dev->stats.rx_errors++;
- if (frame_status & 0x0800) dev->stats.rx_crc_errors++;
- if (frame_status & 0x0400) dev->stats.rx_frame_errors++;
- if (frame_status & 0x0200) dev->stats.rx_fifo_errors++;
- if (frame_status & 0x0100) dev->stats.rx_over_errors++;
- if (frame_status & 0x0080) dev->stats.rx_length_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- pkt_len &= 0x3fff;
- skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
- pr_err("%s: Memory squeeze, dropping packet.\n",
- dev->name);
- dev->stats.rx_dropped++;
- break;
- }
-
- skb_reserve(skb,2);
-
- /* 'skb->data' points to the start of sk_buff data area. */
- memcpy_fromio(skb_put(skb,pkt_len), data_frame + 10, pkt_len);
-
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
-
- /* Clear the status word and set End-of-List on the rx frame. */
- writew(0,read_frame);
- writew(0xC000,read_frame+2);
- /* Clear the end-of-list on the prev. RFD. */
- writew(0x0000,lp->base + rx_tail + 2);
-
- rx_tail = rx_head;
- rx_head = next_rx_frame;
- if (--boguscount == 0)
- break;
- }
-
- lp->rx_head = rx_head;
- lp->rx_tail = rx_tail;
-}
-
-static void netdev_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
-}
-
-static u32 netdev_get_msglevel(struct net_device *dev)
-{
- return debug;
-}
-
-static void netdev_set_msglevel(struct net_device *dev, u32 level)
-{
- debug = level;
-}
-
-static const struct ethtool_ops netdev_ethtool_ops = {
- .get_drvinfo = netdev_get_drvinfo,
- .get_msglevel = netdev_get_msglevel,
- .set_msglevel = netdev_set_msglevel,
-};
-
-#ifdef MODULE
-static struct net_device *dev_3c507;
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "EtherLink16 I/O base address");
-MODULE_PARM_DESC(irq, "(ignored)");
-
-int __init init_module(void)
-{
- if (io == 0)
- pr_notice("3c507: You should not use auto-probing with insmod!\n");
- dev_3c507 = el16_probe(-1);
- return IS_ERR(dev_3c507) ? PTR_ERR(dev_3c507) : 0;
-}
-
-void __exit
-cleanup_module(void)
-{
- struct net_device *dev = dev_3c507;
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- iounmap(((struct net_local *)netdev_priv(dev))->base);
- release_region(dev->base_addr, EL16_IO_EXTENT);
- free_netdev(dev);
-}
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 6aa927af382..1c54e229e3c 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -95,9 +95,6 @@ static char version[] __initdata =
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
-#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
-#define ENABLE_APRICOT
-#endif
#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
@@ -120,8 +117,15 @@ static char version[] __initdata =
#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY 0x00010000
-#define MACH_IS_APRICOT 0
#else
+#error 82596.c: unknown architecture
+#endif
+
+/*
+ * These were the intel versions, left here for reference. There
+ * are currently no x86 users of this legacy i82596 chip.
+ */
+#if 0
#define WSWAPrfd(x) ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x) ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x) ((struct i596_iscp *)((long)x))
@@ -130,7 +134,6 @@ static char version[] __initdata =
#define WSWAPtbd(x) ((struct i596_tbd *)((long)x))
#define WSWAPchar(x) ((char *)((long)x))
#define ISCP_BUSY 0x0001
-#define MACH_IS_APRICOT 1
#endif
/*
@@ -383,11 +386,6 @@ static inline void CA(struct net_device *dev)
i = *(volatile u32 *) (dev->base_addr);
}
#endif
-#ifdef ENABLE_APRICOT
- if (MACH_IS_APRICOT) {
- outw(0, (short) (dev->base_addr) + 4);
- }
-#endif
}
@@ -617,9 +615,6 @@ static void rebuild_rx_bufs(struct net_device *dev)
static int init_i596_mem(struct net_device *dev)
{
struct i596_private *lp = dev->ml_priv;
-#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
- short ioaddr = dev->base_addr;
-#endif
unsigned long flags;
MPU_PORT(dev, PORT_RESET, NULL);
@@ -653,18 +648,6 @@ static int init_i596_mem(struct net_device *dev)
MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));
-#elif defined(ENABLE_APRICOT)
-
- {
- u32 scp = virt_to_bus(&lp->scp);
-
- /* change the scp address */
- outw(0, ioaddr);
- outw(0, ioaddr);
- outb(4, ioaddr + 0xf);
- outw(scp | 2, ioaddr);
- outw(scp >> 16, ioaddr);
- }
#endif
lp->last_cmd = jiffies;
@@ -677,10 +660,6 @@ static int init_i596_mem(struct net_device *dev)
if (MACH_IS_BVME6000)
lp->scp.sysbus = 0x0000004c;
#endif
-#ifdef ENABLE_APRICOT
- if (MACH_IS_APRICOT)
- lp->scp.sysbus = 0x00440000;
-#endif
lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
@@ -698,10 +677,6 @@ static int init_i596_mem(struct net_device *dev)
DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
-#if defined(ENABLE_APRICOT)
- (void) inb(ioaddr + 0x10);
- outb(4, ioaddr + 0xf);
-#endif
CA(dev);
if (wait_istat(dev,lp,1000,"initialization timed out"))
@@ -1203,43 +1178,6 @@ struct net_device * __init i82596_probe(int unit)
goto found;
}
#endif
-#ifdef ENABLE_APRICOT
- {
- int checksum = 0;
- int ioaddr = 0x300;
-
- /* this is easy: the ethernet interface can only be at 0x300 */
- /* first check nothing is already registered here */
-
- if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
- printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
- err = -EBUSY;
- goto out;
- }
-
- dev->base_addr = ioaddr;
-
- for (i = 0; i < 8; i++) {
- eth_addr[i] = inb(ioaddr + 8 + i);
- checksum += eth_addr[i];
- }
-
- /* checksum is a multiple of 0x100; got this wrong the first time:
- some machines have 0x100, some 0x200. The DOS driver doesn't
- even bother with the checksum.
- Some other boards trip the checksum.. but then appear as
- ether address 0. Trap these - AC */
-
- if ((checksum % 0x100) ||
- (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
- err = -ENODEV;
- goto out1;
- }
-
- dev->irq = 10;
- goto found;
- }
-#endif
err = -ENODEV;
goto out;
@@ -1296,9 +1234,6 @@ out2:
#endif
free_page ((u32)(dev->mem_start));
out1:
-#ifdef ENABLE_APRICOT
- release_region(dev->base_addr, I596_TOTAL_SIZE);
-#endif
out:
free_netdev(dev);
return ERR_PTR(err);
@@ -1455,10 +1390,6 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id)
*ethirq = 3;
}
#endif
-#ifdef ENABLE_APRICOT
- (void) inb(ioaddr + 0x10);
- outb(4, ioaddr + 0xf);
-#endif
CA(dev);
DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
@@ -1589,11 +1520,6 @@ static void set_multicast_list(struct net_device *dev)
#ifdef MODULE
static struct net_device *dev_82596;
-#ifdef ENABLE_APRICOT
-module_param(irq, int, 0);
-MODULE_PARM_DESC(irq, "Apricot IRQ number");
-#endif
-
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");
@@ -1620,10 +1546,6 @@ void __exit cleanup_module(void)
IOMAP_FULL_CACHING);
#endif
free_page ((u32)(dev_82596->mem_start));
-#ifdef ENABLE_APRICOT
- /* If we don't do this, we can't re-insmod it later. */
- release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
-#endif
free_netdev(dev_82596);
}
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig
index 959faf7388e..955d929cd00 100644
--- a/drivers/net/ethernet/i825xx/Kconfig
+++ b/drivers/net/ethernet/i825xx/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_I825XX
bool "Intel (82586/82593/82596) devices"
default y
depends on NET_VENDOR_INTEL && (ISA || ISA_DMA_API || ARM || \
- ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+ ARCH_ACORN || SNI_RM || SUN3 || \
GSC || BVME6000 || MVME16x || EXPERIMENTAL)
---help---
If you have a network (Ethernet) card belonging to this class, say Y
@@ -20,29 +20,6 @@ config NET_VENDOR_I825XX
if NET_VENDOR_I825XX
-config ELPLUS
- tristate "3c505 \"EtherLink Plus\" support"
- depends on ISA && ISA_DMA_API
- ---help---
- Information about this network (Ethernet) card can be found in
- <file:Documentation/networking/3c505.txt>. If you have a card of
- this type, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c505.
-
-config EL16
- tristate "3c507 \"EtherLink 16\" support (EXPERIMENTAL)"
- depends on ISA && EXPERIMENTAL
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called 3c507.
-
config ARM_ETHER1
tristate "Acorn Ether1 support"
depends on ARM && ARCH_ACORN
@@ -50,17 +27,6 @@ config ARM_ETHER1
If you have an Acorn system with one of these (AKA25) network cards,
you should say Y to this option if you wish to use it with Linux.
-config APRICOT
- tristate "Apricot Xen-II on board Ethernet"
- depends on ISA
- ---help---
- If you have a network (Ethernet) controller of this type, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called apricot.
-
config BVME6000_NET
tristate "BVME6000 Ethernet support"
depends on BVME6000
@@ -70,33 +36,6 @@ config BVME6000_NET
in your kernel.
To compile this driver as a module, choose M here.
-config EEXPRESS
- tristate "EtherExpress 16 support"
- depends on ISA
- ---help---
- If you have an EtherExpress16 network (Ethernet) card, say Y and
- read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that the Intel
- EtherExpress16 card used to be regarded as a very poor choice
- because the driver was very unreliable. We now have a new driver
- that should do better.
-
- To compile this driver as a module, choose M here. The module
- will be called eexpress.
-
-config EEXPRESS_PRO
- tristate "EtherExpressPro support/EtherExpress 10 (i82595) support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y. This
- driver supports Intel i82595{FX,TX} based boards. Note however
- that the EtherExpress PRO/100 Ethernet card has its own separate
- driver. Please read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called eepro.
-
config LASI_82596
tristate "Lasi ethernet"
depends on GSC
@@ -104,14 +43,6 @@ config LASI_82596
Say Y here to support the builtin Intel 82596 ethernet controller
found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
-config LP486E
- tristate "LP486E on board Ethernet"
- depends on ISA
- ---help---
- Say Y here to support the 82596-based on-board Ethernet controller
- for the Panther motherboard, which is one of the two shipped in the
- Intel Professional Workstation.
-
config MVME16x_NET
tristate "MVME16x Ethernet support"
depends on MVME16x
@@ -121,17 +52,6 @@ config MVME16x_NET
driver for this chip in your kernel.
To compile this driver as a module, choose M here.
-config NI52
- tristate "NI5210 support"
- depends on ISA
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called ni52.
-
config SNI_82596
tristate "SNI RM ethernet"
depends on SNI_RM
@@ -148,14 +68,4 @@ config SUN3_82586
that this driver does not support 82586-based adapters on additional
VME boards.
-config ZNET
- tristate "Zenith Z-Note support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && ISA_DMA_API && X86
- ---help---
- The Zenith Z-Note notebook computer has a built-in network
- (Ethernet) card, and this is the Linux driver for it. Note that the
- IBM Thinkpad 300 is compatible with the Z-Note and is also supported
- by this driver. Read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
endif # NET_VENDOR_I825XX
diff --git a/drivers/net/ethernet/i825xx/Makefile b/drivers/net/ethernet/i825xx/Makefile
index 6adff85e8ec..8c8dcd29c40 100644
--- a/drivers/net/ethernet/i825xx/Makefile
+++ b/drivers/net/ethernet/i825xx/Makefile
@@ -3,15 +3,7 @@
#
obj-$(CONFIG_ARM_ETHER1) += ether1.o
-obj-$(CONFIG_EEXPRESS) += eexpress.o
-obj-$(CONFIG_EEXPRESS_PRO) += eepro.o
-obj-$(CONFIG_ELPLUS) += 3c505.o
-obj-$(CONFIG_EL16) += 3c507.o
-obj-$(CONFIG_LP486E) += lp486e.o
-obj-$(CONFIG_NI52) += ni52.o
obj-$(CONFIG_SUN3_82586) += sun3_82586.o
-obj-$(CONFIG_ZNET) += znet.o
-obj-$(CONFIG_APRICOT) += 82596.o
obj-$(CONFIG_LASI_82596) += lasi_82596.o
obj-$(CONFIG_SNI_82596) += sni_82596.o
obj-$(CONFIG_MVME16x_NET) += 82596.o
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
deleted file mode 100644
index 7f49fd54c52..00000000000
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ /dev/null
@@ -1,1822 +0,0 @@
-/* eepro.c: Intel EtherExpress Pro/10 device driver for Linux. */
-/*
- Written 1994, 1995,1996 by Bao C. Ha.
-
- Copyright (C) 1994, 1995,1996 by Bao C. Ha.
-
- This software may be used and distributed
- according to the terms of the GNU General Public License,
- incorporated herein by reference.
-
- The author may be reached at bao.ha@srs.gov
- or 418 Hastings Place, Martinez, GA 30907.
-
- Things remaining to do:
- Better record keeping of errors.
- Eliminate transmit interrupt to reduce overhead.
- Implement "concurrent processing". I won't be doing it!
-
- Bugs:
-
- If you have a problem where the 82595 is not detected after a
- reboot (warm reset), disabling the FLASH memory should fix it.
- This is a hardware compatibility problem.
-
- Versions:
- 0.13b basic ethtool support (aris, 09/13/2004)
- 0.13a in memory shortage, drop packets also in board
- (Michael Westermann <mw@microdata-pos.de>, 07/30/2002)
- 0.13 irq sharing, rewrote probe function, fixed a nasty bug in
- hardware_send_packet and a major cleanup (aris, 11/08/2001)
- 0.12d fixing a problem with single card detected as eight eth devices
- fixing a problem with sudden drop in card performance
- (chris (asdn@go2.pl), 10/29/2001)
- 0.12c fixing some problems with old cards (aris, 01/08/2001)
- 0.12b misc fixes (aris, 06/26/2000)
- 0.12a port of version 0.12a of 2.2.x kernels to 2.3.x
- (aris (aris@conectiva.com.br), 05/19/2000)
- 0.11e some tweaks about multiple cards support (PdP, jul/aug 1999)
- 0.11d added __initdata, __init stuff; call spin_lock_init
- in eepro_probe1. Replaced "eepro" by dev->name. Augmented
- the code protected by spin_lock in interrupt routine
- (PdP, 12/12/1998)
- 0.11c minor cleanup (PdP, RMC, 09/12/1998)
- 0.11b Pascal Dupuis (dupuis@lei.ucl.ac.be): works as a module
- under 2.1.xx. Debug messages are flagged as KERN_DEBUG to
- avoid console flooding. Added locking at critical parts. Now
- the dawn thing is SMP safe.
- 0.11a Attempt to get 2.1.xx support up (RMC)
- 0.11 Brian Candler added support for multiple cards. Tested as
- a module, no idea if it works when compiled into kernel.
-
- 0.10e Rick Bressler notified me that ifconfig up;ifconfig down fails
- because the irq is lost somewhere. Fixed that by moving
- request_irq and free_irq to eepro_open and eepro_close respectively.
- 0.10d Ugh! Now Wakeup works. Was seriously broken in my first attempt.
- I'll need to find a way to specify an ioport other than
- the default one in the PnP case. PnP definitively sucks.
- And, yes, this is not the only reason.
- 0.10c PnP Wakeup Test for 595FX. uncomment #define PnPWakeup;
- to use.
- 0.10b Should work now with (some) Pro/10+. At least for
- me (and my two cards) it does. _No_ guarantee for
- function with non-Pro/10+ cards! (don't have any)
- (RMC, 9/11/96)
-
- 0.10 Added support for the Etherexpress Pro/10+. The
- IRQ map was changed significantly from the old
- pro/10. The new interrupt map was provided by
- Rainer M. Canavan (Canavan@Zeus.cs.bonn.edu).
- (BCH, 9/3/96)
-
- 0.09 Fixed a race condition in the transmit algorithm,
- which causes crashes under heavy load with fast
- pentium computers. The performance should also
- improve a bit. The size of RX buffer, and hence
- TX buffer, can also be changed via lilo or insmod.
- (BCH, 7/31/96)
-
- 0.08 Implement 32-bit I/O for the 82595TX and 82595FX
- based lan cards. Disable full-duplex mode if TPE
- is not used. (BCH, 4/8/96)
-
- 0.07a Fix a stat report which counts every packet as a
- heart-beat failure. (BCH, 6/3/95)
-
- 0.07 Modified to support all other 82595-based lan cards.
- The IRQ vector of the EtherExpress Pro will be set
- according to the value saved in the EEPROM. For other
- cards, I will do autoirq_request() to grab the next
- available interrupt vector. (BCH, 3/17/95)
-
- 0.06a,b Interim released. Minor changes in the comments and
- print out format. (BCH, 3/9/95 and 3/14/95)
-
- 0.06 First stable release that I am comfortable with. (BCH,
- 3/2/95)
-
- 0.05 Complete testing of multicast. (BCH, 2/23/95)
-
- 0.04 Adding multicast support. (BCH, 2/14/95)
-
- 0.03 First widely alpha release for public testing.
- (BCH, 2/14/95)
-
-*/
-
-static const char version[] =
- "eepro.c: v0.13b 09/13/2004 aris@cathedrallabs.org\n";
-
-#include <linux/module.h>
-
-/*
- Sources:
-
- This driver wouldn't have been written without the availability
- of the Crynwr's Lan595 driver source code. It helps me to
- familiarize with the 82595 chipset while waiting for the Intel
- documentation. I also learned how to detect the 82595 using
- the packet driver's technique.
-
- This driver is written by cutting and pasting the skeleton.c driver
- provided by Donald Becker. I also borrowed the EEPROM routine from
- Donald Becker's 82586 driver.
-
- Datasheet for the Intel 82595 (including the TX and FX version). It
- provides just enough info that the casual reader might think that it
- documents the i82595.
-
- The User Manual for the 82595. It provides a lot of the missing
- information.
-
-*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-#include <linux/ethtool.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define DRV_NAME "eepro"
-#define DRV_VERSION "0.13c"
-
-#define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) )
-/* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
-#define SLOW_DOWN inb(0x80)
-/* udelay(2) */
-#define compat_init_data __initdata
-enum iftype { AUI=0, BNC=1, TPE=2 };
-
-/* First, a few definitions that the brave might change. */
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int eepro_portlist[] compat_init_data =
- { 0x300, 0x210, 0x240, 0x280, 0x2C0, 0x200, 0x320, 0x340, 0x360, 0};
-/* note: 0x300 is default, the 595FX supports ALL IO Ports
- from 0x000 to 0x3F0, some of which are reserved in PCs */
-
-/* To try the (not-really PnP Wakeup: */
-/*
-#define PnPWakeup
-*/
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define EEPRO_IO_EXTENT 16
-
-/* Different 82595 chips */
-#define LAN595 0
-#define LAN595TX 1
-#define LAN595FX 2
-#define LAN595FX_10ISA 3
-
-/* Information that needs to be kept for each board. */
-struct eepro_local {
- unsigned rx_start;
- unsigned tx_start; /* start of the transmit chain */
- int tx_last; /* pointer to last packet in the transmit chain */
- unsigned tx_end; /* end of the transmit chain (plus 1) */
- int eepro; /* 1 for the EtherExpress Pro/10,
- 2 for the EtherExpress Pro/10+,
- 3 for the EtherExpress 10 (blue cards),
- 0 for other 82595-based lan cards. */
- int version; /* a flag to indicate if this is a TX or FX
- version of the 82595 chip. */
- int stepping;
-
- spinlock_t lock; /* Serializing lock */
-
- unsigned rcv_ram; /* pre-calculated space for rx */
- unsigned xmt_ram; /* pre-calculated space for tx */
- unsigned char xmt_bar;
- unsigned char xmt_lower_limit_reg;
- unsigned char xmt_upper_limit_reg;
- short xmt_lower_limit;
- short xmt_upper_limit;
- short rcv_lower_limit;
- short rcv_upper_limit;
- unsigned char eeprom_reg;
- unsigned short word[8];
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00 /* Etherexpress Pro/10 */
-#define SA_ADDR1 0xaa
-#define SA_ADDR2 0x00
-
-#define GetBit(x,y) ((x & (1<<y))>>y)
-
-/* EEPROM Word 0: */
-#define ee_PnP 0 /* Plug 'n Play enable bit */
-#define ee_Word1 1 /* Word 1? */
-#define ee_BusWidth 2 /* 8/16 bit */
-#define ee_FlashAddr 3 /* Flash Address */
-#define ee_FlashMask 0x7 /* Mask */
-#define ee_AutoIO 6 /* */
-#define ee_reserved0 7 /* =0! */
-#define ee_Flash 8 /* Flash there? */
-#define ee_AutoNeg 9 /* Auto Negotiation enabled? */
-#define ee_IO0 10 /* IO Address LSB */
-#define ee_IO0Mask 0x /*...*/
-#define ee_IO1 15 /* IO MSB */
-
-/* EEPROM Word 1: */
-#define ee_IntSel 0 /* Interrupt */
-#define ee_IntMask 0x7
-#define ee_LI 3 /* Link Integrity 0= enabled */
-#define ee_PC 4 /* Polarity Correction 0= enabled */
-#define ee_TPE_AUI 5 /* PortSelection 1=TPE */
-#define ee_Jabber 6 /* Jabber prevention 0= enabled */
-#define ee_AutoPort 7 /* Auto Port Selection 1= Disabled */
-#define ee_SMOUT 8 /* SMout Pin Control 0= Input */
-#define ee_PROM 9 /* Flash EPROM / PROM 0=Flash */
-#define ee_reserved1 10 /* .. 12 =0! */
-#define ee_AltReady 13 /* Alternate Ready, 0=normal */
-#define ee_reserved2 14 /* =0! */
-#define ee_Duplex 15
-
-/* Word2,3,4: */
-#define ee_IA5 0 /*bit start for individual Addr Byte 5 */
-#define ee_IA4 8 /*bit start for individual Addr Byte 4 */
-#define ee_IA3 0 /*bit start for individual Addr Byte 3 */
-#define ee_IA2 8 /*bit start for individual Addr Byte 2 */
-#define ee_IA1 0 /*bit start for individual Addr Byte 1 */
-#define ee_IA0 8 /*bit start for individual Addr Byte 0 */
-
-/* Word 5: */
-#define ee_BNC_TPE 0 /* 0=TPE */
-#define ee_BootType 1 /* 00=None, 01=IPX, 10=ODI, 11=NDIS */
-#define ee_BootTypeMask 0x3
-#define ee_NumConn 3 /* Number of Connections 0= One or Two */
-#define ee_FlashSock 4 /* Presence of Flash Socket 0= Present */
-#define ee_PortTPE 5
-#define ee_PortBNC 6
-#define ee_PortAUI 7
-#define ee_PowerMgt 10 /* 0= disabled */
-#define ee_CP 13 /* Concurrent Processing */
-#define ee_CPMask 0x7
-
-/* Word 6: */
-#define ee_Stepping 0 /* Stepping info */
-#define ee_StepMask 0x0F
-#define ee_BoardID 4 /* Manufacturer Board ID, reserved */
-#define ee_BoardMask 0x0FFF
-
-/* Word 7: */
-#define ee_INT_TO_IRQ 0 /* int to IRQ Mapping = 0x1EB8 for Pro/10+ */
-#define ee_FX_INT2IRQ 0x1EB8 /* the _only_ mapping allowed for FX chips */
-
-/*..*/
-#define ee_SIZE 0x40 /* total EEprom Size */
-#define ee_Checksum 0xBABA /* initial and final value for adding checksum */
-
-
-/* Card identification via EEprom: */
-#define ee_addr_vendor 0x10 /* Word offset for EISA Vendor ID */
-#define ee_addr_id 0x11 /* Word offset for Card ID */
-#define ee_addr_SN 0x12 /* Serial Number */
-#define ee_addr_CRC_8 0x14 /* CRC over last three Bytes */
-
-
-#define ee_vendor_intel0 0x25 /* Vendor ID Intel */
-#define ee_vendor_intel1 0xD4
-#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
-#define ee_id_eepro10p1 0x31
-
-#define TX_TIMEOUT ((4*HZ)/10)
-
-/* Index to functions, as function prototypes. */
-
-static int eepro_probe1(struct net_device *dev, int autoprobe);
-static int eepro_open(struct net_device *dev);
-static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t eepro_interrupt(int irq, void *dev_id);
-static void eepro_rx(struct net_device *dev);
-static void eepro_transmit_interrupt(struct net_device *dev);
-static int eepro_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void eepro_tx_timeout (struct net_device *dev);
-
-static int read_eeprom(int ioaddr, int location, struct net_device *dev);
-static int hardware_send_packet(struct net_device *dev, void *buf, short length);
-static int eepro_grab_irq(struct net_device *dev);
-
-/*
- Details of the i82595.
-
-You will need either the datasheet or the user manual to understand what
-is going on here. The 82595 is very different from the 82586, 82593.
-
-The receive algorithm in eepro_rx() is just an implementation of the
-RCV ring structure that the Intel 82595 imposes at the hardware level.
-The receive buffer is set at 24K, and the transmit buffer is 8K. I
-am assuming that the total buffer memory is 32K, which is true for the
-Intel EtherExpress Pro/10. If it is less than that on a generic card,
-the driver will be broken.
-
-The transmit algorithm in the hardware_send_packet() is similar to the
-one in the eepro_rx(). The transmit buffer is a ring linked list.
-I just queue the next available packet to the end of the list. In my
-system, the 82595 is so fast that the list seems to always contain a
-single packet. In other systems with faster computers and more congested
-network traffic, the ring linked list should improve performance by
-allowing up to 8K worth of packets to be queued.
-
-The sizes of the receive and transmit buffers can now be changed via lilo
-or insmod. Lilo uses the appended line "ether=io,irq,debug,rx-buffer,eth0"
-where rx-buffer is in KB units. Modules use the parameter mem, which is
-also in KB units, for example "insmod io=io-address irq=0 mem=rx-buffer."
-The receive buffer has to be more than 3K and less than 29K. Otherwise,
-it is reset to the default of 24K, and, hence, 8K for the transmit
-buffer (transmit-buffer = 32K - receive-buffer).
-
-*/
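
A minimal sketch of that split, for illustration only (the helper name eepro_split_ram is made up; it uses the RAM_SIZE and RCV_DEFAULT_RAM constants defined just below and mirrors the range check done in eepro_probe1()):

static void eepro_split_ram(unsigned rx_bytes,
			    unsigned *rcv_ram, unsigned *xmt_ram)
{
	/* Out-of-range requests fall back to the 24K default. */
	if (rx_bytes < 3 * 1024 || rx_bytes > 29 * 1024)
		rx_bytes = RCV_DEFAULT_RAM;
	*rcv_ram = rx_bytes;
	*xmt_ram = RAM_SIZE - rx_bytes;	/* transmit gets the rest of the 32K */
}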
-#define RAM_SIZE 0x8000
-
-#define RCV_HEADER 8
-#define RCV_DEFAULT_RAM 0x6000
-
-#define XMT_HEADER 8
-#define XMT_DEFAULT_RAM (RAM_SIZE - RCV_DEFAULT_RAM)
-
-#define XMT_START_PRO RCV_DEFAULT_RAM
-#define XMT_START_10 0x0000
-#define RCV_START_PRO 0x0000
-#define RCV_START_10 XMT_DEFAULT_RAM
-
-#define RCV_DONE 0x0008
-#define RX_OK 0x2000
-#define RX_ERROR 0x0d81
-
-#define TX_DONE_BIT 0x0080
-#define TX_OK 0x2000
-#define CHAIN_BIT 0x8000
-#define XMT_STATUS 0x02
-#define XMT_CHAIN 0x04
-#define XMT_COUNT 0x06
-
-#define BANK0_SELECT 0x00
-#define BANK1_SELECT 0x40
-#define BANK2_SELECT 0x80
-
-/* Bank 0 registers */
-#define COMMAND_REG 0x00 /* Register 0 */
-#define MC_SETUP 0x03
-#define XMT_CMD 0x04
-#define DIAGNOSE_CMD 0x07
-#define RCV_ENABLE_CMD 0x08
-#define RCV_DISABLE_CMD 0x0a
-#define STOP_RCV_CMD 0x0b
-#define RESET_CMD 0x0e
-#define POWER_DOWN_CMD 0x18
-#define RESUME_XMT_CMD 0x1c
-#define SEL_RESET_CMD 0x1e
-#define STATUS_REG 0x01 /* Register 1 */
-#define RX_INT 0x02
-#define TX_INT 0x04
-#define EXEC_STATUS 0x30
-#define ID_REG 0x02 /* Register 2 */
-#define R_ROBIN_BITS 0xc0 /* round robin counter */
-#define ID_REG_MASK 0x2c
-#define ID_REG_SIG 0x24
-#define AUTO_ENABLE 0x10
-#define INT_MASK_REG 0x03 /* Register 3 */
-#define RX_STOP_MASK 0x01
-#define RX_MASK 0x02
-#define TX_MASK 0x04
-#define EXEC_MASK 0x08
-#define ALL_MASK 0x0f
-#define IO_32_BIT 0x10
-#define RCV_BAR 0x04 /* The following are word (16-bit) registers */
-#define RCV_STOP 0x06
-
-#define XMT_BAR_PRO 0x0a
-#define XMT_BAR_10 0x0b
-
-#define HOST_ADDRESS_REG 0x0c
-#define IO_PORT 0x0e
-#define IO_PORT_32_BIT 0x0c
-
-/* Bank 1 registers */
-#define REG1 0x01
-#define WORD_WIDTH 0x02
-#define INT_ENABLE 0x80
-#define INT_NO_REG 0x02
-#define RCV_LOWER_LIMIT_REG 0x08
-#define RCV_UPPER_LIMIT_REG 0x09
-
-#define XMT_LOWER_LIMIT_REG_PRO 0x0a
-#define XMT_UPPER_LIMIT_REG_PRO 0x0b
-#define XMT_LOWER_LIMIT_REG_10 0x0b
-#define XMT_UPPER_LIMIT_REG_10 0x0a
-
-/* Bank 2 registers */
-#define XMT_Chain_Int 0x20 /* Interrupt at the end of the transmit chain */
-#define XMT_Chain_ErrStop 0x40 /* Interrupt at the end of the chain even if there are errors */
-#define RCV_Discard_BadFrame 0x80 /* Throw bad frames away, and continue to receive others */
-#define REG2 0x02
-#define PRMSC_Mode 0x01
-#define Multi_IA 0x20
-#define REG3 0x03
-#define TPE_BIT 0x04
-#define BNC_BIT 0x20
-#define REG13 0x0d
-#define FDX 0x00
-#define A_N_ENABLE 0x02
-
-#define I_ADD_REG0 0x04
-#define I_ADD_REG1 0x05
-#define I_ADD_REG2 0x06
-#define I_ADD_REG3 0x07
-#define I_ADD_REG4 0x08
-#define I_ADD_REG5 0x09
-
-#define EEPROM_REG_PRO 0x0a
-#define EEPROM_REG_10 0x0b
-
-#define EESK 0x01
-#define EECS 0x02
-#define EEDI 0x04
-#define EEDO 0x08
-
-/* do a full reset */
-#define eepro_reset(ioaddr) outb(RESET_CMD, ioaddr)
-
-/* do a nice reset */
-#define eepro_sel_reset(ioaddr) { \
- outb(SEL_RESET_CMD, ioaddr); \
- SLOW_DOWN; \
- SLOW_DOWN; \
- }
-
-/* disable all interrupts */
-#define eepro_dis_int(ioaddr) outb(ALL_MASK, ioaddr + INT_MASK_REG)
-
-/* clear all interrupts */
-#define eepro_clear_int(ioaddr) outb(ALL_MASK, ioaddr + STATUS_REG)
-
-/* enable tx/rx */
-#define eepro_en_int(ioaddr) outb(ALL_MASK & ~(RX_MASK | TX_MASK), \
- ioaddr + INT_MASK_REG)
-
-/* enable exec event interrupt */
-#define eepro_en_intexec(ioaddr) outb(ALL_MASK & ~(EXEC_MASK), ioaddr + INT_MASK_REG)
-
-/* enable rx */
-#define eepro_en_rx(ioaddr) outb(RCV_ENABLE_CMD, ioaddr)
-
-/* disable rx */
-#define eepro_dis_rx(ioaddr) outb(RCV_DISABLE_CMD, ioaddr)
-
-/* switch bank */
-#define eepro_sw2bank0(ioaddr) outb(BANK0_SELECT, ioaddr)
-#define eepro_sw2bank1(ioaddr) outb(BANK1_SELECT, ioaddr)
-#define eepro_sw2bank2(ioaddr) outb(BANK2_SELECT, ioaddr)
-
-/* enable interrupt line */
-#define eepro_en_intline(ioaddr) outb(inb(ioaddr + REG1) | INT_ENABLE,\
- ioaddr + REG1)
-
-/* disable interrupt line */
-#define eepro_dis_intline(ioaddr) outb(inb(ioaddr + REG1) & 0x7f, \
- ioaddr + REG1);
-
-/* set diagnose flag */
-#define eepro_diag(ioaddr) outb(DIAGNOSE_CMD, ioaddr)
-
-/* ack for rx int */
-#define eepro_ack_rx(ioaddr) outb (RX_INT, ioaddr + STATUS_REG)
-
-/* ack for tx int */
-#define eepro_ack_tx(ioaddr) outb (TX_INT, ioaddr + STATUS_REG)
-
-/* a complete sel reset */
-#define eepro_complete_selreset(ioaddr) { \
- dev->stats.tx_errors++;\
- eepro_sel_reset(ioaddr);\
- lp->tx_end = \
- lp->xmt_lower_limit;\
- lp->tx_start = lp->tx_end;\
- lp->tx_last = 0;\
- dev->trans_start = jiffies;\
- netif_wake_queue(dev);\
- eepro_en_rx(ioaddr);\
- }
-
-/* Check for a network adaptor of this type, and return '0' if one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- If dev->base_addr == 2, allocate space for the device and return success
- (detachable devices only).
- */
-static int __init do_eepro_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
-#ifdef PnPWakeup
- /* XXXX for multiple cards should this only be run once? */
-
- /* Wakeup: */
- #define WakeupPort 0x279
- #define WakeupSeq {0x6A, 0xB5, 0xDA, 0xED, 0xF6, 0xFB, 0x7D, 0xBE,\
- 0xDF, 0x6F, 0x37, 0x1B, 0x0D, 0x86, 0xC3, 0x61,\
- 0xB0, 0x58, 0x2C, 0x16, 0x8B, 0x45, 0xA2, 0xD1,\
- 0xE8, 0x74, 0x3A, 0x9D, 0xCE, 0xE7, 0x73, 0x43}
-
- {
- unsigned short int WS[32]=WakeupSeq;
-
- if (request_region(WakeupPort, 2, "eepro wakeup")) {
- if (net_debug>5)
- printk(KERN_DEBUG "Waking UP\n");
-
- outb_p(0,WakeupPort);
- outb_p(0,WakeupPort);
- for (i=0; i<32; i++) {
- outb_p(WS[i],WakeupPort);
- if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
- }
-
- release_region(WakeupPort, 2);
- } else
- printk(KERN_WARNING "PnP wakeup region busy!\n");
- }
-#endif
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return eepro_probe1(dev, 0);
-
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; eepro_portlist[i]; i++) {
- dev->base_addr = eepro_portlist[i];
- dev->irq = irq;
- if (eepro_probe1(dev, 1) == 0)
- return 0;
- }
-
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init eepro_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct eepro_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_eepro_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static void __init printEEPROMInfo(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned short Word;
- int i,j;
-
- j = ee_Checksum;
- for (i = 0; i < 8; i++)
- j += lp->word[i];
- for ( ; i < ee_SIZE; i++)
- j += read_eeprom(ioaddr, i, dev);
-
- printk(KERN_DEBUG "Checksum: %#x\n",j&0xffff);
-
- Word = lp->word[0];
- printk(KERN_DEBUG "Word0:\n");
- printk(KERN_DEBUG " Plug 'n Pray: %d\n",GetBit(Word,ee_PnP));
- printk(KERN_DEBUG " Buswidth: %d\n",(GetBit(Word,ee_BusWidth)+1)*8 );
- printk(KERN_DEBUG " AutoNegotiation: %d\n",GetBit(Word,ee_AutoNeg));
- printk(KERN_DEBUG " IO Address: %#x\n", (Word>>ee_IO0)<<4);
-
- if (net_debug>4) {
- Word = lp->word[1];
- printk(KERN_DEBUG "Word1:\n");
- printk(KERN_DEBUG " INT: %d\n", Word & ee_IntMask);
- printk(KERN_DEBUG " LI: %d\n", GetBit(Word,ee_LI));
- printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
- printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
- printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
- printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
- printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
- }
-
- Word = lp->word[5];
- printk(KERN_DEBUG "Word5:\n");
- printk(KERN_DEBUG " BNC: %d\n",GetBit(Word,ee_BNC_TPE));
- printk(KERN_DEBUG " NumConnectors: %d\n",GetBit(Word,ee_NumConn));
- printk(KERN_DEBUG " Has ");
- if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
- if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
- if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
- printk(KERN_DEBUG "port(s)\n");
-
- Word = lp->word[6];
- printk(KERN_DEBUG "Word6:\n");
- printk(KERN_DEBUG " Stepping: %d\n",Word & ee_StepMask);
- printk(KERN_DEBUG " BoardID: %d\n",Word>>ee_BoardID);
-
- Word = lp->word[7];
- printk(KERN_DEBUG "Word7:\n");
- printk(KERN_DEBUG " INT to IRQ:\n");
-
- for (i=0, j=0; i<15; i++)
- if (GetBit(Word,i)) printk(KERN_DEBUG " INT%d -> IRQ %d;",j++,i);
-
- printk(KERN_DEBUG "\n");
-}
-
-/* function to recalculate the limits of buffer based on rcv_ram */
-static void eepro_recalc (struct net_device *dev)
-{
- struct eepro_local * lp;
-
- lp = netdev_priv(dev);
- lp->xmt_ram = RAM_SIZE - lp->rcv_ram;
-
- if (lp->eepro == LAN595FX_10ISA) {
- lp->xmt_lower_limit = XMT_START_10;
- lp->xmt_upper_limit = (lp->xmt_ram - 2);
- lp->rcv_lower_limit = lp->xmt_ram;
- lp->rcv_upper_limit = (RAM_SIZE - 2);
- }
- else {
- lp->rcv_lower_limit = RCV_START_PRO;
- lp->rcv_upper_limit = (lp->rcv_ram - 2);
- lp->xmt_lower_limit = lp->rcv_ram;
- lp->xmt_upper_limit = (RAM_SIZE - 2);
- }
-}
-
-/* prints boot-time info */
-static void __init eepro_print_info (struct net_device *dev)
-{
- struct eepro_local * lp = netdev_priv(dev);
- int i;
- const char * ifmap[] = {"AUI", "10Base2", "10BaseT"};
-
- i = inb(dev->base_addr + ID_REG);
- printk(KERN_DEBUG " id: %#x ",i);
- printk(" io: %#x ", (unsigned)dev->base_addr);
-
- switch (lp->eepro) {
- case LAN595FX_10ISA:
- printk("%s: Intel EtherExpress 10 ISA\n at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595FX:
- printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595TX:
- printk("%s: Intel EtherExpress Pro/10 ISA at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- case LAN595:
- printk("%s: Intel 82595-based lan card at %#x,",
- dev->name, (unsigned)dev->base_addr);
- break;
- }
-
- printk(" %pM", dev->dev_addr);
-
- if (net_debug > 3)
- printk(KERN_DEBUG ", %dK RCV buffer",
- (int)(lp->rcv_ram)/1024);
-
- if (dev->irq > 2)
- printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
- else
- printk(", %s.\n", ifmap[dev->if_port]);
-
- if (net_debug > 3) {
- i = lp->word[5];
- if (i & 0x2000) /* bit 13 of EEPROM word 5 */
- printk(KERN_DEBUG "%s: Concurrent Processing is "
- "enabled but not used!\n", dev->name);
- }
-
- /* Check the station address for the manufacturer's code */
- if (net_debug>3)
- printEEPROMInfo(dev);
-}
-
-static const struct ethtool_ops eepro_ethtool_ops;
-
-static const struct net_device_ops eepro_netdev_ops = {
- .ndo_open = eepro_open,
- .ndo_stop = eepro_close,
- .ndo_start_xmit = eepro_send_packet,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_tx_timeout = eepro_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* This is the real probe routine. Linux has a history of friendly device
- probes on the ISA bus. A good device probe avoids doing writes, and
- verifies that the correct device exists and functions. */
-
-static int __init eepro_probe1(struct net_device *dev, int autoprobe)
-{
- unsigned short station_addr[3], id, counter;
- int i;
- struct eepro_local *lp;
- int ioaddr = dev->base_addr;
- int err;
-
- /* Grab the region so we can find another board if autoIRQ fails. */
- if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
- if (!autoprobe)
- printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
- ioaddr);
- return -EBUSY;
- }
-
- /* Now, we are going to check for the signature of the
- ID_REG (register 2 of bank 0) */
-
- id = inb(ioaddr + ID_REG);
-
- if ((id & ID_REG_MASK) != ID_REG_SIG)
- goto exit;
-
- /* We seem to have the 82595 signature, let's
- play with its counter (last 2 bits of
- register 2 of bank 0) to be sure. */
-
- counter = id & R_ROBIN_BITS;
-
- if ((inb(ioaddr + ID_REG) & R_ROBIN_BITS) != (counter + 0x40))
- goto exit;
-
- lp = netdev_priv(dev);
- memset(lp, 0, sizeof(struct eepro_local));
- lp->xmt_bar = XMT_BAR_PRO;
- lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_PRO;
- lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_PRO;
- lp->eeprom_reg = EEPROM_REG_PRO;
- spin_lock_init(&lp->lock);
-
- /* Now, get the ethernet hardware address from
- the EEPROM */
- station_addr[0] = read_eeprom(ioaddr, 2, dev);
-
- /* FIXME - find another way to know that we've found
- * an Etherexpress 10
- */
- if (station_addr[0] == 0x0000 || station_addr[0] == 0xffff) {
- lp->eepro = LAN595FX_10ISA;
- lp->eeprom_reg = EEPROM_REG_10;
- lp->xmt_lower_limit_reg = XMT_LOWER_LIMIT_REG_10;
- lp->xmt_upper_limit_reg = XMT_UPPER_LIMIT_REG_10;
- lp->xmt_bar = XMT_BAR_10;
- station_addr[0] = read_eeprom(ioaddr, 2, dev);
- }
-
- /* get all words at once. will be used here and for ethtool */
- for (i = 0; i < 8; i++) {
- lp->word[i] = read_eeprom(ioaddr, i, dev);
- }
- station_addr[1] = lp->word[3];
- station_addr[2] = lp->word[4];
-
- if (!lp->eepro) {
- if (lp->word[7] == ee_FX_INT2IRQ)
- lp->eepro = 2;
- else if (station_addr[2] == SA_ADDR1)
- lp->eepro = 1;
- }
-
- /* Fill in the 'dev' fields. */
- for (i=0; i < 6; i++)
- dev->dev_addr[i] = ((unsigned char *) station_addr)[5-i];
-
- /* RX buffer must be more than 3K and less than 29K */
- if (dev->mem_end < 3072 || dev->mem_end > 29696)
- lp->rcv_ram = RCV_DEFAULT_RAM;
-
- /* calculate {xmt,rcv}_{lower,upper}_limit */
- eepro_recalc(dev);
-
- if (GetBit(lp->word[5], ee_BNC_TPE))
- dev->if_port = BNC;
- else
- dev->if_port = TPE;
-
- if (dev->irq < 2 && lp->eepro != 0) {
- /* Mask off INT number */
- int count = lp->word[1] & 7;
- unsigned irqMask = lp->word[7];
-
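- /* word[7] is the INT-pin-to-IRQ bitmap; each 'irqMask &= irqMask - 1'
-    below clears the lowest set bit, so after dropping the first 'count'
-    bits the next set bit left is the IRQ wired to the INT pin selected
-    in word[1], and ffs() recovers its number. */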
- while (count--)
- irqMask &= irqMask - 1;
-
- count = ffs(irqMask);
-
- if (count)
- dev->irq = count - 1;
-
- if (dev->irq < 2) {
- printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
- goto exit;
- } else if (dev->irq == 2) {
- dev->irq = 9;
- }
- }
-
- dev->netdev_ops = &eepro_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->ethtool_ops = &eepro_ethtool_ops;
-
- /* print boot time info */
- eepro_print_info(dev);
-
- /* reset 82595 */
- eepro_reset(ioaddr);
-
- err = register_netdev(dev);
- if (err)
- goto err;
- return 0;
-exit:
- err = -ENODEV;
-err:
- release_region(dev->base_addr, EEPRO_IO_EXTENT);
- return err;
-}
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is a non-reboot way to recover if something goes wrong.
- */
-
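-/* These tables translate a host IRQ number into the value programmed into
-   the 82595's INT_NO register (-1 for IRQ lines with no encoding);
-   irqrmap2 is the variant used by the Pro/10+ and EtherExpress 10 boards. */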
-static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
-static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
-static int eepro_grab_irq(struct net_device *dev)
-{
- static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
- const int *irqp = irqlist;
- int temp_reg, ioaddr = dev->base_addr;
-
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- /* Enable the interrupt line. */
- eepro_en_intline(ioaddr);
-
- /* be CAREFUL, BANK 0 now */
- eepro_sw2bank0(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Let the EXEC event interrupt */
- eepro_en_intexec(ioaddr);
-
- do {
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- temp_reg = inb(ioaddr + INT_NO_REG);
- outb((temp_reg & 0xf8) | irqrmap[*irqp], ioaddr + INT_NO_REG);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- if (request_irq (*irqp, NULL, IRQF_SHARED, "bogus", dev) != -EBUSY) {
- unsigned long irq_mask;
- /* Twinkle the interrupt, and check if it's seen */
- irq_mask = probe_irq_on();
-
- eepro_diag(ioaddr); /* RESET the 82595 */
- mdelay(20);
-
- if (*irqp == probe_irq_off(irq_mask)) /* It's a good IRQ line */
- break;
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
- }
- } while (*++irqp);
-
- eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
-
- /* Disable the physical interrupt line. */
- eepro_dis_intline(ioaddr);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- /* Mask all the interrupts. */
- eepro_dis_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- return dev->irq;
-}
-
-static int eepro_open(struct net_device *dev)
-{
- unsigned short temp_reg, old8, old9;
- int irqMask;
- int i, ioaddr = dev->base_addr;
- struct eepro_local *lp = netdev_priv(dev);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: entering eepro_open routine.\n", dev->name);
-
- irqMask = lp->word[7];
-
- if (lp->eepro == LAN595FX_10ISA) {
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 3;\n");
- }
- else if (irqMask == ee_FX_INT2IRQ) /* INT to IRQ Mask */
- {
- lp->eepro = 2; /* Yes, an Intel EtherExpress Pro/10+ */
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 2;\n");
- }
-
- else if ((dev->dev_addr[0] == SA_ADDR0 &&
- dev->dev_addr[1] == SA_ADDR1 &&
- dev->dev_addr[2] == SA_ADDR2))
- {
- lp->eepro = 1;
- if (net_debug > 3) printk(KERN_DEBUG "p->eepro = 1;\n");
- } /* Yes, an Intel EtherExpress Pro/10 */
-
- else lp->eepro = 0; /* No, it is a generic 82595 lan card */
-
- /* Get the interrupt vector for the 82595 */
- if (dev->irq < 2 && eepro_grab_irq(dev) == 0) {
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) {
- printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
- return -EAGAIN;
- }
-
- /* Initialize the 82595. */
-
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- temp_reg = inb(ioaddr + lp->eeprom_reg);
-
- lp->stepping = temp_reg >> 5; /* Get the stepping number of the 595 */
-
- if (net_debug > 3)
- printk(KERN_DEBUG "The stepping of the 82595 is %d\n", lp->stepping);
-
- if (temp_reg & 0x10) /* Check the TurnOff Enable bit */
- outb(temp_reg & 0xef, ioaddr + lp->eeprom_reg);
- for (i=0; i < 6; i++)
- outb(dev->dev_addr[i] , ioaddr + I_ADD_REG0 + i);
-
- temp_reg = inb(ioaddr + REG1); /* Setup Transmit Chaining */
- outb(temp_reg | XMT_Chain_Int | XMT_Chain_ErrStop /* and discard bad RCV frames */
- | RCV_Discard_BadFrame, ioaddr + REG1);
-
- temp_reg = inb(ioaddr + REG2); /* Match broadcast */
- outb(temp_reg | 0x14, ioaddr + REG2);
-
- temp_reg = inb(ioaddr + REG3);
- outb(temp_reg & 0x3f, ioaddr + REG3); /* clear test mode */
-
- /* Set the receiving mode */
- eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
-
- /* Set the interrupt vector */
- temp_reg = inb(ioaddr + INT_NO_REG);
- if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
- outb((temp_reg & 0xf8) | irqrmap2[dev->irq], ioaddr + INT_NO_REG);
- else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
-
-
- temp_reg = inb(ioaddr + INT_NO_REG);
- if (lp->eepro == LAN595FX || lp->eepro == LAN595FX_10ISA)
- outb((temp_reg & 0xf0) | irqrmap2[dev->irq] | 0x08,ioaddr+INT_NO_REG);
- else outb((temp_reg & 0xf8) | irqrmap[dev->irq], ioaddr + INT_NO_REG);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "eepro_open: content of INT Reg is %x\n", temp_reg);
-
-
- /* Initialize the RCV and XMT upper and lower limits */
- outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
- outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
- outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
- outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
-
- /* Enable the interrupt line. */
- eepro_en_intline(ioaddr);
-
- /* Switch back to Bank 0 */
- eepro_sw2bank0(ioaddr);
-
- /* Let RX and TX events interrupt */
- eepro_en_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Initialize RCV */
- outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
- lp->rx_start = lp->rcv_lower_limit;
- outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
-
- /* Initialize XMT */
- outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
- lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
- lp->tx_last = 0;
-
- /* Check for the i82595TX and i82595FX */
- old8 = inb(ioaddr + 8);
- outb(~old8, ioaddr + 8);
-
- if ((temp_reg = inb(ioaddr + 8)) == old8) {
- if (net_debug > 3)
- printk(KERN_DEBUG "i82595 detected!\n");
- lp->version = LAN595;
- }
- else {
- lp->version = LAN595TX;
- outb(old8, ioaddr + 8);
- old9 = inb(ioaddr + 9);
-
- if (irqMask==ee_FX_INT2IRQ) {
- if (net_debug > 3) {
- printk(KERN_DEBUG "IrqMask: %#x\n",irqMask);
- printk(KERN_DEBUG "i82595FX detected!\n");
- }
- lp->version = LAN595FX;
- outb(old9, ioaddr + 9);
- if (dev->if_port != TPE) { /* Hopefully, this will fix the
- problem of using Pentiums and
- pro/10 w/ BNC. */
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- temp_reg = inb(ioaddr + REG13);
- /* disable the full duplex mode since it is not
- applicable with the 10Base2 cable. */
- outb(temp_reg & ~(FDX | A_N_ENABLE), ioaddr + REG13);
- eepro_sw2bank0(ioaddr); /* be CAREFUL, BANK 0 now */
- }
- }
- else if (net_debug > 3) {
- printk(KERN_DEBUG "temp_reg: %#x ~old9: %#x\n",temp_reg,((~old9)&0xff));
- printk(KERN_DEBUG "i82595TX detected!\n");
- }
- }
-
- eepro_sel_reset(ioaddr);
-
- netif_start_queue(dev);
-
- if (net_debug > 3)
- printk(KERN_DEBUG "%s: exiting eepro_open routine.\n", dev->name);
-
- /* enabling rx */
- eepro_en_rx(ioaddr);
-
- return 0;
-}
-
-static void eepro_tx_timeout (struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* if (net_debug > 1) */
- printk (KERN_ERR "%s: transmit timed out, %s?\n", dev->name,
- "network cable problem");
- /* This is not a duplicate. One message for the console,
- one for the log file */
- printk (KERN_DEBUG "%s: transmit timed out, %s?\n", dev->name,
- "network cable problem");
- eepro_complete_selreset(ioaddr);
-}
-
-
-static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- unsigned long flags;
- int ioaddr = dev->base_addr;
- short length = skb->len;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- netif_stop_queue (dev);
-
- eepro_dis_int(ioaddr);
- spin_lock_irqsave(&lp->lock, flags);
-
- {
- unsigned char *buf = skb->data;
-
- if (hardware_send_packet(dev, buf, length))
- /* we won't wake queue here because we're out of space */
- dev->stats.tx_dropped++;
- else {
- dev->stats.tx_bytes+=skb->len;
- netif_wake_queue(dev);
- }
-
- }
-
- dev_kfree_skb (skb);
-
- /* You might need to clean up and record Tx statistics here. */
- /* dev->stats.tx_aborted_errors++; */
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);
-
- eepro_en_int(ioaddr);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- return NETDEV_TX_OK;
-}
-
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-
-static irqreturn_t
-eepro_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct eepro_local *lp;
- int ioaddr, status, boguscount = 20;
- int handled = 0;
-
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_interrupt routine.\n", dev->name);
-
- ioaddr = dev->base_addr;
-
- while (((status = inb(ioaddr + STATUS_REG)) & (RX_INT|TX_INT)) && (boguscount--))
- {
- handled = 1;
- if (status & RX_INT) {
- if (net_debug > 4)
- printk(KERN_DEBUG "%s: packet received interrupt.\n", dev->name);
-
- eepro_dis_int(ioaddr);
-
- /* Get the received packets */
- eepro_ack_rx(ioaddr);
- eepro_rx(dev);
-
- eepro_en_int(ioaddr);
- }
- if (status & TX_INT) {
- if (net_debug > 4)
- printk(KERN_DEBUG "%s: packet transmit interrupt.\n", dev->name);
-
-
- eepro_dis_int(ioaddr);
-
- /* Process the status of transmitted packets */
- eepro_ack_tx(ioaddr);
- eepro_transmit_interrupt(dev);
-
- eepro_en_int(ioaddr);
- }
- }
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_interrupt routine.\n", dev->name);
-
- spin_unlock(&lp->lock);
- return IRQ_RETVAL(handled);
-}
-
-static int eepro_close(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short temp_reg;
-
- netif_stop_queue(dev);
-
- eepro_sw2bank1(ioaddr); /* Switch back to Bank 1 */
-
- /* Disable the physical interrupt line. */
- temp_reg = inb(ioaddr + REG1);
- outb(temp_reg & 0x7f, ioaddr + REG1);
-
- eepro_sw2bank0(ioaddr); /* Switch back to Bank 0 */
-
- /* Flush the Tx and disable Rx. */
- outb(STOP_RCV_CMD, ioaddr);
- lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
- lp->tx_last = 0;
-
- /* Mask all the interrupts. */
- eepro_dis_int(ioaddr);
-
- /* clear all interrupts */
- eepro_clear_int(ioaddr);
-
- /* Reset the 82595 */
- eepro_reset(ioaddr);
-
- /* release the interrupt */
- free_irq(dev->irq, dev);
-
- /* Update the statistics here. What statistics? */
-
- return 0;
-}
-
-/* Set or clear the multicast filter for this adaptor.
- */
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- unsigned short mode;
- struct netdev_hw_addr *ha;
- int mc_count = netdev_mc_count(dev);
-
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
- {
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode | PRMSC_Mode, ioaddr + REG2);
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- }
-
- else if (mc_count == 0)
- {
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode & 0xd6, ioaddr + REG2); /* Turn off Multi-IA and PRMSC_Mode bits */
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- }
-
- else
- {
- unsigned short status, *eaddrs;
- int i, boguscount = 0;
-
- /* Disable RX and TX interrupts. Necessary to avoid
- corruption of the HOST_ADDRESS_REG by interrupt
- service routines. */
- eepro_dis_int(ioaddr);
-
- eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
- mode = inb(ioaddr + REG2);
- outb(mode | Multi_IA, ioaddr + REG2);
- mode = inb(ioaddr + REG3);
- outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
- eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
- outw(lp->tx_end, ioaddr + HOST_ADDRESS_REG);
- outw(MC_SETUP, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(6 * (mc_count + 1), ioaddr + IO_PORT);
-
- netdev_for_each_mc_addr(ha, dev) {
- eaddrs = (unsigned short *) ha->addr;
- outw(*eaddrs++, ioaddr + IO_PORT);
- outw(*eaddrs++, ioaddr + IO_PORT);
- outw(*eaddrs++, ioaddr + IO_PORT);
- }
-
- eaddrs = (unsigned short *) dev->dev_addr;
- outw(eaddrs[0], ioaddr + IO_PORT);
- outw(eaddrs[1], ioaddr + IO_PORT);
- outw(eaddrs[2], ioaddr + IO_PORT);
- outw(lp->tx_end, ioaddr + lp->xmt_bar);
- outb(MC_SETUP, ioaddr);
-
- /* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
-
- if (lp->tx_start != lp->tx_end)
- {
- /* update the next address and the chain bit in the
- last packet */
- outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
- outw(i, ioaddr + IO_PORT);
- outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
- status = inw(ioaddr + IO_PORT);
- outw(status | CHAIN_BIT, ioaddr + IO_PORT);
- lp->tx_end = i ;
- }
- else {
- lp->tx_start = lp->tx_end = i ;
- }
-
- /* Acknowledge that the MC setup is done */
- do { /* We should be doing this in the eepro_interrupt()! */
- SLOW_DOWN;
- SLOW_DOWN;
- if (inb(ioaddr + STATUS_REG) & 0x08)
- {
- i = inb(ioaddr);
- outb(0x08, ioaddr + STATUS_REG);
-
- if (i & 0x20) { /* command ABORTed */
- printk(KERN_NOTICE "%s: multicast setup failed.\n",
- dev->name);
- break;
- } else if ((i & 0x0f) == 0x03) { /* MC-Done */
- printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, mc_count,
- mc_count > 1 ? "es":"");
- break;
- }
- }
- } while (++boguscount < 100);
-
- /* Re-enable RX and TX interrupts */
- eepro_en_int(ioaddr);
- }
- if (lp->eepro == LAN595FX_10ISA) {
- eepro_complete_selreset(ioaddr);
- }
- else
- eepro_en_rx(ioaddr);
-}
-
-/* The horrible routine to read a word from the serial EEPROM. */
-/* IMPORTANT - the 82595 will be set to Bank 0 after the eeprom is read */
-
-/* The delay between EEPROM clock transitions. */
-#define eeprom_delay() { udelay(40); }
-#define EE_READ_CMD (6 << 6)
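-
-/* The loop below clocks a 9-bit read request out on EEDI, MSB first
-   (the 3-bit opcode 110 supplied by EE_READ_CMD plus a 6-bit word
-   address), then shifts 16 data bits back in from EEDO. */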
-
-static int
-read_eeprom(int ioaddr, int location, struct net_device *dev)
-{
- int i;
- unsigned short retval = 0;
- struct eepro_local *lp = netdev_priv(dev);
- short ee_addr = ioaddr + lp->eeprom_reg;
- int read_cmd = location | EE_READ_CMD;
- short ctrl_val = EECS ;
-
- /* XXXX - black magic */
- eepro_sw2bank1(ioaddr);
- outb(0x00, ioaddr + STATUS_REG);
- /* XXXX - black magic */
-
- eepro_sw2bank2(ioaddr);
- outb(ctrl_val, ee_addr);
-
- /* Shift the read command bits out. */
- for (i = 8; i >= 0; i--) {
- short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI
- : ctrl_val;
- outb(outval, ee_addr);
- outb(outval | EESK, ee_addr); /* EEPROM clock tick. */
- eeprom_delay();
- outb(outval, ee_addr); /* Finish EEPROM a clock tick. */
- eeprom_delay();
- }
- outb(ctrl_val, ee_addr);
-
- for (i = 16; i > 0; i--) {
- outb(ctrl_val | EESK, ee_addr); eeprom_delay();
- retval = (retval << 1) | ((inb(ee_addr) & EEDO) ? 1 : 0);
- outb(ctrl_val, ee_addr); eeprom_delay();
- }
-
- /* Terminate the EEPROM access. */
- ctrl_val &= ~EECS;
- outb(ctrl_val | EESK, ee_addr);
- eeprom_delay();
- outb(ctrl_val, ee_addr);
- eeprom_delay();
- eepro_sw2bank0(ioaddr);
- return retval;
-}
-
-static int
-hardware_send_packet(struct net_device *dev, void *buf, short length)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- unsigned status, tx_available, last, end;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering hardware_send_packet routine.\n", dev->name);
-
- /* determine how much of the transmit buffer space is available */
- if (lp->tx_end > lp->tx_start)
- tx_available = lp->xmt_ram - (lp->tx_end - lp->tx_start);
- else if (lp->tx_end < lp->tx_start)
- tx_available = lp->tx_start - lp->tx_end;
- else tx_available = lp->xmt_ram;
-
- if (((((length + 3) >> 1) << 1) + 2*XMT_HEADER) >= tx_available) {
- /* No space available ??? */
- return 1;
- }
-
- last = lp->tx_end;
- end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
-
- if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
- if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
- /* Arrrr!!!, must keep the xmt header together,
- several days were lost to chase this one down. */
- last = lp->xmt_lower_limit;
- end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
- }
- else end = lp->xmt_lower_limit + (end -
- lp->xmt_upper_limit + 2);
- }
-
- outw(last, ioaddr + HOST_ADDRESS_REG);
- outw(XMT_CMD, ioaddr + IO_PORT);
- outw(0, ioaddr + IO_PORT);
- outw(end, ioaddr + IO_PORT);
- outw(length, ioaddr + IO_PORT);
-
- if (lp->version == LAN595)
- outsw(ioaddr + IO_PORT, buf, (length + 3) >> 1);
- else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
- unsigned short temp = inb(ioaddr + INT_MASK_REG);
- outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
- outsl(ioaddr + IO_PORT_32_BIT, buf, (length + 3) >> 2);
- outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
- }
-
- /* A dummy read to flush the DRAM write pipeline */
- status = inw(ioaddr + IO_PORT);
-
- if (lp->tx_start == lp->tx_end) {
- outw(last, ioaddr + lp->xmt_bar);
- outb(XMT_CMD, ioaddr);
- lp->tx_start = last; /* I don't like to change tx_start here */
- }
- else {
- /* update the next address and the chain bit in the
- last packet */
-
- if (lp->tx_end != last) {
- outw(lp->tx_last + XMT_CHAIN, ioaddr + HOST_ADDRESS_REG);
- outw(last, ioaddr + IO_PORT);
- }
-
- outw(lp->tx_last + XMT_COUNT, ioaddr + HOST_ADDRESS_REG);
- status = inw(ioaddr + IO_PORT);
- outw(status | CHAIN_BIT, ioaddr + IO_PORT);
-
- /* Continue the transmit command */
- outb(RESUME_XMT_CMD, ioaddr);
- }
-
- lp->tx_last = last;
- lp->tx_end = end;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting hardware_send_packet routine.\n", dev->name);
-
- return 0;
-}
-
-static void
-eepro_rx(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- short boguscount = 20;
- short rcv_car = lp->rx_start;
- unsigned rcv_event, rcv_status, rcv_next_frame, rcv_size;
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: entering eepro_rx routine.\n", dev->name);
-
- /* Set the read pointer to the start of the RCV */
- outw(rcv_car, ioaddr + HOST_ADDRESS_REG);
-
- rcv_event = inw(ioaddr + IO_PORT);
-
- while (rcv_event == RCV_DONE) {
-
- rcv_status = inw(ioaddr + IO_PORT);
- rcv_next_frame = inw(ioaddr + IO_PORT);
- rcv_size = inw(ioaddr + IO_PORT);
-
- if ((rcv_status & (RX_OK | RX_ERROR)) == RX_OK) {
-
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- rcv_size &= 0x3fff;
- dev->stats.rx_bytes += rcv_size;
- skb = netdev_alloc_skb(dev, rcv_size + 5);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
- lp->rx_start = rcv_next_frame;
- outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
-
- break;
- }
- skb_reserve(skb,2);
-
- if (lp->version == LAN595)
- insw(ioaddr+IO_PORT, skb_put(skb,rcv_size), (rcv_size + 3) >> 1);
- else { /* LAN595TX or LAN595FX, capable of 32-bit I/O processing */
- unsigned short temp = inb(ioaddr + INT_MASK_REG);
- outb(temp | IO_32_BIT, ioaddr + INT_MASK_REG);
- insl(ioaddr+IO_PORT_32_BIT, skb_put(skb,rcv_size),
- (rcv_size + 3) >> 2);
- outb(temp & ~(IO_32_BIT), ioaddr + INT_MASK_REG);
- }
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- }
-
- else { /* Not sure will ever reach here,
- I set the 595 to discard bad received frames */
- dev->stats.rx_errors++;
-
- if (rcv_status & 0x0100)
- dev->stats.rx_over_errors++;
-
- else if (rcv_status & 0x0400)
- dev->stats.rx_frame_errors++;
-
- else if (rcv_status & 0x0800)
- dev->stats.rx_crc_errors++;
-
- printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
- dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
- }
-
- if (rcv_status & 0x1000)
- dev->stats.rx_length_errors++;
-
- rcv_car = lp->rx_start + RCV_HEADER + rcv_size;
- lp->rx_start = rcv_next_frame;
-
- if (--boguscount == 0)
- break;
-
- outw(rcv_next_frame, ioaddr + HOST_ADDRESS_REG);
- rcv_event = inw(ioaddr + IO_PORT);
-
- }
- if (rcv_car == 0)
- rcv_car = lp->rcv_upper_limit | 0xff;
-
- outw(rcv_car - 1, ioaddr + RCV_STOP);
-
- if (net_debug > 5)
- printk(KERN_DEBUG "%s: exiting eepro_rx routine.\n", dev->name);
-}
-
-static void
-eepro_transmit_interrupt(struct net_device *dev)
-{
- struct eepro_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- short boguscount = 25;
- short xmt_status;
-
- while ((lp->tx_start != lp->tx_end) && boguscount--) {
-
- outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
- xmt_status = inw(ioaddr+IO_PORT);
-
- if (!(xmt_status & TX_DONE_BIT))
- break;
-
- xmt_status = inw(ioaddr+IO_PORT);
- lp->tx_start = inw(ioaddr+IO_PORT);
-
- netif_wake_queue (dev);
-
- if (xmt_status & TX_OK)
- dev->stats.tx_packets++;
- else {
- dev->stats.tx_errors++;
- if (xmt_status & 0x0400) {
- dev->stats.tx_carrier_errors++;
- printk(KERN_DEBUG "%s: carrier error\n",
- dev->name);
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- }
- else {
- printk(KERN_DEBUG "%s: XMT status = %#x\n",
- dev->name, xmt_status);
- }
- }
- if (xmt_status & 0x000f) {
- dev->stats.collisions += (xmt_status & 0x000f);
- }
-
- if ((xmt_status & 0x0040) == 0x0) {
- dev->stats.tx_heartbeat_errors++;
- }
- }
-}
-
-static int eepro_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct eepro_local *lp = netdev_priv(dev);
-
- cmd->supported = SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_Autoneg;
- cmd->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_Autoneg;
-
- if (GetBit(lp->word[5], ee_PortTPE)) {
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
- }
- if (GetBit(lp->word[5], ee_PortBNC)) {
- cmd->supported |= SUPPORTED_BNC;
- cmd->advertising |= ADVERTISED_BNC;
- }
- if (GetBit(lp->word[5], ee_PortAUI)) {
- cmd->supported |= SUPPORTED_AUI;
- cmd->advertising |= ADVERTISED_AUI;
- }
-
- ethtool_cmd_speed_set(cmd, SPEED_10);
-
- if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
- cmd->duplex = DUPLEX_FULL;
- }
- else {
- cmd->duplex = DUPLEX_HALF;
- }
-
- cmd->port = dev->if_port;
- cmd->phy_address = dev->base_addr;
- cmd->transceiver = XCVR_INTERNAL;
-
- if (lp->word[0] & ee_AutoNeg) {
- cmd->autoneg = 1;
- }
-
- return 0;
-}
-
-static void eepro_ethtool_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *drvinfo)
-{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
- "ISA 0x%lx", dev->base_addr);
-}
-
-static const struct ethtool_ops eepro_ethtool_ops = {
- .get_settings = eepro_ethtool_get_settings,
- .get_drvinfo = eepro_ethtool_get_drvinfo,
-};
-
-#ifdef MODULE
-
-#define MAX_EEPRO 8
-static struct net_device *dev_eepro[MAX_EEPRO];
-
-static int io[MAX_EEPRO] = {
- [0 ... MAX_EEPRO-1] = -1
-};
-static int irq[MAX_EEPRO];
-static int mem[MAX_EEPRO] = { /* Size of the rx buffer in KB */
- [0 ... MAX_EEPRO-1] = RCV_DEFAULT_RAM/1024
-};
-static int autodetect;
-
-static int n_eepro;
-/* For linux 2.1.xx */
-
-MODULE_AUTHOR("Pascal Dupuis and others");
-MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
-MODULE_LICENSE("GPL");
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(mem, int, NULL, 0);
-module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
-MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(s) in kB (3-29)");
-MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
-
-int __init init_module(void)
-{
- struct net_device *dev;
- int i;
- if (io[0] == -1 && autodetect == 0) {
- printk(KERN_WARNING "eepro_init_module: Probe is very dangerous in ISA boards!\n");
- printk(KERN_WARNING "eepro_init_module: Please add \"autodetect=1\" to force probe\n");
- return -ENODEV;
- }
- else if (autodetect) {
- /* if autodetect is set then we must force detection */
- for (i = 0; i < MAX_EEPRO; i++) {
- io[i] = 0;
- }
-
- printk(KERN_INFO "eepro_init_module: Auto-detecting boards (May God protect us...)\n");
- }
-
- for (i = 0; i < MAX_EEPRO && io[i] != -1; i++) {
- dev = alloc_etherdev(sizeof(struct eepro_local));
- if (!dev)
- break;
-
- dev->mem_end = mem[i];
- dev->base_addr = io[i];
- dev->irq = irq[i];
-
- if (do_eepro_probe(dev) == 0) {
- dev_eepro[n_eepro++] = dev;
- continue;
- }
- free_netdev(dev);
- break;
- }
-
- if (n_eepro)
- printk(KERN_INFO "%s", version);
-
- return n_eepro ? 0 : -ENODEV;
-}
-
-void __exit
-cleanup_module(void)
-{
- int i;
-
- for (i=0; i<n_eepro; i++) {
- struct net_device *dev = dev_eepro[i];
- unregister_netdev(dev);
- release_region(dev->base_addr, EEPRO_IO_EXTENT);
- free_netdev(dev);
- }
-}
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/i825xx/eexpress.c b/drivers/net/ethernet/i825xx/eexpress.c
deleted file mode 100644
index 7a6a2f04c5b..00000000000
--- a/drivers/net/ethernet/i825xx/eexpress.c
+++ /dev/null
@@ -1,1661 +0,0 @@
-/* Intel EtherExpress 16 device driver for Linux
- *
- * Written by John Sullivan, 1995
- * based on original code by Donald Becker, with changes by
- * Alan Cox and Pauline Middelink.
- *
- * Support for 8-bit mode by Zoltan Szilagyi <zoltans@cs.arizona.edu>
- *
- * Many modifications, and currently maintained, by
- * Philip Blundell <philb@gnu.org>
- * Added the Compaq LTE Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Added MCA support Adam Fritzler (now deleted)
- *
- * Note - this driver is still experimental - it has problems on faster
- * machines. Someone needs to sit down and go through it line by line with
- * a databook...
- */
-
-/* The EtherExpress 16 is a fairly simple card, based on a shared-memory
- * design using the i82586 Ethernet coprocessor. It bears no relationship,
- * as far as I know, to the similarly-named "EtherExpress Pro" range.
- *
- * Historically, Linux support for these cards has been very bad. However,
- * things seem to be getting better slowly.
- */
-
-/* If your card is confused about what sort of interface it has (eg it
- * persistently reports "10baseT" when none is fitted), running 'SOFTSET /BART'
- * or 'SOFTSET /LISA' from DOS seems to help.
- */
-
-/* Here's the scoop on memory mapping.
- *
- * There are three ways to access EtherExpress card memory: either using the
- * shared-memory mapping, or using PIO through the dataport, or using PIO
- * through the "shadow memory" ports.
- *
- * The shadow memory system works by having the card map some of its memory
- * as follows:
- *
- * (the low five bits of the SMPTR are ignored)
- *
- * base+0x4000..400f memory at SMPTR+0..15
- * base+0x8000..800f memory at SMPTR+16..31
- * base+0xc000..c007 dubious stuff (memory at SMPTR+16..23 apparently)
- * base+0xc008..c00f memory at 0x0008..0x000f
- *
- * This last set (the one at c008) is particularly handy because the SCB
- * lives at 0x0008. So that set of ports gives us easy random access to data
- * in the SCB without having to mess around setting up pointers and the like.
- * We always use this method to access the SCB (via the scb_xx() functions).
- *
- * Dataport access works by aiming the appropriate (read or write) pointer
- * at the first address you're interested in, and then reading or writing from
- * the dataport. The pointers auto-increment after each transfer. We use
- * this for data transfer.
- *
- * We don't use the shared-memory system because it allegedly doesn't work on
- * all cards, and because it's a bit more prone to go wrong (it's one more
- * thing to configure...).
- */
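-
-/* A sketch of the SCB shortcut described above (this is exactly what the
- * scb_*() helpers further down do): since card memory 0x0008..0x000f is
- * always visible at base+0xc008..0xc00f, any SCB word can be read or
- * written with a single I/O access and no pointer setup, e.g.
- *
- *	inw(base + 0xc008)	reads the SCB status word  (card offset 0x08)
- *	inw(base + 0xc00a)	reads the SCB command word (card offset 0x0a)
- */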
-
-/* Known bugs:
- *
- * - The card seems to want to give us two interrupts every time something
- * happens, where just one would be better.
- */
-
-/*
- *
- * Note by Zoltan Szilagyi 10-12-96:
- *
- * I've succeeded in eliminating the "CU wedged" messages, and hence the
- * lockups, which were only occurring with cards running in 8-bit mode ("force
- * 8-bit operation" in Intel's SoftSet utility). This version of the driver
- * sets the 82586 and the ASIC to 8-bit mode at startup; it also stops the
- * CU before submitting a packet for transmission, and then restarts it as soon
- * as the process of handing the packet is complete. This is definitely an
- * unnecessary slowdown if the card is running in 16-bit mode; therefore one
- * should detect 16-bit vs 8-bit mode from the EEPROM settings and act
- * accordingly. In 8-bit mode with this bugfix I'm getting about 150 K/s for
- * ftp's, which is significantly better than I get in DOS, so the overhead of
- * stopping and restarting the CU with each transmit is not prohibitive in
- * practice.
- *
- * Update by David Woodhouse 11/5/99:
- *
- * I've seen "CU wedged" messages in 16-bit mode, on the Alpha architecture.
- * I assume that this is because 16-bit accesses are actually handled as two
- * 8-bit accesses.
- */
-
-#ifdef __alpha__
-#define LOCKUP16 1
-#endif
-#ifndef LOCKUP16
-#define LOCKUP16 0
-#endif
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#ifndef NET_DEBUG
-#define NET_DEBUG 4
-#endif
-
-#include "eexpress.h"
-
-#define EEXP_IO_EXTENT 16
-
-/*
- * Private data declarations
- */
-
-struct net_local
-{
- unsigned long last_tx; /* jiffies when last transmit started */
- unsigned long init_time; /* jiffies when eexp_hw_init586 called */
- unsigned short rx_first; /* first rx buf, same as RX_BUF_START */
- unsigned short rx_last; /* last rx buf */
- unsigned short rx_ptr; /* first rx buf to look at */
- unsigned short tx_head; /* next free tx buf */
- unsigned short tx_reap; /* first in-use tx buf */
- unsigned short tx_tail; /* previous tx buf to tx_head */
- unsigned short tx_link; /* last known-executing tx buf */
- unsigned short last_tx_restart; /* set to tx_link when we
- restart the CU */
- unsigned char started;
- unsigned short rx_buf_start;
- unsigned short rx_buf_end;
- unsigned short num_tx_bufs;
- unsigned short num_rx_bufs;
- unsigned char width; /* 0 for 16bit, 1 for 8bit */
- unsigned char was_promisc;
- unsigned char old_mc_count;
- spinlock_t lock;
-};
-
-/* This is the code and data that is downloaded to the EtherExpress card's
- * memory at boot time.
- */
-
-static unsigned short start_code[] = {
-/* 0x0000 */
- 0x0001, /* ISCP: busy - cleared after reset */
- 0x0008,0x0000,0x0000, /* offset,address (lo,hi) of SCB */
-
- 0x0000,0x0000, /* SCB: status, commands */
- 0x0000,0x0000, /* links to first command block,
- first receive descriptor */
- 0x0000,0x0000, /* CRC error, alignment error counts */
- 0x0000,0x0000, /* out of resources, overrun error counts */
-
- 0x0000,0x0000, /* pad */
- 0x0000,0x0000,
-
-/* 0x20 -- start of 82586 CU program */
-#define CONF_LINK 0x20
- 0x0000,Cmd_Config,
- 0x0032, /* link to next command */
- 0x080c, /* 12 bytes follow : fifo threshold=8 */
- 0x2e40, /* don't rx bad frames
- * SRDY/ARDY => ext. sync. : preamble len=8
- * take addresses from data buffers
- * 6 bytes/address
- */
- 0x6000, /* default backoff method & priority
- * interframe spacing = 0x60 */
- 0xf200, /* slot time=0x200
- * max collision retry = 0xf */
-#define CONF_PROMISC 0x2e
- 0x0000, /* no HDLC : normal CRC : enable broadcast
- * disable promiscuous/multicast modes */
- 0x003c, /* minimum frame length = 60 octets */
-
- 0x0000,Cmd_SetAddr,
- 0x003e, /* link to next command */
-#define CONF_HWADDR 0x38
- 0x0000,0x0000,0x0000, /* hardware address placed here */
-
- 0x0000,Cmd_MCast,
- 0x0076, /* link to next command */
-#define CONF_NR_MULTICAST 0x44
- 0x0000, /* number of bytes in multicast address(es) */
-#define CONF_MULTICAST 0x46
- 0x0000, 0x0000, 0x0000, /* some addresses */
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
- 0x0000, 0x0000, 0x0000,
-
-#define CONF_DIAG_RESULT 0x76
- 0x0000, Cmd_Diag,
- 0x007c, /* link to next command */
-
- 0x0000,Cmd_TDR|Cmd_INT,
- 0x0084,
-#define CONF_TDR_RESULT 0x82
- 0x0000,
-
- 0x0000,Cmd_END|Cmd_Nop, /* end of configure sequence */
- 0x0084 /* dummy link */
-};
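-
-/* (The CONF_* values above are byte offsets into this image: the station
-   address, multicast list and promiscuous flag are presumably patched in
-   at those offsets before the block is downloaded, while CONF_DIAG_RESULT
-   and CONF_TDR_RESULT are read back through the shadow ports once the
-   commands have run, as eexp_start_irq() below does.) */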
-
-/* maps irq number to EtherExpress magic value */
-static char irqrmap[] = { 0,0,1,2,3,4,0,0,0,1,5,6,0,0,0,0 };
-
-/*
- * Prototypes for Linux interface
- */
-
-static int eexp_open(struct net_device *dev);
-static int eexp_close(struct net_device *dev);
-static void eexp_timeout(struct net_device *dev);
-static netdev_tx_t eexp_xmit(struct sk_buff *buf,
- struct net_device *dev);
-
-static irqreturn_t eexp_irq(int irq, void *dev_addr);
-static void eexp_set_multicast(struct net_device *dev);
-
-/*
- * Prototypes for hardware access functions
- */
-
-static void eexp_hw_rx_pio(struct net_device *dev);
-static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
- unsigned short len);
-static int eexp_hw_probe(struct net_device *dev,unsigned short ioaddr);
-static unsigned short eexp_hw_readeeprom(unsigned short ioaddr,
- unsigned char location);
-
-static unsigned short eexp_hw_lasttxstat(struct net_device *dev);
-static void eexp_hw_txrestart(struct net_device *dev);
-
-static void eexp_hw_txinit (struct net_device *dev);
-static void eexp_hw_rxinit (struct net_device *dev);
-
-static void eexp_hw_init586 (struct net_device *dev);
-static void eexp_setup_filter (struct net_device *dev);
-
-static char *eexp_ifmap[]={"AUI", "BNC", "RJ45"};
-enum eexp_iftype {AUI=0, BNC=1, TPE=2};
-
-#define STARTED_RU 2
-#define STARTED_CU 1
-
-/*
- * Primitive hardware access functions.
- */
-
-static inline unsigned short scb_status(struct net_device *dev)
-{
- return inw(dev->base_addr + 0xc008);
-}
-
-static inline unsigned short scb_rdcmd(struct net_device *dev)
-{
- return inw(dev->base_addr + 0xc00a);
-}
-
-static inline void scb_command(struct net_device *dev, unsigned short cmd)
-{
- outw(cmd, dev->base_addr + 0xc00a);
-}
-
-static inline void scb_wrcbl(struct net_device *dev, unsigned short val)
-{
- outw(val, dev->base_addr + 0xc00c);
-}
-
-static inline void scb_wrrfa(struct net_device *dev, unsigned short val)
-{
- outw(val, dev->base_addr + 0xc00e);
-}
-
-static inline void set_loopback(struct net_device *dev)
-{
- outb(inb(dev->base_addr + Config) | 2, dev->base_addr + Config);
-}
-
-static inline void clear_loopback(struct net_device *dev)
-{
- outb(inb(dev->base_addr + Config) & ~2, dev->base_addr + Config);
-}
-
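-/* Translate a card-memory offset (only the low five bits matter, matching
-   the SMPTR window) into the port offset, relative to the board's base
-   address, where that byte is shadowed: offsets 0..15 appear at +0x4000,
-   offsets 16..31 at +0x8000 (hence the 0x3ff0 adjustment). */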
-static inline unsigned short int SHADOW(short int addr)
-{
- addr &= 0x1f;
- if (addr > 0xf) addr += 0x3ff0;
- return addr + 0x4000;
-}
-
-/*
- * Linux interface
- */
-
-/*
- * checks for presence of EtherExpress card
- */
-
-static int __init do_express_probe(struct net_device *dev)
-{
- unsigned short *port;
- static unsigned short ports[] = { 0x240,0x300,0x310,0x270,0x320,0x340,0 };
- unsigned short ioaddr = dev->base_addr;
- int dev_irq = dev->irq;
- int err;
-
- dev->if_port = 0xff; /* not set */
-
- if (ioaddr&0xfe00) {
- if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress"))
- return -EBUSY;
- err = eexp_hw_probe(dev,ioaddr);
- release_region(ioaddr, EEXP_IO_EXTENT);
- return err;
- } else if (ioaddr)
- return -ENXIO;
-
- for (port=&ports[0] ; *port ; port++ )
- {
- unsigned short sum = 0;
- int i;
- if (!request_region(*port, EEXP_IO_EXTENT, "EtherExpress"))
- continue;
- for ( i=0 ; i<4 ; i++ )
- {
- unsigned short t;
- t = inb(*port + ID_PORT);
- sum |= (t>>4) << ((t & 0x03)<<2);
- }
- if (sum==0xbaba && !eexp_hw_probe(dev,*port)) {
- release_region(*port, EEXP_IO_EXTENT);
- return 0;
- }
- release_region(*port, EEXP_IO_EXTENT);
- dev->irq = dev_irq;
- }
- return -ENODEV;
-}
-
-#ifndef MODULE
-struct net_device * __init express_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_express_probe(dev);
- if (!err)
- return dev;
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-/*
- * open and initialize the adapter, ready for use
- */
-
-static int eexp_open(struct net_device *dev)
-{
- int ret;
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_open()\n", dev->name);
-#endif
-
- if (!dev->irq || !irqrmap[dev->irq])
- return -ENXIO;
-
- ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev);
- if (ret)
- return ret;
-
- if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr);
- goto err_out1;
- }
- if (!request_region(ioaddr+0x4000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr+0x4000);
- goto err_out2;
- }
- if (!request_region(ioaddr+0x8000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr+0x8000);
- goto err_out3;
- }
- if (!request_region(ioaddr+0xc000, EEXP_IO_EXTENT, "EtherExpress shadow")) {
- printk(KERN_WARNING "EtherExpress io port %x, is busy.\n"
- , ioaddr+0xc000);
- goto err_out4;
- }
-
- if (lp->width) {
- printk("%s: forcing ASIC to 8-bit mode\n", dev->name);
- outb(inb(dev->base_addr+Config)&~4, dev->base_addr+Config);
- }
-
- eexp_hw_init586(dev);
- netif_start_queue(dev);
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: leaving eexp_open()\n", dev->name);
-#endif
- return 0;
-
- err_out4:
- release_region(ioaddr+0x8000, EEXP_IO_EXTENT);
- err_out3:
- release_region(ioaddr+0x4000, EEXP_IO_EXTENT);
- err_out2:
- release_region(ioaddr, EEXP_IO_EXTENT);
- err_out1:
- free_irq(dev->irq, dev);
- return -EBUSY;
-}
-
-/*
- * close and disable the interface, leaving the 586 in reset.
- */
-
-static int eexp_close(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
-
- int irq = dev->irq;
-
- netif_stop_queue(dev);
-
- outb(SIRQ_dis|irqrmap[irq],ioaddr+SET_IRQ);
- lp->started = 0;
- scb_command(dev, SCB_CUsuspend|SCB_RUsuspend);
- outb(0,ioaddr+SIGNAL_CA);
- free_irq(irq,dev);
- outb(i586_RST,ioaddr+EEPROM_Ctrl);
- release_region(ioaddr, EEXP_IO_EXTENT);
- release_region(ioaddr+0x4000, 16);
- release_region(ioaddr+0x8000, 16);
- release_region(ioaddr+0xc000, 16);
-
- return 0;
-}
-
-/*
- * This gets called when a higher level thinks we are broken. Check that
- * nothing has become jammed in the CU.
- */
-
-static void unstick_cu(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- if (lp->started)
- {
- if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
- {
- if (lp->tx_link==lp->last_tx_restart)
- {
- unsigned short boguscount=200,rsst;
- printk(KERN_WARNING "%s: Retransmit timed out, status %04x, resetting...\n",
- dev->name, scb_status(dev));
- eexp_hw_txinit(dev);
- lp->last_tx_restart = 0;
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- while (!SCB_complete(rsst=scb_status(dev)))
- {
- if (!--boguscount)
- {
- boguscount=200;
- printk(KERN_WARNING "%s: Reset timed out status %04x, retrying...\n",
- dev->name,rsst);
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- }
- }
- netif_wake_queue(dev);
- }
- else
- {
- unsigned short status = scb_status(dev);
- if (SCB_CUdead(status))
- {
- unsigned short txstatus = eexp_hw_lasttxstat(dev);
- printk(KERN_WARNING "%s: Transmit timed out, CU not active status %04x %04x, restarting...\n",
- dev->name, status, txstatus);
- eexp_hw_txrestart(dev);
- }
- else
- {
- unsigned short txstatus = eexp_hw_lasttxstat(dev);
- if (netif_queue_stopped(dev) && !txstatus)
- {
- printk(KERN_WARNING "%s: CU wedged, status %04x %04x, resetting...\n",
- dev->name,status,txstatus);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- }
- else
- {
- printk(KERN_WARNING "%s: transmit timed out\n", dev->name);
- }
- }
- }
- }
- }
- else
- {
- if (time_after(jiffies, lp->init_time + 10))
- {
- unsigned short status = scb_status(dev);
- printk(KERN_WARNING "%s: i82586 startup timed out, status %04x, resetting...\n",
- dev->name, status);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- }
- }
-}
-
-static void eexp_timeout(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-#ifdef CONFIG_SMP
- unsigned long flags;
-#endif
- int status;
-
- disable_irq(dev->irq);
-
- /*
- * Best would be to use synchronize_irq(); the spin_lock() here
- * just makes it work for now.
- */
-
-#ifdef CONFIG_SMP
- spin_lock_irqsave(&lp->lock, flags);
-#endif
-
- status = scb_status(dev);
- unstick_cu(dev);
- printk(KERN_INFO "%s: transmit timed out, %s?\n", dev->name,
- (SCB_complete(status)?"lost interrupt":
- "board on fire"));
- dev->stats.tx_errors++;
- lp->last_tx = jiffies;
- if (!SCB_complete(status)) {
- scb_command(dev, SCB_CUabort);
- outb(0,dev->base_addr+SIGNAL_CA);
- }
- netif_wake_queue(dev);
-#ifdef CONFIG_SMP
- spin_unlock_irqrestore(&lp->lock, flags);
-#endif
-}
-
-/*
- * Called to transmit a packet, or to allow us to right ourselves
- * if the kernel thinks we've died.
- */
-static netdev_tx_t eexp_xmit(struct sk_buff *buf, struct net_device *dev)
-{
- short length = buf->len;
-#ifdef CONFIG_SMP
- struct net_local *lp = netdev_priv(dev);
- unsigned long flags;
-#endif
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_xmit()\n", dev->name);
-#endif
-
- if (buf->len < ETH_ZLEN) {
- if (skb_padto(buf, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- disable_irq(dev->irq);
-
- /*
- * Best would be to use synchronize_irq(); the spin_lock() here
- * just makes it work for now.
- */
-
-#ifdef CONFIG_SMP
- spin_lock_irqsave(&lp->lock, flags);
-#endif
-
- {
- unsigned short *data = (unsigned short *)buf->data;
-
- dev->stats.tx_bytes += length;
-
- eexp_hw_tx_pio(dev,data,length);
- }
- dev_kfree_skb(buf);
-#ifdef CONFIG_SMP
- spin_unlock_irqrestore(&lp->lock, flags);
-#endif
- enable_irq(dev->irq);
- return NETDEV_TX_OK;
-}
-
-/*
- * Handle an EtherExpress interrupt
- * If we've finished initializing, start the RU and CU up.
- * If we've already started, reap tx buffers, handle any received packets,
- * check to make sure we've not become wedged.
- */
-
-static unsigned short eexp_start_irq(struct net_device *dev,
- unsigned short status)
-{
- unsigned short ack_cmd = SCB_ack(status);
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
- if ((dev->flags & IFF_UP) && !(lp->started & STARTED_CU)) {
- short diag_status, tdr_status;
- while (SCB_CUstat(status)==2)
- status = scb_status(dev);
-#if NET_DEBUG > 4
- printk("%s: CU went non-active (status %04x)\n",
- dev->name, status);
-#endif
-
- outw(CONF_DIAG_RESULT & ~31, ioaddr + SM_PTR);
- diag_status = inw(ioaddr + SHADOW(CONF_DIAG_RESULT));
- if (diag_status & 1<<11) {
- printk(KERN_WARNING "%s: 82586 failed self-test\n",
- dev->name);
- } else if (!(diag_status & 1<<13)) {
- printk(KERN_WARNING "%s: 82586 self-test failed to complete\n", dev->name);
- }
-
- outw(CONF_TDR_RESULT & ~31, ioaddr + SM_PTR);
- tdr_status = inw(ioaddr + SHADOW(CONF_TDR_RESULT));
- if (tdr_status & (TDR_SHORT|TDR_OPEN)) {
- printk(KERN_WARNING "%s: TDR reports cable %s at %d tick%s\n", dev->name, (tdr_status & TDR_SHORT)?"short":"broken", tdr_status & TDR_TIME, ((tdr_status & TDR_TIME) != 1) ? "s" : "");
- }
- else if (tdr_status & TDR_XCVRPROBLEM) {
- printk(KERN_WARNING "%s: TDR reports transceiver problem\n", dev->name);
- }
- else if (tdr_status & TDR_LINKOK) {
-#if NET_DEBUG > 4
- printk(KERN_DEBUG "%s: TDR reports link OK\n", dev->name);
-#endif
- } else {
- printk("%s: TDR is ga-ga (status %04x)\n", dev->name,
- tdr_status);
- }
-
- lp->started |= STARTED_CU;
- scb_wrcbl(dev, lp->tx_link);
- /* if the RU isn't running, start it now */
- if (!(lp->started & STARTED_RU)) {
- ack_cmd |= SCB_RUstart;
- scb_wrrfa(dev, lp->rx_buf_start);
- lp->rx_ptr = lp->rx_buf_start;
- lp->started |= STARTED_RU;
- }
- ack_cmd |= SCB_CUstart | 0x2000;
- }
-
- if ((dev->flags & IFF_UP) && !(lp->started & STARTED_RU) && SCB_RUstat(status)==4)
- lp->started|=STARTED_RU;
-
- return ack_cmd;
-}
-
-static void eexp_cmd_clear(struct net_device *dev)
-{
- unsigned long int oldtime = jiffies;
- while (scb_rdcmd(dev) && (time_before(jiffies, oldtime + 10)));
- if (scb_rdcmd(dev)) {
- printk("%s: command didn't clear\n", dev->name);
- }
-}
-
-static irqreturn_t eexp_irq(int dummy, void *dev_info)
-{
- struct net_device *dev = dev_info;
- struct net_local *lp;
- unsigned short ioaddr,status,ack_cmd;
- unsigned short old_read_ptr, old_write_ptr;
-
- lp = netdev_priv(dev);
- ioaddr = dev->base_addr;
-
- spin_lock(&lp->lock);
-
- old_read_ptr = inw(ioaddr+READ_PTR);
- old_write_ptr = inw(ioaddr+WRITE_PTR);
-
- outb(SIRQ_dis|irqrmap[dev->irq], ioaddr+SET_IRQ);
-
- status = scb_status(dev);
-
-#if NET_DEBUG > 4
- printk(KERN_DEBUG "%s: interrupt (status %x)\n", dev->name, status);
-#endif
-
- if (lp->started == (STARTED_CU | STARTED_RU)) {
-
- do {
- eexp_cmd_clear(dev);
-
- ack_cmd = SCB_ack(status);
- scb_command(dev, ack_cmd);
- outb(0,ioaddr+SIGNAL_CA);
-
- eexp_cmd_clear(dev);
-
- if (SCB_complete(status)) {
- if (!eexp_hw_lasttxstat(dev)) {
- printk("%s: tx interrupt but no status\n", dev->name);
- }
- }
-
- if (SCB_rxdframe(status))
- eexp_hw_rx_pio(dev);
-
- status = scb_status(dev);
- } while (status & 0xc000);
-
- if (SCB_RUdead(status))
- {
- printk(KERN_WARNING "%s: RU stopped: status %04x\n",
- dev->name,status);
-#if 0
- printk(KERN_WARNING "%s: cur_rfd=%04x, cur_rbd=%04x\n", dev->name, lp->cur_rfd, lp->cur_rbd);
- outw(lp->cur_rfd, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: [%04x]\n", dev->name, inw(ioaddr+DATAPORT));
- outw(lp->cur_rfd+6, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: rbd is %04x\n", dev->name, rbd= inw(ioaddr+DATAPORT));
- outw(rbd, ioaddr+READ_PTR);
- printk(KERN_WARNING "%s: [%04x %04x] ", dev->name, inw(ioaddr+DATAPORT), inw(ioaddr+DATAPORT));
- outw(rbd+8, ioaddr+READ_PTR);
- printk("[%04x]\n", inw(ioaddr+DATAPORT));
-#endif
- dev->stats.rx_errors++;
-#if 1
- eexp_hw_rxinit(dev);
-#else
- lp->cur_rfd = lp->first_rfd;
-#endif
- scb_wrrfa(dev, lp->rx_buf_start);
- scb_command(dev, SCB_RUstart);
- outb(0,ioaddr+SIGNAL_CA);
- }
- } else {
- if (status & 0x8000)
- ack_cmd = eexp_start_irq(dev, status);
- else
- ack_cmd = SCB_ack(status);
- scb_command(dev, ack_cmd);
- outb(0,ioaddr+SIGNAL_CA);
- }
-
- eexp_cmd_clear(dev);
-
- outb(SIRQ_en|irqrmap[dev->irq], ioaddr+SET_IRQ);
-
-#if NET_DEBUG > 6
- printk("%s: leaving eexp_irq()\n", dev->name);
-#endif
- outw(old_read_ptr, ioaddr+READ_PTR);
- outw(old_write_ptr, ioaddr+WRITE_PTR);
-
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-/*
- * Hardware access functions
- */
-
-/*
- * Set the cable type to use.
- */
-
-static void eexp_hw_set_interface(struct net_device *dev)
-{
- unsigned char oldval = inb(dev->base_addr + 0x300e);
- oldval &= ~0x82;
- switch (dev->if_port) {
- case TPE:
- oldval |= 0x2;
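- /* no break: TPE falls through and also sets the 0x80 bit */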
- case BNC:
- oldval |= 0x80;
- break;
- }
- outb(oldval, dev->base_addr+0x300e);
- mdelay(20);
-}
-
-/*
- * Check all the receive buffers, and hand any received packets
- * to the upper levels. Basic sanity check on each frame
- * descriptor, though we don't bother trying to fix broken ones.
- */
-
-static void eexp_hw_rx_pio(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short rx_block = lp->rx_ptr;
- unsigned short boguscount = lp->num_rx_bufs;
- unsigned short ioaddr = dev->base_addr;
- unsigned short status;
-
-#if NET_DEBUG > 6
- printk(KERN_DEBUG "%s: eexp_hw_rx()\n", dev->name);
-#endif
-
- do {
- unsigned short rfd_cmd, rx_next, pbuf, pkt_len;
-
- outw(rx_block, ioaddr + READ_PTR);
- status = inw(ioaddr + DATAPORT);
-
- if (FD_Done(status))
- {
- rfd_cmd = inw(ioaddr + DATAPORT);
- rx_next = inw(ioaddr + DATAPORT);
- pbuf = inw(ioaddr + DATAPORT);
-
- outw(pbuf, ioaddr + READ_PTR);
- pkt_len = inw(ioaddr + DATAPORT);
-
- if (rfd_cmd!=0x0000)
- {
- printk(KERN_WARNING "%s: rfd_cmd not zero:0x%04x\n",
- dev->name, rfd_cmd);
- continue;
- }
- else if (pbuf!=rx_block+0x16)
- {
- printk(KERN_WARNING "%s: rfd and rbd out of sync 0x%04x 0x%04x\n",
- dev->name, rx_block+0x16, pbuf);
- continue;
- }
- else if ((pkt_len & 0xc000)!=0xc000)
- {
- printk(KERN_WARNING "%s: EOF or F not set on received buffer (%04x)\n",
- dev->name, pkt_len & 0xc000);
- continue;
- }
- else if (!FD_OK(status))
- {
- dev->stats.rx_errors++;
- if (FD_CRC(status))
- dev->stats.rx_crc_errors++;
- if (FD_Align(status))
- dev->stats.rx_frame_errors++;
- if (FD_Resrc(status))
- dev->stats.rx_fifo_errors++;
- if (FD_DMA(status))
- dev->stats.rx_over_errors++;
- if (FD_Short(status))
- dev->stats.rx_length_errors++;
- }
- else
- {
- struct sk_buff *skb;
- pkt_len &= 0x3fff;
- skb = netdev_alloc_skb(dev, pkt_len + 16);
- if (skb == NULL)
- {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet\n",dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2);
- outw(pbuf+10, ioaddr+READ_PTR);
- insw(ioaddr+DATAPORT, skb_put(skb,pkt_len),(pkt_len+1)>>1);
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- outw(rx_block, ioaddr+WRITE_PTR);
- outw(0, ioaddr+DATAPORT);
- outw(0, ioaddr+DATAPORT);
- rx_block = rx_next;
- }
- } while (FD_Done(status) && boguscount--);
- lp->rx_ptr = rx_block;
-}
-
-/*
- * Hand a packet to the card for transmission
- * If we get here, we MUST have already checked
- * to make sure there is room in the transmit
- * buffer region.
- */
-
-static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
- unsigned short len)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- if (LOCKUP16 || lp->width) {
- /* Stop the CU so that there is no chance that it
- jumps off to a bogus address while we are writing the
- pointer to the next transmit packet in 8-bit mode --
- this eliminates the "CU wedged" errors in 8-bit mode.
- (Zoltan Szilagyi 10-12-96) */
- scb_command(dev, SCB_CUsuspend);
- outw(0xFFFF, ioaddr+SIGNAL_CA);
- }
-
- outw(lp->tx_head, ioaddr + WRITE_PTR);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
- outw(lp->tx_head+0x08, ioaddr + DATAPORT);
- outw(lp->tx_head+0x0e, ioaddr + DATAPORT);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
- outw(lp->tx_head+0x08, ioaddr + DATAPORT);
-
- outw(0x8000|len, ioaddr + DATAPORT);
- outw(-1, ioaddr + DATAPORT);
- outw(lp->tx_head+0x16, ioaddr + DATAPORT);
- outw(0, ioaddr + DATAPORT);
-
- outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
-
- outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
- outw(lp->tx_head, ioaddr + DATAPORT);
-
- dev->trans_start = jiffies;
- lp->tx_tail = lp->tx_head;
- if (lp->tx_head==TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
- lp->tx_head = TX_BUF_START;
- else
- lp->tx_head += TX_BUF_SIZE;
- if (lp->tx_head != lp->tx_reap)
- netif_wake_queue(dev);
-
- if (LOCKUP16 || lp->width) {
- /* Restart the CU so that the packet can actually
- be transmitted. (Zoltan Szilagyi 10-12-96) */
- scb_command(dev, SCB_CUresume);
- outw(0xFFFF, ioaddr+SIGNAL_CA);
- }
-
- dev->stats.tx_packets++;
- lp->last_tx = jiffies;
-}
-
-static const struct net_device_ops eexp_netdev_ops = {
- .ndo_open = eexp_open,
- .ndo_stop = eexp_close,
- .ndo_start_xmit = eexp_xmit,
- .ndo_set_rx_mode = eexp_set_multicast,
- .ndo_tx_timeout = eexp_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * Sanity check the suspected EtherExpress card
- * Read hardware address, reset card, size memory and initialize buffer
- * memory pointers. These are held in netdev_priv(), in case someone has more
- * than one card in a machine.
- */
-
-static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
-{
- unsigned short hw_addr[3];
- unsigned char buswidth;
- unsigned int memory_size;
- int i;
- unsigned short xsum = 0;
- struct net_local *lp = netdev_priv(dev);
-
- printk("%s: EtherExpress 16 at %#x ",dev->name,ioaddr);
-
- outb(ASIC_RST, ioaddr+EEPROM_Ctrl);
- outb(0, ioaddr+EEPROM_Ctrl);
- udelay(500);
- outb(i586_RST, ioaddr+EEPROM_Ctrl);
-
- hw_addr[0] = eexp_hw_readeeprom(ioaddr,2);
- hw_addr[1] = eexp_hw_readeeprom(ioaddr,3);
- hw_addr[2] = eexp_hw_readeeprom(ioaddr,4);
-
- /* Standard Address or Compaq LTE Address */
- if (!((hw_addr[2]==0x00aa && ((hw_addr[1] & 0xff00)==0x0000)) ||
- (hw_addr[2]==0x0080 && ((hw_addr[1] & 0xff00)==0x5F00))))
- {
- printk(" rejected: invalid address %04x%04x%04x\n",
- hw_addr[2],hw_addr[1],hw_addr[0]);
- return -ENODEV;
- }
-
- /* Calculate the EEPROM checksum. Carry on anyway if it's bad,
- * though.
- */
- for (i = 0; i < 64; i++)
- xsum += eexp_hw_readeeprom(ioaddr, i);
- if (xsum != 0xbaba)
- printk(" (bad EEPROM xsum 0x%02x)", xsum);
-
- dev->base_addr = ioaddr;
- for ( i=0 ; i<6 ; i++ )
- dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
-
- {
- static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
- unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
-
- /* Use the IRQ from EEPROM if none was given */
- if (!dev->irq)
- dev->irq = irqmap[setupval>>13];
-
- if (dev->if_port == 0xff) {
- dev->if_port = !(setupval & 0x1000) ? AUI :
- eexp_hw_readeeprom(ioaddr,5) & 0x1 ? TPE : BNC;
- }
-
- buswidth = !((setupval & 0x400) >> 10);
- }
-
- memset(lp, 0, sizeof(struct net_local));
- spin_lock_init(&lp->lock);
-
- printk("(IRQ %d, %s connector, %d-bit bus", dev->irq,
- eexp_ifmap[dev->if_port], buswidth?8:16);
-
- if (!request_region(dev->base_addr + 0x300e, 1, "EtherExpress"))
- return -EBUSY;
-
- eexp_hw_set_interface(dev);
-
- release_region(dev->base_addr + 0x300e, 1);
-
- /* Find out how much RAM we have on the card */
- outw(0, dev->base_addr + WRITE_PTR);
- for (i = 0; i < 32768; i++)
- outw(0, dev->base_addr + DATAPORT);
-
- for (memory_size = 0; memory_size < 64; memory_size++)
- {
- outw(memory_size<<10, dev->base_addr + READ_PTR);
- if (inw(dev->base_addr+DATAPORT))
- break;
- outw(memory_size<<10, dev->base_addr + WRITE_PTR);
- outw(memory_size | 0x5000, dev->base_addr+DATAPORT);
- outw(memory_size<<10, dev->base_addr + READ_PTR);
- if (inw(dev->base_addr+DATAPORT) != (memory_size | 0x5000))
- break;
- }
-
- /* Sort out the number of buffers. We may have 16, 32, 48 or 64k
- * of RAM to play with.
- */
- lp->num_tx_bufs = 4;
- lp->rx_buf_end = 0x3ff6;
- switch (memory_size)
- {
- case 64:
- lp->rx_buf_end += 0x4000;
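-		/* fall through */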
- case 48:
- lp->num_tx_bufs += 4;
- lp->rx_buf_end += 0x4000;
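-		/* fall through */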
- case 32:
- lp->rx_buf_end += 0x4000;
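-		/* fall through */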
- case 16:
- printk(", %dk RAM)\n", memory_size);
- break;
- default:
- printk(") bad memory size (%dk).\n", memory_size);
-		return -ENODEV;
- }
-
- lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
- lp->width = buswidth;
-
- dev->netdev_ops = &eexp_netdev_ops;
- dev->watchdog_timeo = 2*HZ;
-
- return register_netdev(dev);
-}
-
-/*
- * Read a word from the EtherExpress on-board serial EEPROM.
- * The EEPROM contains 64 words of 16 bits.
- */
-static unsigned short __init eexp_hw_readeeprom(unsigned short ioaddr,
- unsigned char location)
-{
- unsigned short cmd = 0x180|(location&0x7f);
- unsigned short rval = 0,wval = EC_CS|i586_RST;
- int i;
-
- outb(EC_CS|i586_RST,ioaddr+EEPROM_Ctrl);
- for (i=0x100 ; i ; i>>=1 )
- {
- if (cmd&i)
- wval |= EC_Wr;
- else
- wval &= ~EC_Wr;
-
- outb(wval,ioaddr+EEPROM_Ctrl);
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- }
- wval &= ~EC_Wr;
- outb(wval,ioaddr+EEPROM_Ctrl);
- for (i=0x8000 ; i ; i>>=1 )
- {
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- if (inb(ioaddr+EEPROM_Ctrl)&EC_Rd)
- rval |= i;
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- }
- wval &= ~EC_CS;
- outb(wval|EC_Clk,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- outb(wval,ioaddr+EEPROM_Ctrl);
- eeprom_delay();
- return rval;
-}
-
-/*
- * Reap tx buffers and return last transmit status.
- * If the return value is 0 then either:
- *  a) we're not transmitting anything, so why are we here?
- *  b) we've died.
- * Otherwise, Stat_Busy(return) means we've still got some packets
- * to transmit, and Stat_Done(return) means our buffers should be empty
- * again.
- */
-
-static unsigned short eexp_hw_lasttxstat(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short tx_block = lp->tx_reap;
- unsigned short status;
-
- if (!netif_queue_stopped(dev) && lp->tx_head==lp->tx_reap)
- return 0x0000;
-
- do
- {
- outw(tx_block & ~31, dev->base_addr + SM_PTR);
- status = inw(dev->base_addr + SHADOW(tx_block));
- if (!Stat_Done(status))
- {
- lp->tx_link = tx_block;
- return status;
- }
- else
- {
- lp->last_tx_restart = 0;
- dev->stats.collisions += Stat_NoColl(status);
- if (!Stat_OK(status))
- {
- char *whatsup = NULL;
- dev->stats.tx_errors++;
- if (Stat_Abort(status))
- dev->stats.tx_aborted_errors++;
- if (Stat_TNoCar(status)) {
- whatsup = "aborted, no carrier";
- dev->stats.tx_carrier_errors++;
- }
- if (Stat_TNoCTS(status)) {
- whatsup = "aborted, lost CTS";
- dev->stats.tx_carrier_errors++;
- }
- if (Stat_TNoDMA(status)) {
- whatsup = "FIFO underran";
- dev->stats.tx_fifo_errors++;
- }
- if (Stat_TXColl(status)) {
- whatsup = "aborted, too many collisions";
- dev->stats.tx_aborted_errors++;
- }
- if (whatsup)
- printk(KERN_INFO "%s: transmit %s\n",
- dev->name, whatsup);
- }
- else
- dev->stats.tx_packets++;
- }
- if (tx_block == TX_BUF_START+((lp->num_tx_bufs-1)*TX_BUF_SIZE))
- lp->tx_reap = tx_block = TX_BUF_START;
- else
- lp->tx_reap = tx_block += TX_BUF_SIZE;
- netif_wake_queue(dev);
- }
- while (lp->tx_reap != lp->tx_head);
-
- lp->tx_link = lp->tx_tail + 0x08;
-
- return status;
-}
-
-/*
- * This should never happen. It is called when some higher routine detects
- * that the CU has stopped, to try to restart it from the last packet we knew
- * we were working on, or the idle loop if we had finished for the time being.
- */
-
-static void eexp_hw_txrestart(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
-
- lp->last_tx_restart = lp->tx_link;
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
-
- {
- unsigned short boguscount=50,failcount=5;
- while (!scb_status(dev))
- {
- if (!--boguscount)
- {
- if (--failcount)
- {
- printk(KERN_WARNING "%s: CU start timed out, status %04x, cmd %04x\n", dev->name, scb_status(dev), scb_rdcmd(dev));
- scb_wrcbl(dev, lp->tx_link);
- scb_command(dev, SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- boguscount = 100;
- }
- else
- {
- printk(KERN_WARNING "%s: Failed to restart CU, resetting board...\n",dev->name);
- eexp_hw_init586(dev);
- netif_wake_queue(dev);
- return;
- }
- }
- }
- }
-}
-
-/*
- * Writes down the list of transmit buffers into card memory. Each
- * entry consists of an 82586 transmit command, followed by a jump
- * pointing to itself. When we want to transmit a packet, we write
- * the data into the appropriate transmit buffer and then modify the
- * preceding jump to point at the new transmit command. This means that
- * the 586 command unit is continuously active.
- */
-
-static void eexp_hw_txinit(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short tx_block = TX_BUF_START;
- unsigned short curtbuf;
- unsigned short ioaddr = dev->base_addr;
-
- for ( curtbuf=0 ; curtbuf<lp->num_tx_bufs ; curtbuf++ )
- {
- outw(tx_block, ioaddr + WRITE_PTR);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(Cmd_INT|Cmd_Xmit, ioaddr + DATAPORT);
- outw(tx_block+0x08, ioaddr + DATAPORT);
- outw(tx_block+0x0e, ioaddr + DATAPORT);
-
- outw(0x0000, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
- outw(tx_block+0x08, ioaddr + DATAPORT);
-
- outw(0x8000, ioaddr + DATAPORT);
- outw(-1, ioaddr + DATAPORT);
- outw(tx_block+0x16, ioaddr + DATAPORT);
- outw(0x0000, ioaddr + DATAPORT);
-
- tx_block += TX_BUF_SIZE;
- }
- lp->tx_head = TX_BUF_START;
- lp->tx_reap = TX_BUF_START;
- lp->tx_tail = tx_block - TX_BUF_SIZE;
- lp->tx_link = lp->tx_tail + 0x08;
- lp->rx_buf_start = tx_block;
-
-}
-
-/*
- * Write the circular list of receive buffer descriptors to card memory.
- * The end of the list isn't marked, which means that the 82586 receive
- * unit will loop until buffers become available (this avoids it giving us
- * "out of resources" messages).
- */
-
-static void eexp_hw_rxinit(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short rx_block = lp->rx_buf_start;
- unsigned short ioaddr = dev->base_addr;
-
- lp->num_rx_bufs = 0;
- lp->rx_first = lp->rx_ptr = rx_block;
- do
- {
- lp->num_rx_bufs++;
-
- outw(rx_block, ioaddr + WRITE_PTR);
-
- outw(0, ioaddr + DATAPORT); outw(0, ioaddr+DATAPORT);
- outw(rx_block + RX_BUF_SIZE, ioaddr+DATAPORT);
- outw(0xffff, ioaddr+DATAPORT);
-
- outw(0x0000, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
- outw(0xdead, ioaddr+DATAPORT);
-
- outw(0x0000, ioaddr+DATAPORT);
- outw(rx_block + RX_BUF_SIZE + 0x16, ioaddr+DATAPORT);
- outw(rx_block + 0x20, ioaddr+DATAPORT);
- outw(0, ioaddr+DATAPORT);
- outw(RX_BUF_SIZE-0x20, ioaddr+DATAPORT);
-
- lp->rx_last = rx_block;
- rx_block += RX_BUF_SIZE;
- } while (rx_block <= lp->rx_buf_end-RX_BUF_SIZE);
-
-
- /* Make first Rx frame descriptor point to first Rx buffer
- descriptor */
- outw(lp->rx_first + 6, ioaddr+WRITE_PTR);
- outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
- /* Close Rx frame descriptor ring */
- outw(lp->rx_last + 4, ioaddr+WRITE_PTR);
- outw(lp->rx_first, ioaddr+DATAPORT);
-
- /* Close Rx buffer descriptor ring */
- outw(lp->rx_last + 0x16 + 2, ioaddr+WRITE_PTR);
- outw(lp->rx_first + 0x16, ioaddr+DATAPORT);
-
-}
-
-/*
- * Un-reset the 586, and start the configuration sequence. We don't wait for
- * this to finish, but allow the interrupt handler to start the CU and RU for
- * us. We can't start the receive/transmission system up before we know that
- * the hardware is configured correctly.
- */
-
-static void eexp_hw_init586(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- unsigned short ioaddr = dev->base_addr;
- int i;
-
-#if NET_DEBUG > 6
- printk("%s: eexp_hw_init586()\n", dev->name);
-#endif
-
- lp->started = 0;
-
- set_loopback(dev);
-
- outb(SIRQ_dis|irqrmap[dev->irq],ioaddr+SET_IRQ);
-
- /* Download the startup code */
- outw(lp->rx_buf_end & ~31, ioaddr + SM_PTR);
- outw(lp->width?0x0001:0x0000, ioaddr + 0x8006);
- outw(0x0000, ioaddr + 0x8008);
- outw(0x0000, ioaddr + 0x800a);
- outw(0x0000, ioaddr + 0x800c);
- outw(0x0000, ioaddr + 0x800e);
-
- for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
- int j;
- outw(i, ioaddr + SM_PTR);
- for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
- outw(start_code[(i+j)/2],
- ioaddr+0x4000+j);
- for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
- outw(start_code[(i+j+16)/2],
- ioaddr+0x8000+j);
- }
-
- /* Do we want promiscuous mode or multicast? */
- outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- i = inw(ioaddr+SHADOW(CONF_PROMISC));
- outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- ioaddr+SHADOW(CONF_PROMISC));
- lp->was_promisc = dev->flags & IFF_PROMISC;
-#if 0
- eexp_setup_filter(dev);
-#endif
-
- /* Write our hardware address */
- outw(CONF_HWADDR & ~31, ioaddr+SM_PTR);
- outw(((unsigned short *)dev->dev_addr)[0], ioaddr+SHADOW(CONF_HWADDR));
- outw(((unsigned short *)dev->dev_addr)[1],
- ioaddr+SHADOW(CONF_HWADDR+2));
- outw(((unsigned short *)dev->dev_addr)[2],
- ioaddr+SHADOW(CONF_HWADDR+4));
-
- eexp_hw_txinit(dev);
- eexp_hw_rxinit(dev);
-
- outb(0,ioaddr+EEPROM_Ctrl);
- mdelay(5);
-
- scb_command(dev, 0xf000);
- outb(0,ioaddr+SIGNAL_CA);
-
- outw(0, ioaddr+SM_PTR);
-
- {
- unsigned short rboguscount=50,rfailcount=5;
- while (inw(ioaddr+0x4000))
- {
- if (!--rboguscount)
- {
- printk(KERN_WARNING "%s: i82586 reset timed out, kicking...\n",
- dev->name);
- scb_command(dev, 0);
- outb(0,ioaddr+SIGNAL_CA);
- rboguscount = 100;
- if (!--rfailcount)
- {
- printk(KERN_WARNING "%s: i82586 not responding, giving up.\n",
- dev->name);
- return;
- }
- }
- }
- }
-
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, 0xf000|SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
-
- {
- unsigned short iboguscount=50,ifailcount=5;
- while (!scb_status(dev))
- {
- if (!--iboguscount)
- {
- if (--ifailcount)
- {
- printk(KERN_WARNING "%s: i82586 initialization timed out, status %04x, cmd %04x\n",
- dev->name, scb_status(dev), scb_rdcmd(dev));
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, 0xf000|SCB_CUstart);
- outb(0,ioaddr+SIGNAL_CA);
- iboguscount = 100;
- }
- else
- {
- printk(KERN_WARNING "%s: Failed to initialize i82586, giving up.\n",dev->name);
- return;
- }
- }
- }
- }
-
- clear_loopback(dev);
- outb(SIRQ_en|irqrmap[dev->irq],ioaddr+SET_IRQ);
-
- lp->init_time = jiffies;
-#if NET_DEBUG > 6
- printk("%s: leaving eexp_hw_init586()\n", dev->name);
-#endif
-}
-
-static void eexp_setup_filter(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- unsigned short ioaddr = dev->base_addr;
- int count = netdev_mc_count(dev);
- int i;
- if (count > 8) {
- printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
- dev->name, count);
- count = 8;
- }
-
- outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
- outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
- i = 0;
- netdev_for_each_mc_addr(ha, dev) {
- unsigned short *data = (unsigned short *) ha->addr;
-
- if (i == count)
- break;
- outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
- outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
- outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
- outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
- outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
- outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
- i++;
- }
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- */
-static void
-eexp_set_multicast(struct net_device *dev)
-{
- unsigned short ioaddr = dev->base_addr;
- struct net_local *lp = netdev_priv(dev);
- int kick = 0, i;
- if ((dev->flags & IFF_PROMISC) != lp->was_promisc) {
- outw(CONF_PROMISC & ~31, ioaddr+SM_PTR);
- i = inw(ioaddr+SHADOW(CONF_PROMISC));
- outw((dev->flags & IFF_PROMISC)?(i|1):(i & ~1),
- ioaddr+SHADOW(CONF_PROMISC));
- lp->was_promisc = dev->flags & IFF_PROMISC;
- kick = 1;
- }
- if (!(dev->flags & IFF_PROMISC)) {
- eexp_setup_filter(dev);
- if (lp->old_mc_count != netdev_mc_count(dev)) {
- kick = 1;
- lp->old_mc_count = netdev_mc_count(dev);
- }
- }
- if (kick) {
- unsigned long oj;
- scb_command(dev, SCB_CUsuspend);
- outb(0, ioaddr+SIGNAL_CA);
- outb(0, ioaddr+SIGNAL_CA);
-#if 0
- printk("%s: waiting for CU to go suspended\n", dev->name);
-#endif
- oj = jiffies;
- while ((SCB_CUstat(scb_status(dev)) == 2) &&
- (time_before(jiffies, oj + 2000)));
- if (SCB_CUstat(scb_status(dev)) == 2)
- printk("%s: warning, CU didn't stop\n", dev->name);
- lp->started &= ~(STARTED_CU);
- scb_wrcbl(dev, CONF_LINK);
- scb_command(dev, SCB_CUstart);
- outb(0, ioaddr+SIGNAL_CA);
- }
-}
-
-
-/*
- * MODULE stuff
- */
-
-#ifdef MODULE
-
-#define EEXP_MAX_CARDS 4 /* max number of cards to support */
-
-static struct net_device *dev_eexp[EEXP_MAX_CARDS];
-static int irq[EEXP_MAX_CARDS];
-static int io[EEXP_MAX_CARDS];
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-MODULE_PARM_DESC(io, "EtherExpress 16 I/O base address(es)");
-MODULE_PARM_DESC(irq, "EtherExpress 16 IRQ number(s)");
-MODULE_LICENSE("GPL");
-
-
-/* Ideally the user would give us io=, irq= for every card. If any parameters
- * are specified, we verify and then use them. If no parameters are given, we
- * autoprobe for one card only.
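- *
- * e.g. (assuming the usual module name):
- *	insmod eexpress.o io=0x300,0x320 irq=10,11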
- */
-int __init init_module(void)
-{
- struct net_device *dev;
- int this_dev, found = 0;
-
- for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
-		dev = alloc_etherdev(sizeof(struct net_local));
-		if (!dev)
-			break;
- dev->irq = irq[this_dev];
- dev->base_addr = io[this_dev];
- if (io[this_dev] == 0) {
- if (this_dev)
- break;
- printk(KERN_NOTICE "eexpress.c: Module autoprobe not recommended, give io=xx.\n");
- }
- if (do_express_probe(dev) == 0) {
- dev_eexp[this_dev] = dev;
- found++;
- continue;
- }
- printk(KERN_WARNING "eexpress.c: Failed to register card at 0x%x.\n", io[this_dev]);
- free_netdev(dev);
- break;
- }
- if (found)
- return 0;
- return -ENXIO;
-}
-
-void __exit cleanup_module(void)
-{
- int this_dev;
-
- for (this_dev = 0; this_dev < EEXP_MAX_CARDS; this_dev++) {
- struct net_device *dev = dev_eexp[this_dev];
- if (dev) {
- unregister_netdev(dev);
- free_netdev(dev);
- }
- }
-}
-#endif
-
-/*
- * Local Variables:
- * c-file-style: "linux"
- * tab-width: 8
- * End:
- */
diff --git a/drivers/net/ethernet/i825xx/eexpress.h b/drivers/net/ethernet/i825xx/eexpress.h
deleted file mode 100644
index dc9c6ea289e..00000000000
--- a/drivers/net/ethernet/i825xx/eexpress.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * eexpress.h: Intel EtherExpress16 defines
- */
-
-/*
- * EtherExpress card register addresses
- * as offsets from the base IO region (dev->base_addr)
- */
-
-#define DATAPORT 0x0000
-#define WRITE_PTR 0x0002
-#define READ_PTR 0x0004
-#define SIGNAL_CA 0x0006
-#define SET_IRQ 0x0007
-#define SM_PTR 0x0008
-#define MEM_Dec 0x000a
-#define MEM_Ctrl 0x000b
-#define MEM_Page_Ctrl 0x000c
-#define Config 0x000d
-#define EEPROM_Ctrl 0x000e
-#define ID_PORT 0x000f
-#define MEM_ECtrl 0x000f
-
-/*
- * card register defines
- */
-
-/* SET_IRQ */
-#define SIRQ_en 0x08
-#define SIRQ_dis 0x00
-
-/* EEPROM_Ctrl */
-#define EC_Clk 0x01
-#define EC_CS 0x02
-#define EC_Wr 0x04
-#define EC_Rd 0x08
-#define ASIC_RST 0x40
-#define i586_RST 0x80
-
-#define eeprom_delay() { udelay(40); }
-
-/*
- * i82586 Memory Configuration
- */
-
-/* (System Configuration Pointer) System start up block, read after 586_RST */
-#define SCP_START 0xfff6
-
-/* Intermediate System Configuration Pointer */
-#define ISCP_START 0x0000
-
-/* System Command Block */
-#define SCB_START 0x0008
-
-/* Start of buffer region. Everything before this is used for control
- * structures and the CU configuration program. The memory layout is
- * determined in eexp_hw_probe(), once we know how much memory is
- * available on the card.
- */
-
-#define TX_BUF_START 0x0100
-
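-/* Maximum Ethernet frame plus the per-buffer header, rounded up to a
- * multiple of 32 bytes by the "& ~0x1f".
- */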
-#define TX_BUF_SIZE ((24+ETH_FRAME_LEN+31)&~0x1f)
-#define RX_BUF_SIZE ((32+ETH_FRAME_LEN+31)&~0x1f)
-
-/*
- * SCB defines
- */
-
-/* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s) (((s) & 0x8000) != 0)
-#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
-#define SCB_CUdead(s) (((s) & 0x2000) != 0)
-#define SCB_RUdead(s) (((s) & 0x1000) != 0)
-#define SCB_ack(s) ((s) & 0xf000)
-
-/* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s) (((s)&0x0300)>>8)
-
-/* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s) (((s)&0x0070)>>4)
-
-/* SCB commands */
-#define SCB_CUnop 0x0000
-#define SCB_CUstart 0x0100
-#define SCB_CUresume 0x0200
-#define SCB_CUsuspend 0x0300
-#define SCB_CUabort 0x0400
-#define SCB_resetchip 0x0080
-
-#define SCB_RUnop 0x0000
-#define SCB_RUstart 0x0010
-#define SCB_RUresume 0x0020
-#define SCB_RUsuspend 0x0030
-#define SCB_RUabort 0x0040
-
-/*
- * Command block defines
- */
-
-#define Stat_Done(s) (((s) & 0x8000) != 0)
-#define Stat_Busy(s) (((s) & 0x4000) != 0)
-#define Stat_OK(s) (((s) & 0x2000) != 0)
-#define Stat_Abort(s) (((s) & 0x1000) != 0)
-#define Stat_STFail(s)	(((s) & 0x0800) != 0)
-#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
-#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
-#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
-#define Stat_TDefer(s) (((s) & 0x0080) != 0)
-#define Stat_TColl(s) (((s) & 0x0040) != 0)
-#define Stat_TXColl(s) (((s) & 0x0020) != 0)
-#define Stat_NoColl(s) ((s) & 0x000f)
-
-/* Cmd_END will end AFTER the command if this is the first
- * command block after an SCB_CUstart, but BEFORE the command
- * for all subsequent commands. Best strategy is to place
- * Cmd_INT on the last command in the sequence, followed by a
- * dummy Cmd_Nop with Cmd_END after this.
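- *
- * For example, a chain of two transmit commands would be laid out as:
- *
- *	[Cmd_Xmit] -> [Cmd_Xmit|Cmd_INT] -> [Cmd_Nop|Cmd_END]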
- */
-
-#define Cmd_END 0x8000
-#define Cmd_SUS 0x4000
-#define Cmd_INT 0x2000
-
-#define Cmd_Nop 0x0000
-#define Cmd_SetAddr 0x0001
-#define Cmd_Config 0x0002
-#define Cmd_MCast 0x0003
-#define Cmd_Xmit 0x0004
-#define Cmd_TDR 0x0005
-#define Cmd_Dump 0x0006
-#define Cmd_Diag 0x0007
-
-
-/*
- * Frame Descriptor (Receive block) defines
- */
-
-#define FD_Done(s) (((s) & 0x8000) != 0)
-#define FD_Busy(s) (((s) & 0x4000) != 0)
-#define FD_OK(s) (((s) & 0x2000) != 0)
-
-#define FD_CRC(s) (((s) & 0x0800) != 0)
-#define FD_Align(s) (((s) & 0x0400) != 0)
-#define FD_Resrc(s) (((s) & 0x0200) != 0)
-#define FD_DMA(s) (((s) & 0x0100) != 0)
-#define FD_Short(s) (((s) & 0x0080) != 0)
-#define FD_NoEOF(s) (((s) & 0x0040) != 0)
-
-struct rfd_header {
- volatile unsigned long flags;
- volatile unsigned short link;
- volatile unsigned short rbd_offset;
- volatile unsigned short dstaddr1;
- volatile unsigned short dstaddr2;
- volatile unsigned short dstaddr3;
- volatile unsigned short srcaddr1;
- volatile unsigned short srcaddr2;
- volatile unsigned short srcaddr3;
- volatile unsigned short length;
-
- /* This is actually a Receive Buffer Descriptor. The way we
- * arrange memory means that an RBD always follows the RFD that
- * points to it, so they might as well be in the same structure.
- */
- volatile unsigned short actual_count;
- volatile unsigned short next_rbd;
- volatile unsigned short buf_addr1;
- volatile unsigned short buf_addr2;
- volatile unsigned short size;
-};
-
-/* Returned data from the Time Domain Reflectometer */
-
-#define TDR_LINKOK (1<<15)
-#define TDR_XCVRPROBLEM (1<<14)
-#define TDR_OPEN (1<<13)
-#define TDR_SHORT (1<<12)
-#define TDR_TIME 0x7ff
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
deleted file mode 100644
index 3735bfa5360..00000000000
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ /dev/null
@@ -1,1337 +0,0 @@
-/* Intel Professional Workstation/panther ethernet driver */
-/* lp486e.c: A panther 82596 ethernet driver for linux. */
-/*
- History and copyrights:
-
- Driver skeleton
- Written 1993 by Donald Becker.
- Copyright 1993 United States Government as represented by the Director,
- National Security Agency. This software may only be used and
- distributed according to the terms of the GNU General Public License
- as modified by SRC, incorporated herein by reference.
-
- The author may be reached as becker@scyld.com, or C/O
- Scyld Computing Corporation
- 410 Severn Ave., Suite 210
- Annapolis MD 21403
-
- Apricot
- Written 1994 by Mark Evans.
- This driver is for the Apricot 82596 bus-master interface
-
- Modularised 12/94 Mark Evans
-
- Professional Workstation
- Derived from apricot.c by Ard van Breemen
- <ard@murphy.nl>|<ard@cstmel.hobby.nl>|<ard@cstmel.nl.eu.org>
-
- Credits:
- Thanks to Murphy Software BV for letting me write this in their time.
- Well, actually, I get paid doing this...
- (Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
- more information on the Professional Workstation)
-
- Present version
- aeb@cwi.nl
-*/
-/*
- There are currently two motherboards that I know of in the
- Professional Workstation. The only one I am familiar with is the
- Intel Panther motherboard. -- ard
-*/
-/*
-The PWS is equipped with an Intel 82596. This is a very intelligent controller
-which runs its own microcode. Communication with the host processor is done
-through linked lists of commands and buffers in the host processor's memory.
-A complete description of the 82596 is available from Intel; search for
-a file called "29021806.pdf", which describes the chip itself in full.
-To use it in the PWS, some additions are needed regarding generation of
-the PORT and CA signals, and the interrupt glue needed for a PC.
-I/O map:
-PORT SIZE ACTION MEANING
-0xCB0 2 WRITE Lower 16 bits for PORT command
-0xCB2 2 WRITE Upper 16 bits for PORT command, and issue of PORT command
-0xCB4 1 WRITE Generation of CA signal
-0xCB8 1 WRITE Clear interrupt glue
-All other communication is through memory!
-*/
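-/*
-For reference, the bring-up sequence used below in i596_scp_setup() boils
-down to this (PORT() and CA() are the small helpers defined further down):
-
-	PORT(0, PORT_RESET);			reset the 82596
-	udelay(100);
-	PORT(va_to_pa(&lp->scp), PORT_ALTSCP);	point it at our SCP
-	CA();					channel attention: the chip reads
-						SCP -> ISCP -> SCB and clears
-						iscp.busy when done
-*/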
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#define DRV_NAME "lp486e"
-
-/* debug print flags */
-#define LOG_SRCDST 0x80000000
-#define LOG_STATINT 0x40000000
-#define LOG_STARTINT 0x20000000
-
-#define i596_debug debug
-
-static int i596_debug = 0;
-
-static const char * const medianame[] = {
- "10baseT", "AUI",
- "10baseT-FD", "AUI-FD",
-};
-
-#define LP486E_TOTAL_SIZE 16
-
-#define I596_NULL (0xffffffff)
-
-#define CMD_EOL 0x8000 /* The last command of the list, stop. */
-#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
-#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
-
-#define CMD_FLEX 0x0008 /* Enable flexible memory model */
-
-enum commands {
- CmdNOP = 0,
- CmdIASetup = 1,
- CmdConfigure = 2,
- CmdMulticastList = 3,
- CmdTx = 4,
- CmdTDR = 5,
- CmdDump = 6,
- CmdDiagnose = 7
-};
-
-#if 0
-static const char *CUcmdnames[8] = { "NOP", "IASetup", "Configure", "MulticastList",
- "Tx", "TDR", "Dump", "Diagnose" };
-#endif
-
-/* Status word bits */
-#define STAT_CX 0x8000 /* The CU finished executing a command
- with the Interrupt bit set */
-#define STAT_FR 0x4000 /* The RU finished receiving a frame */
-#define STAT_CNA 0x2000 /* The CU left the active state */
-#define STAT_RNR 0x1000 /* The RU left the active state */
-#define STAT_ACK (STAT_CX | STAT_FR | STAT_CNA | STAT_RNR)
-#define STAT_CUS 0x0700 /* Status of CU: 0: idle, 1: suspended,
- 2: active, 3-7: unused */
-#define STAT_RUS 0x00f0 /* Status of RU: 0: idle, 1: suspended,
- 2: no resources, 4: ready,
- 10: no resources due to no more RBDs,
- 12: no more RBDs, other: unused */
-#define STAT_T 0x0008 /* Bus throttle timers loaded */
-#define STAT_ZERO 0x0807 /* Always zero */
-
-#if 0
-static char *CUstates[8] = {
- "idle", "suspended", "active", 0, 0, 0, 0, 0
-};
-static char *RUstates[16] = {
- "idle", "suspended", "no resources", 0, "ready", 0, 0, 0,
- 0, 0, "no RBDs", 0, "out of RBDs", 0, 0, 0
-};
-
-static void
-i596_out_status(int status) {
- int bad = 0;
- char *s;
-
- printk("status %4.4x:", status);
- if (status == 0xffff)
- printk(" strange..\n");
- else {
- if (status & STAT_CX)
- printk(" CU done");
- if (status & STAT_CNA)
- printk(" CU stopped");
- if (status & STAT_FR)
- printk(" got a frame");
- if (status & STAT_RNR)
- printk(" RU stopped");
- if (status & STAT_T)
- printk(" throttled");
- if (status & STAT_ZERO)
- bad = 1;
- s = CUstates[(status & STAT_CUS) >> 8];
- if (!s)
- bad = 1;
- else
- printk(" CU(%s)", s);
- s = RUstates[(status & STAT_RUS) >> 4];
- if (!s)
- bad = 1;
- else
- printk(" RU(%s)", s);
- if (bad)
- printk(" bad status");
- printk("\n");
- }
-}
-#endif
-
-/* Command word bits */
-#define ACK_CX 0x8000
-#define ACK_FR 0x4000
-#define ACK_CNA 0x2000
-#define ACK_RNR 0x1000
-
-#define CUC_START 0x0100
-#define CUC_RESUME 0x0200
-#define CUC_SUSPEND 0x0300
-#define CUC_ABORT 0x0400
-
-#define RX_START 0x0010
-#define RX_RESUME 0x0020
-#define RX_SUSPEND 0x0030
-#define RX_ABORT 0x0040
-
-typedef u32 phys_addr;
-
-static inline phys_addr
-va_to_pa(void *x) {
- return x ? virt_to_bus(x) : I596_NULL;
-}
-
-static inline void *
-pa_to_va(phys_addr x) {
- return (x == I596_NULL) ? NULL : bus_to_virt(x);
-}
-
-/* status bits for cmd */
-#define CMD_STAT_C 0x8000 /* CU command complete */
-#define CMD_STAT_B 0x4000 /* CU command in progress */
-#define CMD_STAT_OK 0x2000 /* CU command completed without errors */
-#define CMD_STAT_A 0x1000 /* CU command abnormally terminated */
-
-struct i596_cmd { /* 8 bytes */
- unsigned short status;
- unsigned short command;
- phys_addr pa_next; /* va_to_pa(struct i596_cmd *next) */
-};
-
-#define EOF 0x8000
-#define SIZE_MASK 0x3fff
-
-struct i596_tbd {
- unsigned short size;
- unsigned short pad;
- phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
- phys_addr pa_data; /* va_to_pa(char *data) */
- struct sk_buff *skb;
-};
-
-struct tx_cmd {
- struct i596_cmd cmd;
- phys_addr pa_tbd; /* va_to_pa(struct i596_tbd *tbd) */
- unsigned short size;
- unsigned short pad;
-};
-
-/* status bits for rfd */
-#define RFD_STAT_C 0x8000 /* Frame reception complete */
-#define RFD_STAT_B 0x4000 /* Frame reception in progress */
-#define RFD_STAT_OK 0x2000 /* Frame received without errors */
-#define RFD_STATUS 0x1fff
-#define RFD_LENGTH_ERR 0x1000
-#define RFD_CRC_ERR 0x0800
-#define RFD_ALIGN_ERR 0x0400
-#define RFD_NOBUFS_ERR 0x0200
-#define RFD_DMA_ERR 0x0100 /* DMA overrun failure to acquire system bus */
-#define RFD_SHORT_FRAME_ERR 0x0080
-#define RFD_NOEOP_ERR 0x0040
-#define RFD_TRUNC_ERR 0x0020
-#define RFD_MULTICAST 0x0002 /* 0: destination had our address
- 1: destination was broadcast/multicast */
-#define RFD_COLLISION 0x0001
-
-/* receive frame descriptor */
-struct i596_rfd {
- unsigned short stat;
- unsigned short cmd;
- phys_addr pa_next; /* va_to_pa(struct i596_rfd *next) */
- phys_addr pa_rbd; /* va_to_pa(struct i596_rbd *rbd) */
- unsigned short count;
- unsigned short size;
- char data[1532];
-};
-
-#define RBD_EL 0x8000
-#define RBD_P 0x4000
-#define RBD_SIZEMASK 0x3fff
-#define RBD_EOF 0x8000
-#define RBD_F 0x4000
-
-/* receive buffer descriptor */
-struct i596_rbd {
- unsigned short size;
- unsigned short pad;
- phys_addr pa_next; /* va_to_pa(struct i596_tbd *next) */
- phys_addr pa_data; /* va_to_pa(char *data) */
- phys_addr pa_prev; /* va_to_pa(struct i596_tbd *prev) */
-
- /* Driver private part */
- struct sk_buff *skb;
-};
-
-#define RX_RING_SIZE 64
-#define RX_SKBSIZE (ETH_FRAME_LEN+10)
-#define RX_RBD_SIZE 32
-
-/* System Control Block - 40 bytes */
-struct i596_scb {
- u16 status; /* 0 */
- u16 command; /* 2 */
- phys_addr pa_cmd; /* 4 - va_to_pa(struct i596_cmd *cmd) */
- phys_addr pa_rfd; /* 8 - va_to_pa(struct i596_rfd *rfd) */
- u32 crc_err; /* 12 */
- u32 align_err; /* 16 */
- u32 resource_err; /* 20 */
- u32 over_err; /* 24 */
- u32 rcvdt_err; /* 28 */
- u32 short_err; /* 32 */
- u16 t_on; /* 36 */
- u16 t_off; /* 38 */
-};
-
-/* Intermediate System Configuration Pointer - 8 bytes */
-struct i596_iscp {
- u32 busy; /* 0 */
- phys_addr pa_scb; /* 4 - va_to_pa(struct i596_scb *scb) */
-};
-
-/* System Configuration Pointer - 12 bytes */
-struct i596_scp {
- u32 sysbus; /* 0 */
- u32 pad; /* 4 */
- phys_addr pa_iscp; /* 8 - va_to_pa(struct i596_iscp *iscp) */
-};
-
-/* Selftest and dump results - needs 16-byte alignment */
-/*
- * The size of the dump area is 304 bytes. When the dump is executed
- * by the Port command an extra word will be appended to the dump area.
- * The extra word is a copy of the Dump status word (containing the
- * C, B, OK bits). [I find 0xa006, with a0 for C+OK and 6 for dump]
- */
-struct i596_dump {
- u16 dump[153]; /* (304 = 130h) + 2 bytes */
-};
-
-struct i596_private { /* aligned to a 16-byte boundary */
- struct i596_scp scp; /* 0 - needs 16-byte alignment */
- struct i596_iscp iscp; /* 12 */
- struct i596_scb scb; /* 20 */
- u32 dummy; /* 60 */
- struct i596_dump dump; /* 64 - needs 16-byte alignment */
-
- struct i596_cmd set_add;
- char eth_addr[8]; /* directly follows set_add */
-
- struct i596_cmd set_conf;
- char i596_config[16]; /* directly follows set_conf */
-
- struct i596_cmd tdr;
- unsigned long tdr_stat; /* directly follows tdr */
-
- int last_restart;
- struct i596_rbd *rbd_list;
- struct i596_rbd *rbd_tail;
- struct i596_rfd *rx_tail;
- struct i596_cmd *cmd_tail;
- struct i596_cmd *cmd_head;
- int cmd_backlog;
- unsigned long last_cmd;
- spinlock_t cmd_lock;
-};
-
-static char init_setup[14] = {
- 0x8E, /* length 14 bytes, prefetch on */
- 0xC8, /* default: fifo to 8, monitor off */
- 0x40, /* default: don't save bad frames (apricot.c had 0x80) */
- 0x2E, /* (default is 0x26)
- No source address insertion, 8 byte preamble */
- 0x00, /* default priority and backoff */
- 0x60, /* default interframe spacing */
- 0x00, /* default slot time LSB */
- 0xf2, /* default slot time and nr of retries */
- 0x00, /* default various bits
- (0: promiscuous mode, 1: broadcast disable,
- 2: encoding mode, 3: transmit on no CRS,
- 4: no CRC insertion, 5: CRC type,
- 6: bit stuffing, 7: padding) */
- 0x00, /* default carrier sense and collision detect */
- 0x40, /* default minimum frame length */
- 0xff, /* (default is 0xff, and that is what apricot.c has;
- elp486.c has 0xfb: Enable crc append in memory.) */
- 0x00, /* default: not full duplex */
- 0x7f /* (default is 0x3f) multi IA */
-};
-
-static int i596_open(struct net_device *dev);
-static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t i596_interrupt(int irq, void *dev_id);
-static int i596_close(struct net_device *dev);
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
-static void print_eth(char *);
-static void set_multicast_list(struct net_device *dev);
-static void i596_tx_timeout(struct net_device *dev);
-
-static int
-i596_timeout(struct net_device *dev, char *msg, int ct) {
- struct i596_private *lp;
- int boguscnt = ct;
-
- lp = netdev_priv(dev);
- while (lp->scb.command) {
- if (--boguscnt == 0) {
- printk("%s: %s timed out - stat %4.4x, cmd %4.4x\n",
- dev->name, msg,
- lp->scb.status, lp->scb.command);
- return 1;
- }
- udelay(5);
- barrier();
- }
- return 0;
-}
-
-static inline int
-init_rx_bufs(struct net_device *dev, int num) {
- struct i596_private *lp;
- struct i596_rfd *rfd;
- int i;
- // struct i596_rbd *rbd;
-
- lp = netdev_priv(dev);
- lp->scb.pa_rfd = I596_NULL;
-
- for (i = 0; i < num; i++) {
- rfd = kmalloc(sizeof(struct i596_rfd), GFP_KERNEL);
- if (rfd == NULL)
- break;
-
- rfd->stat = 0;
- rfd->pa_rbd = I596_NULL;
- rfd->count = 0;
- rfd->size = 1532;
- if (i == 0) {
- rfd->cmd = CMD_EOL;
- lp->rx_tail = rfd;
- } else {
- rfd->cmd = 0;
- }
- rfd->pa_next = lp->scb.pa_rfd;
- lp->scb.pa_rfd = va_to_pa(rfd);
- lp->rx_tail->pa_next = lp->scb.pa_rfd;
- }
-
-#if 0
- for (i = 0; i<RX_RBD_SIZE; i++) {
- rbd = kmalloc(sizeof(struct i596_rbd), GFP_KERNEL);
- if (rbd) {
- rbd->pad = 0;
- rbd->count = 0;
- rbd->skb = dev_alloc_skb(RX_SKBSIZE);
- if (!rbd->skb) {
- printk("dev_alloc_skb failed");
- }
- rbd->next = rfd->rbd;
- if (i) {
- rfd->rbd->prev = rbd;
- rbd->size = RX_SKBSIZE;
- } else {
- rbd->size = (RX_SKBSIZE | RBD_EL);
- lp->rbd_tail = rbd;
- }
-
- rfd->rbd = rbd;
- }
- }
- lp->rbd_tail->next = rfd->rbd;
-#endif
- return i;
-}
-
-static inline void
-remove_rx_bufs(struct net_device *dev) {
- struct i596_private *lp;
- struct i596_rfd *rfd;
-
- lp = netdev_priv(dev);
- lp->rx_tail->pa_next = I596_NULL;
-
- do {
- rfd = pa_to_va(lp->scb.pa_rfd);
- lp->scb.pa_rfd = rfd->pa_next;
- kfree(rfd);
- } while (rfd != lp->rx_tail);
-
- lp->rx_tail = NULL;
-
-#if 0
- for (lp->rbd_list) {
- }
-#endif
-}
-
-#define PORT_RESET 0x00 /* reset 82596 */
-#define PORT_SELFTEST 0x01 /* selftest */
-#define PORT_ALTSCP 0x02 /* alternate SCB address */
-#define PORT_DUMP 0x03 /* dump */
-
-#define IOADDR 0xcb0 /* real constant */
-#define IRQ 10 /* default IRQ - can be changed by ECU */
-
-/* The 82596 requires two 16-bit write cycles for a port command */
-static inline void
-PORT(phys_addr a, unsigned int cmd) {
- if (a & 0xf)
- printk("lp486e.c: PORT: address not aligned\n");
- outw(((a & 0xffff) | cmd), IOADDR);
- outw(((a>>16) & 0xffff), IOADDR+2);
-}
-
-static inline void
-CA(void) {
- outb(0, IOADDR+4);
- udelay(8);
-}
-
-static inline void
-CLEAR_INT(void) {
- outb(0, IOADDR+8);
-}
-
-#if 0
-/* selftest or dump */
-static void
-i596_port_do(struct net_device *dev, int portcmd, char *cmdname) {
- struct i596_private *lp = netdev_priv(dev);
- u16 *outp;
- int i, m;
-
- memset((void *)&(lp->dump), 0, sizeof(struct i596_dump));
- outp = &(lp->dump.dump[0]);
-
- PORT(va_to_pa(outp), portcmd);
- mdelay(30); /* random, unmotivated */
-
- printk("lp486e i82596 %s result:\n", cmdname);
- for (m = ARRAY_SIZE(lp->dump.dump); m && lp->dump.dump[m-1] == 0; m--)
- ;
- for (i = 0; i < m; i++) {
- printk(" %04x", lp->dump.dump[i]);
- if (i%8 == 7)
- printk("\n");
- }
- printk("\n");
-}
-#endif
-
-static int
-i596_scp_setup(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- int boguscnt;
-
- /* Setup SCP, ISCP, SCB */
- /*
- * sysbus bits:
- * only a single byte is significant - here 0x44
- * 0x80: big endian mode (details depend on stepping)
- * 0x40: 1
- * 0x20: interrupt pin is active low
- * 0x10: lock function disabled
- * 0x08: external triggering of bus throttle timers
- * 0x06: 00: 82586 compat mode, 01: segmented mode, 10: linear mode
- * 0x01: unused
- */
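-	 *
-	 * Here 0x44 = 0x40 (must be set) | 0x04 (mode bits 10b: linear mode).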
- lp->scp.sysbus = 0x00440000; /* linear mode */
- lp->scp.pad = 0; /* must be zero */
- lp->scp.pa_iscp = va_to_pa(&(lp->iscp));
-
- /*
- * The CPU sets the ISCP to 1 before it gives the first CA()
- */
- lp->iscp.busy = 0x0001;
- lp->iscp.pa_scb = va_to_pa(&(lp->scb));
-
- lp->scb.command = 0;
- lp->scb.status = 0;
- lp->scb.pa_cmd = I596_NULL;
- /* lp->scb.pa_rfd has been initialised already */
-
- lp->last_cmd = jiffies;
- lp->cmd_backlog = 0;
- lp->cmd_head = NULL;
-
- /*
- * Reset the 82596.
- * We need to wait 10 systemclock cycles, and
- * 5 serial clock cycles.
- */
- PORT(0, PORT_RESET); /* address part ignored */
- udelay(100);
-
- /*
- * Before the CA signal is asserted, the default SCP address
- * (0x00fffff4) can be changed to a 16-byte aligned value
- */
- PORT(va_to_pa(&lp->scp), PORT_ALTSCP); /* change the scp address */
-
- /*
- * The initialization procedure begins when a
- * Channel Attention signal is asserted after a reset.
- */
-
- CA();
-
- /*
- * The ISCP busy is cleared by the 82596 after the SCB address is read.
- */
- boguscnt = 100;
- while (lp->iscp.busy) {
- if (--boguscnt == 0) {
- /* No i82596 present? */
- printk("%s: i82596 initialization timed out\n",
- dev->name);
- return 1;
- }
- udelay(5);
- barrier();
- }
- /* I find here boguscnt==100, so no delay was required. */
-
- return 0;
-}
-
-static int
-init_i596(struct net_device *dev) {
- struct i596_private *lp;
-
- if (i596_scp_setup(dev))
- return 1;
-
- lp = netdev_priv(dev);
- lp->scb.command = 0;
-
- memcpy ((void *)lp->i596_config, init_setup, 14);
- lp->set_conf.command = CmdConfigure;
- i596_add_cmd(dev, (void *)&lp->set_conf);
-
- memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
- lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, &lp->set_add);
-
- lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, &lp->tdr);
-
- if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
- return 1;
-
- lp->scb.command = RX_START;
- CA();
-
- barrier();
-
- if (lp->scb.command && i596_timeout(dev, "Receive Unit start", 100))
- return 1;
-
- return 0;
-}
-
-/* Receive a single frame */
-static inline int
-i596_rx_one(struct net_device *dev, struct i596_private *lp,
- struct i596_rfd *rfd, int *frames) {
-
- if (rfd->stat & RFD_STAT_OK) {
- /* a good frame */
- int pkt_len = (rfd->count & 0x3fff);
- struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
-
- (*frames)++;
-
- if (rfd->cmd & CMD_EOL)
- printk("Received on EOL\n");
-
- if (skb == NULL) {
- printk ("%s: i596_rx Memory squeeze, "
- "dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return 1;
- }
-
- memcpy(skb_put(skb,pkt_len), rfd->data, pkt_len);
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- } else {
-#if 0
- printk("Frame reception error status %04x\n",
- rfd->stat);
-#endif
- dev->stats.rx_errors++;
- if (rfd->stat & RFD_COLLISION)
- dev->stats.collisions++;
- if (rfd->stat & RFD_SHORT_FRAME_ERR)
- dev->stats.rx_length_errors++;
- if (rfd->stat & RFD_DMA_ERR)
- dev->stats.rx_over_errors++;
- if (rfd->stat & RFD_NOBUFS_ERR)
- dev->stats.rx_fifo_errors++;
- if (rfd->stat & RFD_ALIGN_ERR)
- dev->stats.rx_frame_errors++;
- if (rfd->stat & RFD_CRC_ERR)
- dev->stats.rx_crc_errors++;
- if (rfd->stat & RFD_LENGTH_ERR)
- dev->stats.rx_length_errors++;
- }
- rfd->stat = rfd->count = 0;
- return 0;
-}
-
-static int
-i596_rx(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- struct i596_rfd *rfd;
- int frames = 0;
-
- while (1) {
- rfd = pa_to_va(lp->scb.pa_rfd);
- if (!rfd) {
- printk(KERN_ERR "i596_rx: NULL rfd?\n");
- return 0;
- }
-#if 1
- if (rfd->stat && !(rfd->stat & (RFD_STAT_C | RFD_STAT_B)))
- printk("SF:%p-%04x\n", rfd, rfd->stat);
-#endif
- if (!(rfd->stat & RFD_STAT_C))
- break; /* next one not ready */
- if (i596_rx_one(dev, lp, rfd, &frames))
- break; /* out of memory */
- rfd->cmd = CMD_EOL;
- lp->rx_tail->cmd = 0;
- lp->rx_tail = rfd;
- lp->scb.pa_rfd = rfd->pa_next;
- barrier();
- }
-
- return frames;
-}
-
-static void
-i596_cleanup_cmd(struct net_device *dev) {
- struct i596_private *lp;
- struct i596_cmd *cmd;
-
- lp = netdev_priv(dev);
- while (lp->cmd_head) {
- cmd = lp->cmd_head;
-
- lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
- lp->cmd_backlog--;
-
- switch ((cmd->command) & 0x7) {
- case CmdTx: {
- struct tx_cmd *tx_cmd = (struct tx_cmd *) cmd;
- struct i596_tbd * tx_cmd_tbd;
- tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
-
- dev_kfree_skb_any(tx_cmd_tbd->skb);
-
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)tx_cmd);
- netif_wake_queue(dev);
- break;
- }
- case CmdMulticastList: {
- // unsigned short count = *((unsigned short *) (ptr + 1));
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)cmd);
- break;
- }
- default: {
- cmd->pa_next = I596_NULL;
- break;
- }
- }
- barrier();
- }
-
- if (lp->scb.command && i596_timeout(dev, "i596_cleanup_cmd", 100))
- ;
-
- lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
-}
-
-static void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) {
-
- if (lp->scb.command && i596_timeout(dev, "i596_reset", 100))
- ;
-
- netif_stop_queue(dev);
-
- lp->scb.command = CUC_ABORT | RX_ABORT;
- CA();
- barrier();
-
- /* wait for shutdown */
- if (lp->scb.command && i596_timeout(dev, "i596_reset(2)", 400))
- ;
-
- i596_cleanup_cmd(dev);
- i596_rx(dev);
-
- netif_start_queue(dev);
- /*dev_kfree_skb(skb, FREE_WRITE);*/
- init_i596(dev);
-}
-
-static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) {
- struct i596_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
-
- cmd->status = 0;
- cmd->command |= (CMD_EOL | CMD_INTR);
- cmd->pa_next = I596_NULL;
-
- spin_lock_irqsave(&lp->cmd_lock, flags);
-
- if (lp->cmd_head) {
- lp->cmd_tail->pa_next = va_to_pa(cmd);
- } else {
- lp->cmd_head = cmd;
- if (lp->scb.command && i596_timeout(dev, "i596_add_cmd", 100))
- ;
- lp->scb.pa_cmd = va_to_pa(cmd);
- lp->scb.command = CUC_START;
- CA();
- }
- lp->cmd_tail = cmd;
- lp->cmd_backlog++;
-
- lp->cmd_head = pa_to_va(lp->scb.pa_cmd);
- spin_unlock_irqrestore(&lp->cmd_lock, flags);
-
- if (lp->cmd_backlog > 16) {
- int tickssofar = jiffies - lp->last_cmd;
- if (tickssofar < HZ/4)
- return;
-
- printk(KERN_WARNING "%s: command unit timed out, status resetting.\n", dev->name);
- i596_reset(dev, lp, ioaddr);
- }
-}
-
-static int i596_open(struct net_device *dev)
-{
- int i;
-
- i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
- if (i) {
- printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
- return i;
- }
-
- if ((i = init_rx_bufs(dev, RX_RING_SIZE)) < RX_RING_SIZE)
- printk(KERN_ERR "%s: only able to allocate %d receive buffers\n", dev->name, i);
-
- if (i < 4) {
- free_irq(dev->irq, dev);
- return -EAGAIN;
- }
- netif_start_queue(dev);
- init_i596(dev);
- return 0; /* Always succeed */
-}
-
-static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
- struct tx_cmd *tx_cmd;
- short length;
-
- length = skb->len;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
- if (tx_cmd == NULL) {
- printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.tx_dropped++;
- dev_kfree_skb (skb);
- } else {
- struct i596_tbd *tx_cmd_tbd;
- tx_cmd_tbd = (struct i596_tbd *) (tx_cmd + 1);
- tx_cmd->pa_tbd = va_to_pa (tx_cmd_tbd);
- tx_cmd_tbd->pa_next = I596_NULL;
-
- tx_cmd->cmd.command = (CMD_FLEX | CmdTx);
-
- tx_cmd->pad = 0;
- tx_cmd->size = 0;
- tx_cmd_tbd->pad = 0;
- tx_cmd_tbd->size = (EOF | length);
-
- tx_cmd_tbd->pa_data = va_to_pa (skb->data);
- tx_cmd_tbd->skb = skb;
-
- if (i596_debug & LOG_SRCDST)
- print_eth (skb->data);
-
- i596_add_cmd (dev, (struct i596_cmd *) tx_cmd);
-
- dev->stats.tx_packets++;
- }
-
- return NETDEV_TX_OK;
-}
-
-static void
-i596_tx_timeout (struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- /* Transmitter timeout, serious problems. */
- printk(KERN_WARNING "%s: transmit timed out, status resetting.\n", dev->name);
- dev->stats.tx_errors++;
-
- /* Try to restart the adaptor */
- if (lp->last_restart == dev->stats.tx_packets) {
- printk ("Resetting board.\n");
-
- /* Shutdown and restart */
- i596_reset (dev, lp, ioaddr);
- } else {
- /* Issue a channel attention signal */
- printk ("Kicking board.\n");
- lp->scb.command = (CUC_START | RX_START);
- CA();
- lp->last_restart = dev->stats.tx_packets;
- }
- netif_wake_queue(dev);
-}
-
-static void print_eth(char *add)
-{
- int i;
-
- printk ("Dest ");
- for (i = 0; i < 6; i++)
- printk(" %2.2X", (unsigned char) add[i]);
- printk ("\n");
-
- printk ("Source");
- for (i = 0; i < 6; i++)
- printk(" %2.2X", (unsigned char) add[i+6]);
- printk ("\n");
-
- printk ("type %2.2X%2.2X\n",
- (unsigned char) add[12], (unsigned char) add[13]);
-}
-
-static const struct net_device_ops i596_netdev_ops = {
- .ndo_open = i596_open,
- .ndo_stop = i596_close,
- .ndo_start_xmit = i596_start_xmit,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_tx_timeout = i596_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init lp486e_probe(struct net_device *dev) {
- struct i596_private *lp;
- unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
- unsigned char *bios;
- int i, j;
- int ret = -ENOMEM;
- static int probed;
-
- if (probed)
- return -ENODEV;
- probed++;
-
- if (!request_region(IOADDR, LP486E_TOTAL_SIZE, DRV_NAME)) {
- printk(KERN_ERR "lp486e: IO address 0x%x in use\n", IOADDR);
- return -EBUSY;
- }
-
- lp = netdev_priv(dev);
- spin_lock_init(&lp->cmd_lock);
-
- /*
- * Do we really have this thing?
- */
- if (i596_scp_setup(dev)) {
- ret = -ENODEV;
- goto err_out_kfree;
- }
-
- dev->base_addr = IOADDR;
- dev->irq = IRQ;
-
-
- /*
- * How do we find the ethernet address? I don't know.
- * One possibility is to look at the EISA configuration area
- * [0xe8000-0xe9fff]. This contains the ethernet address
- * but not at a fixed address - things depend on setup options.
- *
- * If we find no address, or the wrong address, use
- * ifconfig eth0 hw ether a1:a2:a3:a4:a5:a6
- * with the value found in the BIOS setup.
- */
- bios = bus_to_virt(0xe8000);
- for (j = 0; j < 0x2000; j++) {
- if (bios[j] == 0 && bios[j+1] == 0xaa && bios[j+2] == 0) {
- printk("%s: maybe address at BIOS 0x%x:",
- dev->name, 0xe8000+j);
- for (i = 0; i < 6; i++) {
- eth_addr[i] = bios[i+j];
- printk(" %2.2X", eth_addr[i]);
- }
- printk("\n");
- }
- }
-
- printk("%s: lp486e 82596 at %#3lx, IRQ %d,",
- dev->name, dev->base_addr, dev->irq);
- for (i = 0; i < 6; i++)
- printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]);
- printk("\n");
-
- /* The LP486E-specific entries in the device structure. */
- dev->netdev_ops = &i596_netdev_ops;
- dev->watchdog_timeo = 5*HZ;
-
-#if 0
- /* selftest reports 0x320925ae - don't know what that means */
- i596_port_do(dev, PORT_SELFTEST, "selftest");
- i596_port_do(dev, PORT_DUMP, "dump");
-#endif
- return 0;
-
-err_out_kfree:
- release_region(IOADDR, LP486E_TOTAL_SIZE);
- return ret;
-}
-
-static inline void
-i596_handle_CU_completion(struct net_device *dev,
- struct i596_private *lp,
- unsigned short status,
- unsigned short *ack_cmdp) {
- struct i596_cmd *cmd;
- int frames_out = 0;
- int commands_done = 0;
- int cmd_val;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->cmd_lock, flags);
- cmd = lp->cmd_head;
-
- while (lp->cmd_head && (lp->cmd_head->status & CMD_STAT_C)) {
- cmd = lp->cmd_head;
-
- lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
- lp->cmd_backlog--;
-
- commands_done++;
- cmd_val = cmd->command & 0x7;
-#if 0
- printk("finished CU %s command (%d)\n",
- CUcmdnames[cmd_val], cmd_val);
-#endif
- switch (cmd_val) {
- case CmdTx:
- {
- struct tx_cmd *tx_cmd;
- struct i596_tbd *tx_cmd_tbd;
-
- tx_cmd = (struct tx_cmd *) cmd;
- tx_cmd_tbd = pa_to_va(tx_cmd->pa_tbd);
-
- frames_out++;
- if (cmd->status & CMD_STAT_OK) {
- if (i596_debug)
- print_eth(pa_to_va(tx_cmd_tbd->pa_data));
- } else {
- dev->stats.tx_errors++;
- if (i596_debug)
- printk("transmission failure:%04x\n",
- cmd->status);
- if (cmd->status & 0x0020)
- dev->stats.collisions++;
- if (!(cmd->status & 0x0040))
- dev->stats.tx_heartbeat_errors++;
- if (cmd->status & 0x0400)
- dev->stats.tx_carrier_errors++;
- if (cmd->status & 0x0800)
- dev->stats.collisions++;
- if (cmd->status & 0x1000)
- dev->stats.tx_aborted_errors++;
- }
- dev_kfree_skb_irq(tx_cmd_tbd->skb);
-
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)tx_cmd);
- netif_wake_queue(dev);
- break;
- }
-
- case CmdMulticastList:
- cmd->pa_next = I596_NULL;
- kfree((unsigned char *)cmd);
- break;
-
- case CmdTDR:
- {
- unsigned long status = *((unsigned long *) (cmd + 1));
- if (status & 0x8000) {
- if (i596_debug)
- printk("%s: link ok.\n", dev->name);
- } else {
- if (status & 0x4000)
- printk("%s: Transceiver problem.\n",
- dev->name);
- if (status & 0x2000)
- printk("%s: Termination problem.\n",
- dev->name);
- if (status & 0x1000)
- printk("%s: Short circuit.\n",
- dev->name);
- printk("%s: Time %ld.\n",
- dev->name, status & 0x07ff);
- }
- }
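-		/* falls through to the default case, which resets pa_next
-		 * and updates last_cmd */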
- default:
- cmd->pa_next = I596_NULL;
- lp->last_cmd = jiffies;
-
- }
- barrier();
- }
-
- cmd = lp->cmd_head;
- while (cmd && (cmd != lp->cmd_tail)) {
- cmd->command &= 0x1fff;
- cmd = pa_to_va(cmd->pa_next);
- barrier();
- }
-
- if (lp->cmd_head)
- *ack_cmdp |= CUC_START;
- lp->scb.pa_cmd = va_to_pa(lp->cmd_head);
- spin_unlock_irqrestore(&lp->cmd_lock, flags);
-}
-
-static irqreturn_t
-i596_interrupt(int irq, void *dev_instance)
-{
- struct net_device *dev = dev_instance;
- struct i596_private *lp = netdev_priv(dev);
- unsigned short status, ack_cmd = 0;
- int frames_in = 0;
-
- /*
- * The 82596 examines the command, performs the required action,
- * and then clears the SCB command word.
- */
- if (lp->scb.command && i596_timeout(dev, "interrupt", 40))
- ;
-
- /*
- * The status word indicates the status of the 82596.
- * It is modified only by the 82596.
- *
- * [So, we must not clear it. I find often status 0xffff,
- * which is not one of the values allowed by the docs.]
- */
- status = lp->scb.status;
-#if 0
- if (i596_debug) {
- printk("%s: i596 interrupt, ", dev->name);
- i596_out_status(status);
- }
-#endif
- /* Impossible, but it happens - perhaps when we get
- a receive interrupt but scb.pa_rfd is I596_NULL. */
- if (status == 0xffff) {
- printk("%s: i596_interrupt: got status 0xffff\n", dev->name);
- goto out;
- }
-
- ack_cmd = (status & STAT_ACK);
-
- if (status & (STAT_CX | STAT_CNA))
- i596_handle_CU_completion(dev, lp, status, &ack_cmd);
-
- if (status & (STAT_FR | STAT_RNR)) {
- /* Restart the receive unit when it got inactive somehow */
- if ((status & STAT_RNR) && netif_running(dev))
- ack_cmd |= RX_START;
-
- if (status & STAT_FR) {
- frames_in = i596_rx(dev);
- if (!frames_in)
- printk("receive frame reported, but no frames\n");
- }
- }
-
- /* acknowledge the interrupt */
- /*
- if ((lp->scb.pa_cmd != I596_NULL) && netif_running(dev))
- ack_cmd |= CUC_START;
- */
-
- if (lp->scb.command && i596_timeout(dev, "i596 interrupt", 100))
- ;
-
- lp->scb.command = ack_cmd;
-
- CLEAR_INT();
- CA();
-
- out:
- return IRQ_HANDLED;
-}
-
-static int i596_close(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
-
- netif_stop_queue(dev);
-
- if (i596_debug)
- printk("%s: Shutting down ethercard, status was %4.4x.\n",
- dev->name, lp->scb.status);
-
- lp->scb.command = (CUC_ABORT | RX_ABORT);
- CA();
-
- i596_cleanup_cmd(dev);
-
- if (lp->scb.command && i596_timeout(dev, "i596_close", 200))
- ;
-
- free_irq(dev->irq, dev);
- remove_rx_bufs(dev);
-
- return 0;
-}
-
-/*
-* Set or clear the multicast filter for this adaptor.
-*/
-
-static void set_multicast_list(struct net_device *dev) {
- struct i596_private *lp = netdev_priv(dev);
- struct i596_cmd *cmd;
-
- if (i596_debug > 1)
- printk ("%s: set multicast list %d\n",
- dev->name, netdev_mc_count(dev));
-
- if (!netdev_mc_empty(dev)) {
- struct netdev_hw_addr *ha;
- char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
- netdev_mc_count(dev) * 6, GFP_ATOMIC);
- if (cmd == NULL) {
- printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
- return;
- }
- cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
- cp = ((char *)(cmd + 1))+2;
- netdev_for_each_mc_addr(ha, dev) {
- memcpy(cp, ha->addr, 6);
- cp += 6;
- }
- if (i596_debug & LOG_SRCDST)
- print_eth (((char *)(cmd + 1)) + 2);
- i596_add_cmd(dev, cmd);
- } else {
- if (lp->set_conf.pa_next != I596_NULL) {
- return;
- }
- if (netdev_mc_empty(dev) &&
- !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
- lp->i596_config[8] &= ~0x01;
- } else {
- lp->i596_config[8] |= 0x01;
- }
-
- i596_add_cmd(dev, &lp->set_conf);
- }
-}
-
-MODULE_AUTHOR("Ard van Breemen <ard@cstmel.nl.eu.org>");
-MODULE_DESCRIPTION("Intel Panther onboard i82596 driver");
-MODULE_LICENSE("GPL");
-
-static struct net_device *dev_lp486e;
-static int full_duplex;
-static int options;
-static int io = IOADDR;
-static int irq = IRQ;
-
-module_param(debug, int, 0);
-//module_param(max_interrupt_work, int, 0);
-//module_param(reverse_probe, int, 0);
-//module_param(rx_copybreak, int, 0);
-module_param(options, int, 0);
-module_param(full_duplex, int, 0);
-
-static int __init lp486e_init_module(void) {
- int err;
- struct net_device *dev = alloc_etherdev(sizeof(struct i596_private));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = irq;
- dev->base_addr = io;
- err = lp486e_probe(dev);
- if (err) {
- free_netdev(dev);
- return err;
- }
- err = register_netdev(dev);
- if (err) {
- release_region(dev->base_addr, LP486E_TOTAL_SIZE);
- free_netdev(dev);
- return err;
- }
- dev_lp486e = dev;
- full_duplex = 0;
- options = 0;
- return 0;
-}
-
-static void __exit lp486e_cleanup_module(void) {
- unregister_netdev(dev_lp486e);
- release_region(dev_lp486e->base_addr, LP486E_TOTAL_SIZE);
- free_netdev(dev_lp486e);
-}
-
-module_init(lp486e_init_module);
-module_exit(lp486e_cleanup_module);
diff --git a/drivers/net/ethernet/i825xx/ni52.c b/drivers/net/ethernet/i825xx/ni52.c
deleted file mode 100644
index 272976e1bb0..00000000000
--- a/drivers/net/ethernet/i825xx/ni52.c
+++ /dev/null
@@ -1,1346 +0,0 @@
-/*
- * net-3-driver for the NI5210 card (i82586 Ethernet chip)
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * Alphacode 0.82 (96/09/29) for Linux 2.0.0 (or later)
- * Copyrights (c) 1994,1995,1996 by M.Hipp (hippm@informatik.uni-tuebingen.de)
- * [feel free to mail ....]
- *
- * when using as module: (no autoprobing!)
- * run with e.g:
- * insmod ni52.o io=0x360 irq=9 memstart=0xd0000 memend=0xd4000
- *
- * PLEASE REPORT YOUR PERFORMANCE EXPERIENCES!
- *
- * If you find a bug, please report to me:
- * the kernel panic output and any kmsg from the ni52 driver,
- * the ni5210 driver version and the Linux kernel version,
- * how much shared memory (memsize) is on the netcard,
- * bootprom: yes/no, base_addr, mem_start
- * maybe the ni5210-card revision and the i82586 version
- *
- * autoprobe for: base_addr: 0x300,0x280,0x360,0x320,0x340
- * mem_start: 0xd0000,0xd2000,0xc8000,0xca000,0xd4000,0xd6000,
- * 0xd8000,0xcc000,0xce000,0xda000,0xdc000
- *
- * sources:
- * skeleton.c from Donald Becker
- *
- * I have also looked at the following sources (mail me if you need them):
- * crynwr-packet-driver by Russ Nelson
- * Garret A. Wollman's (fourth) i82586-driver for BSD
- * (before getting an i82596 (yes 596 not 586) manual, the existing drivers
- * helped me a lot to understand this tricky chip.)
- *
- * Known Problems:
- * The internal sysbus seems to be slow. So we often lose packets because of
- * overruns while receiving from a fast remote host.
- * This can slow down TCP connections. Maybe the newer ni5210 cards are
- * better. My experience is that if a machine sends at more than about
- * 500-600K/s, the FIFO/sysbus overflows.
- *
- * IMPORTANT NOTE:
- * On fast networks, it's a (very) good idea to have 16K shared memory. With
- * 8K, we can store only 4 receive frames, so it can (easily) happen that a
- * remote machine 'overruns' our system.
- *
- * Known i82586/card problems (I'm sure, there are many more!):
- * When running in NOP mode, the i82586 sometimes seems to forget to report
- * every xmit interrupt until we restart the CU.
- * Another MAJOR bug is that the RU sometimes seems to ignore the EL bit
- * in the RBD struct, which marks the end of the RBD queue.
- * Instead, the RU fetches another (randomly selected and
- * usually already used) RBD and begins to fill it. (Maybe this happens only if
- * the last buffer from the previous RFD fits exactly into the queue and
- * the next RFD can't fetch an initial RBD. Does anyone know more?)
- *
- * results from ftp performance tests with Linux 1.2.5
- * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s)
- * sending in NOP-mode: peak performance up to 530K/s (but better don't
- * run this mode)
- */
-
-/*
- * 29.Sept.96: virt_to_bus changes for new memory scheme
- * 19.Feb.96: more Mcast changes, module support (MH)
- *
- * 18.Nov.95: Mcast changes (AC).
- *
- * 23.April.95: fixed(?) receiving problems by configuring one RFD more
- * than the number of RBDs. May cause other problems.
- * 18.April.95: Added MODULE support (MH)
- * 17.April.95: MC related changes in init586() and set_multicast_list().
- * removed use of 'jiffies' in init586() (MH)
- *
- * 19.Sep.94: Added Multicast support (not tested yet) (MH)
- *
- * 18.Sep.94: Workaround for 'EL-Bug'. Removed flexible RBD-handling.
- * Now, every RFD has exactly one RBD. (MH)
- *
- * 14.Sep.94: added promiscuous mode, a few cleanups (MH)
- *
- * 19.Aug.94: changed request_irq() parameter (MH)
- *
- * 20.July.94: removed cleanup bugs, removed a 16K-mem-probe-bug (MH)
- *
- * 19.July.94: lotsa cleanups .. (MH)
- *
- * 17.July.94: some patches ... verified to run with 1.1.29 (MH)
- *
- * 4.July.94: patches for Linux 1.1.24 (MH)
- *
- * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH)
- *
- * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff,
- * too (MH)
- *
- * < 30.Sep.93: first versions
- */
-
-static int debuglevel; /* debug-printk 0: off 1: a few 2: more */
-static int automatic_resume; /* experimental .. better should be zero */
-static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */
-static int fifo = 0x8; /* don't change */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ni52.h"
-
-#define DRV_NAME "ni52"
-
-#define DEBUG /* debug on */
-#define SYSBUSVAL 1 /* 8 Bit */
-
-#define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); }
-#define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); }
-#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
-#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
-
-#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
-#define make24(ptr32) (((char __iomem *)(ptr32)) - p->base)
-#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
- - p->memtop))
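-
-/*
- * A worked example (illustrative): with a 16K board the shared memory is
- * mapped into the top of the i82586's 16MB address space
- * (0xffc000..0xffffff), so p->base lies 0x1000000 bytes below p->memtop,
- * make24(p->scp) yields 0xfffff4 == SCP_DEFAULT_ADDRESS, and make16()
- * simply returns the low 16 bits of that 24-bit bus address.
- */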
-
-/******************* how to calculate the buffers *****************************
-
 * IMPORTANT NOTE: if you configure only one NUM_XMIT_BUFFS, the driver works
- * --------------- in a different (more stable?) mode. Only in this mode is it
- * possible to configure the driver with 'NO_NOPCOMMANDS'
-
-sizeof(scp)=12; sizeof(scb)=16; sizeof(iscp)=8;
-sizeof(scp)+sizeof(iscp)+sizeof(scb) = 36 = INIT
-sizeof(rfd) = 24; sizeof(rbd) = 12;
-sizeof(tbd) = 8; sizeof(transmit_cmd) = 16;
-sizeof(nop_cmd) = 8;
-
- * if you don't know the driver well, it's better not to change these values: */
-
-#define RECV_BUFF_SIZE 1524 /* slightly oversized */
-#define XMIT_BUFF_SIZE 1524 /* slightly oversized */
-#define NUM_XMIT_BUFFS 1 /* config for both, 8K and 16K shmem */
-#define NUM_RECV_BUFFS_8 4 /* config for 8K shared mem */
-#define NUM_RECV_BUFFS_16 9 /* config for 16K shared mem */
-#define NO_NOPCOMMANDS /* only possible with NUM_XMIT_BUFFS=1 */
-
-/**************************************************************************/
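-
-/*
- * A rough worked example (illustrative) for the default settings above,
- * assuming rfdadd == 0: with 8K shared memory we need
- * 36 (INIT) + 2*8 (nop cmds) + 16+8+1524 (one xmit cmd/tbd/buffer)
- * + 4*24 (RFDs) + 4*12 (RBDs) + 4*1524 (receive buffers) = 7840 bytes,
- * which fits into 0x2000 = 8192 bytes.
- */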
-
-
-#define NI52_TOTAL_SIZE 16
-#define NI52_ADDR0 0x02
-#define NI52_ADDR1 0x07
-#define NI52_ADDR2 0x01
-
-static int ni52_probe1(struct net_device *dev, int ioaddr);
-static irqreturn_t ni52_interrupt(int irq, void *dev_id);
-static int ni52_open(struct net_device *dev);
-static int ni52_close(struct net_device *dev);
-static netdev_tx_t ni52_send_packet(struct sk_buff *, struct net_device *);
-static struct net_device_stats *ni52_get_stats(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void ni52_timeout(struct net_device *dev);
-
-/* helper-functions */
-static int init586(struct net_device *dev);
-static int check586(struct net_device *dev, unsigned size);
-static void alloc586(struct net_device *dev);
-static void startrecv586(struct net_device *dev);
-static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
-static void ni52_rcv_int(struct net_device *dev);
-static void ni52_xmt_int(struct net_device *dev);
-static void ni52_rnr_int(struct net_device *dev);
-
-struct priv {
- char __iomem *base;
- char __iomem *mapped;
- char __iomem *memtop;
- spinlock_t spinlock;
- int reset;
- struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
- struct scp_struct __iomem *scp;
- struct iscp_struct __iomem *iscp;
- struct scb_struct __iomem *scb;
- struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];
-#if (NUM_XMIT_BUFFS == 1)
- struct transmit_cmd_struct __iomem *xmit_cmds[2];
- struct nop_cmd_struct __iomem *nop_cmds[2];
-#else
- struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
- struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
-#endif
- int nop_point, num_recv_buffs;
- char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];
- int xmit_count, xmit_last;
-};
-
-/* wait for command with timeout: */
-static void wait_for_scb_cmd(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- int i;
- for (i = 0; i < 16384; i++) {
- if (readb(&p->scb->cmd_cuc) == 0)
- break;
- udelay(4);
- if (i == 16383) {
- printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",
- dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus));
- if (!p->reset) {
- p->reset = 1;
- ni_reset586();
- }
- }
- }
-}
-
-static void wait_for_scb_cmd_ruc(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- int i;
- for (i = 0; i < 16384; i++) {
- if (readb(&p->scb->cmd_ruc) == 0)
- break;
- udelay(4);
- if (i == 16383) {
- printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
- dev->name, readb(&p->scb->cmd_ruc),
- readb(&p->scb->rus));
- if (!p->reset) {
- p->reset = 1;
- ni_reset586();
- }
- }
- }
-}
-
-static void wait_for_stat_compl(void __iomem *p)
-{
- struct nop_cmd_struct __iomem *addr = p;
- int i;
- for (i = 0; i < 32767; i++) {
- if (readw(&((addr)->cmd_status)) & STAT_COMPL)
- break;
- udelay(32);
- }
-}
-
-/**********************************************
- * close device
- */
-static int ni52_close(struct net_device *dev)
-{
- free_irq(dev->irq, dev);
- ni_reset586(); /* the hard way to stop the receiver */
- netif_stop_queue(dev);
- return 0;
-}
-
-/**********************************************
- * open device
- */
-static int ni52_open(struct net_device *dev)
-{
- int ret;
-
- ni_disint();
- alloc586(dev);
- init586(dev);
- startrecv586(dev);
- ni_enaint();
-
- ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
- if (ret) {
- ni_reset586();
- return ret;
- }
- netif_start_queue(dev);
- return 0; /* most done by init */
-}
-
-static int check_iscp(struct net_device *dev, void __iomem *addr)
-{
- struct iscp_struct __iomem *iscp = addr;
- struct priv *p = netdev_priv(dev);
- memset_io(iscp, 0, sizeof(struct iscp_struct));
-
- writel(make24(iscp), &p->scp->iscp);
- writeb(1, &iscp->busy);
-
- ni_reset586();
- ni_attn586();
- mdelay(32); /* wait a while... */
- /* i82586 clears 'busy' after successful init */
- if (readb(&iscp->busy))
- return 0;
- return 1;
-}
-
-/**********************************************
- * Check to see if there's an 82586 out there.
- */
-static int check586(struct net_device *dev, unsigned size)
-{
- struct priv *p = netdev_priv(dev);
- int i;
-
- p->mapped = ioremap(dev->mem_start, size);
- if (!p->mapped)
- return 0;
-
- p->base = p->mapped + size - 0x01000000;
- p->memtop = p->mapped + size;
- p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
- p->scb = (struct scb_struct __iomem *) p->mapped;
- p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
- memset_io(p->scp, 0, sizeof(struct scp_struct));
- for (i = 0; i < sizeof(struct scp_struct); i++)
- /* memory was writeable? */
- if (readb((char __iomem *)p->scp + i))
- goto Enodev;
- writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
- if (readb(&p->scp->sysbus) != SYSBUSVAL)
- goto Enodev;
-
- if (!check_iscp(dev, p->mapped))
- goto Enodev;
- if (!check_iscp(dev, p->iscp))
- goto Enodev;
- return 1;
-Enodev:
- iounmap(p->mapped);
- return 0;
-}
-
-/******************************************************************
- * set iscp at the right place, called by ni52_probe1 and open586.
- */
-static void alloc586(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- ni_reset586();
- mdelay(32);
-
- memset_io(p->iscp, 0, sizeof(struct iscp_struct));
- memset_io(p->scp , 0, sizeof(struct scp_struct));
-
- writel(make24(p->iscp), &p->scp->iscp);
- writeb(SYSBUSVAL, &p->scp->sysbus);
- writew(make16(p->scb), &p->iscp->scb_offset);
-
- writeb(1, &p->iscp->busy);
- ni_reset586();
- ni_attn586();
-
- mdelay(32);
-
- if (readb(&p->iscp->busy))
- printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name);
-
- p->reset = 0;
-
- memset_io(p->scb, 0, sizeof(struct scb_struct));
-}
-
-/* set: io,irq,memstart,memend or set it when calling insmod */
-static int irq = 9;
-static int io = 0x300;
-static long memstart; /* e.g 0xd0000 */
-static long memend; /* e.g 0xd4000 */
-
-/**********************************************
- * probe the ni5210-card
- */
-struct net_device * __init ni52_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct priv));
- static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
- const int *port;
- struct priv *p;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- p = netdev_priv(dev);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- memstart = dev->mem_start;
- memend = dev->mem_end;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = ni52_probe1(dev, io);
- } else if (io > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni52_probe1(dev, *port) ; port++)
- ;
- if (*port)
- goto got_it;
-#ifdef FULL_IO_PROBE
- for (io = 0x200; io < 0x400 && ni52_probe1(dev, io); io += 8)
- ;
- if (io < 0x400)
- goto got_it;
-#endif
- err = -ENODEV;
- }
- if (err)
- goto out;
-got_it:
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- iounmap(p->mapped);
- release_region(dev->base_addr, NI52_TOTAL_SIZE);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni52_netdev_ops = {
- .ndo_open = ni52_open,
- .ndo_stop = ni52_close,
- .ndo_get_stats = ni52_get_stats,
- .ndo_tx_timeout = ni52_timeout,
- .ndo_start_xmit = ni52_send_packet,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init ni52_probe1(struct net_device *dev, int ioaddr)
-{
- int i, size, retval;
- struct priv *priv = netdev_priv(dev);
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
- dev->mem_start = memstart;
- dev->mem_end = memend;
-
- spin_lock_init(&priv->spinlock);
-
- if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
- return -EBUSY;
-
- if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) ||
- !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) {
- retval = -ENODEV;
- goto out;
- }
-
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = inb(dev->base_addr+i);
-
- if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 ||
- dev->dev_addr[2] != NI52_ADDR2) {
- retval = -ENODEV;
- goto out;
- }
-
- printk(KERN_INFO "%s: NI5210 found at %#3lx, ",
- dev->name, dev->base_addr);
-
- /*
- * check (or search) IO-Memory, 8K and 16K
- */
-#ifdef MODULE
- size = dev->mem_end - dev->mem_start;
- if (size != 0x2000 && size != 0x4000) {
- printk("\n");
- printk(KERN_ERR "%s: Invalid memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size);
- retval = -ENODEV;
- goto out;
- }
- if (!check586(dev, size)) {
- printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
- retval = -ENODEV;
- goto out;
- }
-#else
- if (dev->mem_start != 0) {
- /* no auto-mem-probe */
- size = 0x4000; /* check for 16K mem */
- if (!check586(dev, size)) {
- size = 0x2000; /* check for 8K mem */
- if (!check586(dev, size)) {
- printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
- retval = -ENODEV;
- goto out;
- }
- }
- } else {
- static const unsigned long memaddrs[] = {
- 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000,
- 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0
- };
- for (i = 0;; i++) {
- if (!memaddrs[i]) {
- printk(KERN_ERR "?memprobe, Can't find io-memory!\n");
- retval = -ENODEV;
- goto out;
- }
- dev->mem_start = memaddrs[i];
- size = 0x2000; /* check for 8K mem */
- if (check586(dev, size))
- /* 8K-check */
- break;
- size = 0x4000; /* check for 16K mem */
- if (check586(dev, size))
- /* 16K-check */
- break;
- }
- }
- /* set mem_end showed by 'ifconfig' */
- dev->mem_end = dev->mem_start + size;
-#endif
-
- alloc586(dev);
-
- /* set number of receive-buffs according to memsize */
- if (size == 0x2000)
- priv->num_recv_buffs = NUM_RECV_BUFFS_8;
- else
- priv->num_recv_buffs = NUM_RECV_BUFFS_16;
-
- printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
- dev->mem_start, size);
-
- if (dev->irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- ni_reset586();
- ni_attn586();
-
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
- if (!dev->irq) {
- printk("?autoirq, Failed to detect IRQ line!\n");
- retval = -EAGAIN;
- iounmap(priv->mapped);
- goto out;
- }
- printk("IRQ %d (autodetected).\n", dev->irq);
- } else {
- if (dev->irq == 2)
- dev->irq = 9;
- printk("IRQ %d (assigned and not checked!).\n", dev->irq);
- }
-
- dev->netdev_ops = &ni52_netdev_ops;
- dev->watchdog_timeo = HZ/20;
-
- return 0;
-out:
- release_region(ioaddr, NI52_TOTAL_SIZE);
- return retval;
-}
-
-/**********************************************
- * init the chip (ni52-interrupt should be disabled?!)
- * needs correctly 'allocated' memory
- */
-
-static int init586(struct net_device *dev)
-{
- void __iomem *ptr;
- int i, result = 0;
- struct priv *p = netdev_priv(dev);
- struct configure_cmd_struct __iomem *cfg_cmd;
- struct iasetup_cmd_struct __iomem *ias_cmd;
- struct tdr_cmd_struct __iomem *tdr_cmd;
- struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct netdev_hw_addr *ha;
- int num_addrs = netdev_mc_count(dev);
-
- ptr = p->scb + 1;
-
- cfg_cmd = ptr; /* configure-command */
- writew(0, &cfg_cmd->cmd_status);
- writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
- writew(0xFFFF, &cfg_cmd->cmd_link);
-
- /* number of cfg bytes */
- writeb(0x0a, &cfg_cmd->byte_cnt);
- /* fifo-limit (8=tx:32/rx:64) */
- writeb(fifo, &cfg_cmd->fifo);
- /* hold or discard bad recv frames (bit 7) */
- writeb(0x40, &cfg_cmd->sav_bf);
- /* addr_len |!src_insert |pre-len |loopback */
- writeb(0x2e, &cfg_cmd->adr_len);
- writeb(0x00, &cfg_cmd->priority);
- writeb(0x60, &cfg_cmd->ifs);
- writeb(0x00, &cfg_cmd->time_low);
- writeb(0xf2, &cfg_cmd->time_high);
- writeb(0x00, &cfg_cmd->promisc);
- if (dev->flags & IFF_ALLMULTI) {
- int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
- if (num_addrs > len) {
- printk(KERN_ERR "%s: switching to promisc. mode\n",
- dev->name);
- writeb(0x01, &cfg_cmd->promisc);
- }
- }
- if (dev->flags & IFF_PROMISC)
- writeb(0x01, &cfg_cmd->promisc);
- writeb(0x00, &cfg_cmd->carr_coll);
- writew(make16(cfg_cmd), &p->scb->cbl_offset);
- writeb(0, &p->scb->cmd_ruc);
-
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(cfg_cmd);
-
- if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
- (STAT_COMPL|STAT_OK)) {
- printk(KERN_ERR "%s: configure command failed: %x\n",
- dev->name, readw(&cfg_cmd->cmd_status));
- return 1;
- }
-
- /*
- * individual address setup
- */
-
- ias_cmd = ptr;
-
- writew(0, &ias_cmd->cmd_status);
- writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
- writew(0xffff, &ias_cmd->cmd_link);
-
- memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
-
- writew(make16(ias_cmd), &p->scb->cbl_offset);
-
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(ias_cmd);
-
- if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) !=
- (STAT_OK|STAT_COMPL)) {
- printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status));
- return 1;
- }
-
- /*
- * TDR, wire check .. e.g. no resistor e.t.c
- */
-
- tdr_cmd = ptr;
-
- writew(0, &tdr_cmd->cmd_status);
- writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
- writew(0xffff, &tdr_cmd->cmd_link);
- writew(0, &tdr_cmd->status);
-
- writew(make16(tdr_cmd), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
- ni_attn586();
-
- wait_for_stat_compl(tdr_cmd);
-
- if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL))
- printk(KERN_ERR "%s: Problems while running the TDR.\n",
- dev->name);
- else {
- udelay(16);
- result = readw(&tdr_cmd->status);
- writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
- ni_attn586(); /* ack the interrupts */
-
- if (result & TDR_LNK_OK)
- ;
- else if (result & TDR_XCVR_PRB)
- printk(KERN_ERR "%s: TDR: Transceiver problem. Check the cable(s)!\n",
- dev->name);
- else if (result & TDR_ET_OPN)
- printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n",
- dev->name, result & TDR_TIMEMASK);
- else if (result & TDR_ET_SRT) {
- /* time == 0 -> strange :-) */
- if (result & TDR_TIMEMASK)
- printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n",
- dev->name, result & TDR_TIMEMASK);
- } else
- printk(KERN_ERR "%s: TDR: Unknown status %04x\n",
- dev->name, result);
- }
-
- /*
- * Multicast setup
- */
- if (num_addrs && !(dev->flags & IFF_PROMISC)) {
- mc_cmd = ptr;
- writew(0, &mc_cmd->cmd_status);
- writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
- writew(0xffff, &mc_cmd->cmd_link);
- writew(num_addrs * 6, &mc_cmd->mc_cnt);
-
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
-
- writew(make16(mc_cmd), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
-
- wait_for_stat_compl(mc_cmd);
-
- if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK))
- != (STAT_COMPL|STAT_OK))
- printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name);
- }
-
- /*
- * alloc nop/xmit-cmds
- */
-#if (NUM_XMIT_BUFFS == 1)
- for (i = 0; i < 2; i++) {
- p->nop_cmds[i] = ptr;
- writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
- writew(0, &p->nop_cmds[i]->cmd_status);
- writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
- ptr = ptr + sizeof(struct nop_cmd_struct);
- }
-#else
- for (i = 0; i < NUM_XMIT_BUFFS; i++) {
- p->nop_cmds[i] = ptr;
- writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
- writew(0, &p->nop_cmds[i]->cmd_status);
- writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
- ptr = ptr + sizeof(struct nop_cmd_struct);
- }
-#endif
-
- ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
-
- /*
- * alloc xmit-buffs / init xmit_cmds
- */
- for (i = 0; i < NUM_XMIT_BUFFS; i++) {
- /* Transmit cmd/buff 0 */
- p->xmit_cmds[i] = ptr;
- ptr = ptr + sizeof(struct transmit_cmd_struct);
- p->xmit_cbuffs[i] = ptr; /* char-buffs */
- ptr = ptr + XMIT_BUFF_SIZE;
- p->xmit_buffs[i] = ptr; /* TBD */
- ptr = ptr + sizeof(struct tbd_struct);
- if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
- printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
- dev->name);
- return 1;
- }
- memset_io(p->xmit_cmds[i], 0,
- sizeof(struct transmit_cmd_struct));
- memset_io(p->xmit_buffs[i], 0,
- sizeof(struct tbd_struct));
- writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
- &p->xmit_cmds[i]->cmd_link);
- writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status);
- writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd);
- writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset);
- writew(0xffff, &p->xmit_buffs[i]->next);
- writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer);
- }
-
- p->xmit_count = 0;
- p->xmit_last = 0;
-#ifndef NO_NOPCOMMANDS
- p->nop_point = 0;
-#endif
-
- /*
- * 'start transmitter'
- */
-#ifndef NO_NOPCOMMANDS
- writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
-#else
- writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link);
- writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd);
-#endif
-
- /*
- * ack. interrupts
- */
- writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc);
- ni_attn586();
- udelay(16);
-
- ni_enaint();
-
- return 0;
-}
-
-/******************************************************
- * This is a helper routine for ni52_rnr_int() and init586().
- * It sets up the Receive Frame Area (RFA).
- */
-
-static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
-{
- struct rfd_struct __iomem *rfd = ptr;
- struct rbd_struct __iomem *rbd;
- int i;
- struct priv *p = netdev_priv(dev);
-
- memset_io(rfd, 0,
- sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
- p->rfd_first = rfd;
-
- for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) {
- writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)),
- &rfd[i].next);
- writew(0xffff, &rfd[i].rbd_offset);
- }
- /* RU suspend */
- writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
-
- ptr = rfd + (p->num_recv_buffs + rfdadd);
-
- rbd = ptr;
- ptr = rbd + p->num_recv_buffs;
-
- /* clr descriptors */
- memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));
-
- for (i = 0; i < p->num_recv_buffs; i++) {
- writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
- writew(RECV_BUFF_SIZE, &rbd[i].size);
- writel(make24(ptr), &rbd[i].buffer);
- ptr = ptr + RECV_BUFF_SIZE;
- }
- p->rfd_top = p->rfd_first;
- p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
-
- writew(make16(p->rfd_first), &p->scb->rfa_offset);
- writew(make16(rbd), &p->rfd_first->rbd_offset);
-
- return ptr;
-}
-
-
-/**************************************************
- * Interrupt Handler ...
- */
-
-static irqreturn_t ni52_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- unsigned int stat;
- int cnt = 0;
- struct priv *p;
-
- p = netdev_priv(dev);
-
- if (debuglevel > 1)
- printk("I");
-
- spin_lock(&p->spinlock);
-
- wait_for_scb_cmd(dev); /* wait for last command */
-
- while ((stat = readb(&p->scb->cus) & STAT_MASK)) {
- writeb(stat, &p->scb->cmd_cuc);
- ni_attn586();
-
- if (stat & STAT_FR) /* received a frame */
- ni52_rcv_int(dev);
-
- if (stat & STAT_RNR) { /* RU went 'not ready' */
- printk("(R)");
- if (readb(&p->scb->rus) & RU_SUSPEND) {
- /* special case: RU_SUSPEND */
- wait_for_scb_cmd(dev);
- writeb(RUC_RESUME, &p->scb->cmd_ruc);
- ni_attn586();
- wait_for_scb_cmd_ruc(dev);
- } else {
- printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",
- dev->name, stat, readb(&p->scb->rus));
- ni52_rnr_int(dev);
- }
- }
-
- /* Command with I-bit set complete */
- if (stat & STAT_CX)
- ni52_xmt_int(dev);
-
-#ifndef NO_NOPCOMMANDS
- if (stat & STAT_CNA) { /* CU went 'not ready' */
- if (netif_running(dev))
- printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n",
- dev->name, stat, readb(&p->scb->cus));
- }
-#endif
-
- if (debuglevel > 1)
- printk("%d", cnt++);
-
- /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
- wait_for_scb_cmd(dev);
- if (readb(&p->scb->cmd_cuc)) { /* timed out? */
- printk(KERN_ERR "%s: Acknowledge timed out.\n",
- dev->name);
- ni_disint();
- break;
- }
- }
- spin_unlock(&p->spinlock);
-
- if (debuglevel > 1)
- printk("i");
- return IRQ_HANDLED;
-}
-
-/*******************************************************
- * receive-interrupt
- */
-
-static void ni52_rcv_int(struct net_device *dev)
-{
- int status, cnt = 0;
- unsigned short totlen;
- struct sk_buff *skb;
- struct rbd_struct __iomem *rbd;
- struct priv *p = netdev_priv(dev);
-
- if (debuglevel > 0)
- printk("R");
-
- for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
- rbd = make32(readw(&p->rfd_top->rbd_offset));
- if (status & RFD_OK) { /* frame received without error? */
- totlen = readw(&rbd->status);
- if (totlen & RBD_LAST) {
- /* the first and the last buffer? */
- totlen &= RBD_MASK; /* length of this frame */
- writew(0x00, &rbd->status);
- skb = netdev_alloc_skb(dev, totlen + 2);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- skb_put(skb, totlen);
- memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += totlen;
- } else
- dev->stats.rx_dropped++;
- } else {
- int rstat;
- /* free all RBD's until RBD_LAST is set */
- totlen = 0;
- while (!((rstat = readw(&rbd->status)) & RBD_LAST)) {
- totlen += rstat & RBD_MASK;
- if (!rstat) {
- printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name);
- break;
- }
- writew(0, &rbd->status);
- rbd = make32(readw(&rbd->next));
- }
- totlen += rstat & RBD_MASK;
- writew(0, &rbd->status);
- printk(KERN_ERR "%s: received oversized frame! length: %d\n",
- dev->name, totlen);
- dev->stats.rx_dropped++;
- }
- } else {/* frame !(ok), only with 'save-bad-frames' */
- printk(KERN_ERR "%s: oops! rfd-error-status: %04x\n",
- dev->name, status);
- dev->stats.rx_errors++;
- }
- writeb(0, &p->rfd_top->stat_high);
- writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */
- writew(0xffff, &p->rfd_top->rbd_offset);
- writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
- p->rfd_last = p->rfd_top;
- p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */
- writew(make16(p->rfd_top), &p->scb->rfa_offset);
-
- if (debuglevel > 0)
- printk("%d", cnt++);
- }
-
- if (automatic_resume) {
- wait_for_scb_cmd(dev);
- writeb(RUC_RESUME, &p->scb->cmd_ruc);
- ni_attn586();
- wait_for_scb_cmd_ruc(dev);
- }
-
-#ifdef WAIT_4_BUSY
- {
- int i;
- for (i = 0; i < 1024; i++) {
- if (p->rfd_top->status)
- break;
- udelay(16);
- if (i == 1023)
- printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name);
- }
- }
-#endif
- if (debuglevel > 0)
- printk("r");
-}
-
-/**********************************************************
- * handle 'Receiver went not ready'.
- */
-
-static void ni52_rnr_int(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- dev->stats.rx_errors++;
-
- wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */
- writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */
- ni_attn586();
- wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
-
- alloc_rfa(dev, p->rfd_first);
- /* maybe add a check here, before restarting the RU */
- startrecv586(dev); /* restart RU */
-
- printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
- dev->name, readb(&p->scb->rus));
-
-}
-
-/**********************************************************
- * handle xmit - interrupt
- */
-
-static void ni52_xmt_int(struct net_device *dev)
-{
- int status;
- struct priv *p = netdev_priv(dev);
-
- if (debuglevel > 0)
- printk("X");
-
- status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status);
- if (!(status & STAT_COMPL))
- printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name);
-
- if (status & STAT_OK) {
- dev->stats.tx_packets++;
- dev->stats.collisions += (status & TCMD_MAXCOLLMASK);
- } else {
- dev->stats.tx_errors++;
- if (status & TCMD_LATECOLL) {
- printk(KERN_ERR "%s: late collision detected.\n",
- dev->name);
- dev->stats.collisions++;
- } else if (status & TCMD_NOCARRIER) {
- dev->stats.tx_carrier_errors++;
- printk(KERN_ERR "%s: no carrier detected.\n",
- dev->name);
- } else if (status & TCMD_LOSTCTS)
- printk(KERN_ERR "%s: loss of CTS detected.\n",
- dev->name);
- else if (status & TCMD_UNDERRUN) {
- dev->stats.tx_fifo_errors++;
- printk(KERN_ERR "%s: DMA underrun detected.\n",
- dev->name);
- } else if (status & TCMD_MAXCOLL) {
- printk(KERN_ERR "%s: Max. collisions exceeded.\n",
- dev->name);
- dev->stats.collisions += 16;
- }
- }
-#if (NUM_XMIT_BUFFS > 1)
- if ((++p->xmit_last) == NUM_XMIT_BUFFS)
- p->xmit_last = 0;
-#endif
- netif_wake_queue(dev);
-}
-
-/***********************************************************
- * (re)start the receiver
- */
-
-static void startrecv586(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-
- wait_for_scb_cmd(dev);
- wait_for_scb_cmd_ruc(dev);
- writew(make16(p->rfd_first), &p->scb->rfa_offset);
- writeb(RUC_START, &p->scb->cmd_ruc);
- ni_attn586(); /* start cmd. */
- wait_for_scb_cmd_ruc(dev);
- /* wait for accept cmd. (no timeout!!) */
-}
-
-static void ni52_timeout(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
-#ifndef NO_NOPCOMMANDS
- if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */
- netif_wake_queue(dev);
-#ifdef DEBUG
- printk(KERN_ERR "%s: strange ... timeout with CU active?!?\n",
- dev->name);
- printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n",
- dev->name, readw(&p->xmit_cmds[0]->cmd_status),
- readw(&p->nop_cmds[0]->cmd_status),
- readw(&p->nop_cmds[1]->cmd_status),
- p->nop_point);
-#endif
- writeb(CUC_ABORT, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
- writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- ni_attn586();
- wait_for_scb_cmd(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
- return;
- }
-#endif
- {
-#ifdef DEBUG
- printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n",
- dev->name, readb(&p->scb->cus));
- printk(KERN_ERR "%s: command-stats: %04x %04x\n",
- dev->name,
- readw(&p->xmit_cmds[0]->cmd_status),
- readw(&p->xmit_cmds[1]->cmd_status));
- printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n",
- dev->name);
-#endif
- ni52_close(dev);
- ni52_open(dev);
- }
- dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-/******************************************************
- * send frame
- */
-
-static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- int len, i;
-#ifndef NO_NOPCOMMANDS
- int next_nop;
-#endif
- struct priv *p = netdev_priv(dev);
-
- if (skb->len > XMIT_BUFF_SIZE) {
- printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len);
- return NETDEV_TX_OK;
- }
-
- netif_stop_queue(dev);
-
- memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
- len = skb->len;
- if (len < ETH_ZLEN) {
- len = ETH_ZLEN;
- memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
- len - skb->len);
- }
-
-#if (NUM_XMIT_BUFFS == 1)
-# ifdef NO_NOPCOMMANDS
-
-#ifdef DEBUG
- if (readb(&p->scb->cus) & CU_ACTIVE) {
- printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
- printk(KERN_ERR "%s: stat: %04x %04x\n",
- dev->name, readb(&p->scb->cus),
- readw(&p->xmit_cmds[0]->cmd_status));
- }
-#endif
- writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
- for (i = 0; i < 16; i++) {
- writew(0, &p->xmit_cmds[0]->cmd_status);
- wait_for_scb_cmd(dev);
- if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND)
- writeb(CUC_RESUME, &p->scb->cmd_cuc);
- else {
- writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset);
- writeb(CUC_START, &p->scb->cmd_cuc);
- }
- ni_attn586();
- if (!i)
- dev_kfree_skb(skb);
- wait_for_scb_cmd(dev);
- /* test it, because CU sometimes doesn't start immediately */
- if (readb(&p->scb->cus) & CU_ACTIVE)
- break;
- if (readw(&p->xmit_cmds[0]->cmd_status))
- break;
- if (i == 15)
- printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
- }
-# else
- next_nop = (p->nop_point + 1) & 0x1;
- writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
- writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link);
- writew(make16(p->nop_cmds[next_nop]),
- &p->nop_cmds[next_nop]->cmd_link);
- writew(0, &p->xmit_cmds[0]->cmd_status);
- writew(0, &p->nop_cmds[next_nop]->cmd_status);
-
- writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
- p->nop_point = next_nop;
- dev_kfree_skb(skb);
-# endif
-#else
- writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size);
- next_nop = p->xmit_count + 1;
- if (next_nop == NUM_XMIT_BUFFS)
- next_nop = 0;
- writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status);
- /* linkpointer of xmit-command already points to next nop cmd */
- writew(make16(p->nop_cmds[next_nop]),
- &p->nop_cmds[next_nop]->cmd_link);
- writew(0, &p->nop_cmds[next_nop]->cmd_status);
- writew(make16(p->xmit_cmds[p->xmit_count]),
- &p->nop_cmds[p->xmit_count]->cmd_link);
- p->xmit_count = next_nop;
- {
- unsigned long flags;
- spin_lock_irqsave(&p->spinlock, flags);
- if (p->xmit_count != p->xmit_last)
- netif_wake_queue(dev);
- spin_unlock_irqrestore(&p->spinlock, flags);
- }
- dev_kfree_skb(skb);
-#endif
- return NETDEV_TX_OK;
-}
-
-/*******************************************
- * Someone wanna have the statistics
- */
-
-static struct net_device_stats *ni52_get_stats(struct net_device *dev)
-{
- struct priv *p = netdev_priv(dev);
- unsigned short crc, aln, rsc, ovrn;
-
- /* Get error-statistics from the ni82586 */
- crc = readw(&p->scb->crc_errs);
- writew(0, &p->scb->crc_errs);
- aln = readw(&p->scb->aln_errs);
- writew(0, &p->scb->aln_errs);
- rsc = readw(&p->scb->rsc_errs);
- writew(0, &p->scb->rsc_errs);
- ovrn = readw(&p->scb->ovrn_errs);
- writew(0, &p->scb->ovrn_errs);
-
- dev->stats.rx_crc_errors += crc;
- dev->stats.rx_fifo_errors += ovrn;
- dev->stats.rx_frame_errors += aln;
- dev->stats.rx_dropped += rsc;
-
- return &dev->stats;
-}
-
-/********************************************************
- * Set MC list ..
- */
-
-static void set_multicast_list(struct net_device *dev)
-{
- netif_stop_queue(dev);
- ni_disint();
- alloc586(dev);
- init586(dev);
- startrecv586(dev);
- ni_enaint();
- netif_wake_queue(dev);
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni52;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-module_param(memstart, long, 0);
-module_param(memend, long, 0);
-MODULE_PARM_DESC(io, "NI5210 I/O base address, required");
-MODULE_PARM_DESC(irq, "NI5210 IRQ number, required");
-MODULE_PARM_DESC(memstart, "NI5210 memory base address, required");
-MODULE_PARM_DESC(memend, "NI5210 memory end address, required");
-
-int __init init_module(void)
-{
- if (io <= 0x0 || !memend || !memstart || irq < 2) {
- printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n");
- printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
- return -ENODEV;
- }
- dev_ni52 = ni52_probe(-1);
- if (IS_ERR(dev_ni52))
- return PTR_ERR(dev_ni52);
- return 0;
-}
-
-void __exit cleanup_module(void)
-{
- struct priv *p = netdev_priv(dev_ni52);
- unregister_netdev(dev_ni52);
- iounmap(p->mapped);
- release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
- free_netdev(dev_ni52);
-}
-#endif /* MODULE */
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/i825xx/ni52.h b/drivers/net/ethernet/i825xx/ni52.h
deleted file mode 100644
index 0a03b288332..00000000000
--- a/drivers/net/ethernet/i825xx/ni52.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Intel i82586 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * copyrights (c) 1994 by Michael Hipp (hippm@informatik.uni-tuebingen.de)
- *
- * I have done a look in the following sources:
- * crynwr-packet-driver by Russ Nelson
- * Garret A. Wollman's i82586-driver for BSD
- */
-
-
-#define NI52_RESET 0 /* writing to this address, resets the i82586 */
-#define NI52_ATTENTION 1 /* channel attention, kick the 586 */
-#define NI52_TENA 3 /* 2-5 possibly wrong, Xmit enable */
-#define NI52_TDIS 2 /* Xmit disable */
-#define NI52_INTENA 5 /* Interrupt enable */
-#define NI52_INTDIS 4 /* Interrupt disable */
-#define NI52_MAGIC1 6 /* dunno exact function */
-#define NI52_MAGIC2 7 /* dunno exact function */
-
-#define NI52_MAGICVAL1 0x00 /* magic-values for ni5210 card */
-#define NI52_MAGICVAL2 0x55
-
-/*
- * where to find the System Configuration Pointer (SCP)
- */
-#define SCP_DEFAULT_ADDRESS 0xfffff4
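-
-/* (illustrative note: 0xfffff4 is the fixed address, 12 bytes below the top
- * of the i82586's 16MB address space, at which the chip expects to find its
- * SCP after a reset plus channel attention) */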
-
-
-/*
- * System Configuration Pointer Struct
- */
-
-struct scp_struct
-{
- u16 zero_dum0; /* has to be zero */
- u8 sysbus; /* 0=16Bit,1=8Bit */
- u8 zero_dum1; /* has to be zero for 586 */
- u16 zero_dum2;
- u16 zero_dum3;
- u32 iscp; /* pointer to the iscp-block */
-};
-
-
-/*
- * Intermediate System Configuration Pointer (ISCP)
- */
-struct iscp_struct
-{
- u8 busy; /* 586 clears after successful init */
- u8 zero_dummy; /* has to be zero */
- u16 scb_offset; /* pointeroffset to the scb_base */
- u32 scb_base; /* base-address of all 16-bit offsets */
-};
-
-/*
- * System Control Block (SCB)
- */
-struct scb_struct
-{
- u8 rus;
- u8 cus;
- u8 cmd_ruc; /* command word: RU part */
- u8 cmd_cuc; /* command word: CU part & ACK */
- u16 cbl_offset; /* pointeroffset, command block list */
- u16 rfa_offset; /* pointeroffset, receive frame area */
- u16 crc_errs; /* CRC-Error counter */
- u16 aln_errs; /* alignment error counter */
- u16 rsc_errs; /* resource error counter */
- u16 ovrn_errs; /* overrun error counter */
-};
-
-/*
- * possible command values for the command word
- */
-#define RUC_MASK 0x0070 /* mask for RU commands */
-#define RUC_NOP 0x0000 /* NOP-command */
-#define RUC_START 0x0010 /* start RU */
-#define RUC_RESUME 0x0020 /* resume RU after suspend */
-#define RUC_SUSPEND 0x0030 /* suspend RU */
-#define RUC_ABORT 0x0040 /* abort receiver operation immediately */
-
-#define CUC_MASK 0x07 /* mask for CU command */
-#define CUC_NOP 0x00 /* NOP-command */
-#define CUC_START 0x01 /* start execution of the first cmd on the CBL */
-#define CUC_RESUME 0x02 /* resume after suspend */
-#define CUC_SUSPEND 0x03 /* Suspend CU */
-#define CUC_ABORT 0x04 /* abort command operation immediately */
-
-#define ACK_MASK 0xf0 /* mask for ACK command */
-#define ACK_CX 0x80 /* acknowledges STAT_CX */
-#define ACK_FR 0x40 /* ack. STAT_FR */
-#define ACK_CNA 0x20 /* ack. STAT_CNA */
-#define ACK_RNR 0x10 /* ack. STAT_RNR */
-
-/*
- * possible status values for the status word
- */
-#define STAT_MASK 0xf0 /* mask for cause of interrupt */
-#define STAT_CX 0x80 /* CU finished cmd with its I bit set */
-#define STAT_FR 0x40 /* RU finished receiving a frame */
-#define STAT_CNA 0x20 /* CU left active state */
-#define STAT_RNR 0x10 /* RU left ready state */
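-
-/* illustrative note: the ACK_* bits above occupy the same bit positions as
- * the corresponding STAT_* bits, so an interrupt can be acknowledged by
- * writing (status & STAT_MASK) back into the command word */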
-
-#define CU_STATUS 0x7 /* CU status, 0=idle */
-#define CU_SUSPEND 0x1 /* CU is suspended */
-#define CU_ACTIVE 0x2 /* CU is active */
-
-#define RU_STATUS 0x70 /* RU status, 0=idle */
-#define RU_SUSPEND 0x10 /* RU suspended */
-#define RU_NOSPACE 0x20 /* RU no resources */
-#define RU_READY 0x40 /* RU is ready */
-
-/*
- * Receive Frame Descriptor (RFD)
- */
-struct rfd_struct
-{
- u8 stat_low; /* status word */
- u8 stat_high; /* status word */
- u8 rfd_sf; /* 82596 mode only */
- u8 last; /* Bit15,Last Frame on List / Bit14,suspend */
- u16 next; /* linkoffset to next RFD */
- u16 rbd_offset; /* pointeroffset to RBD-buffer */
- u8 dest[6]; /* ethernet-address, destination */
- u8 source[6]; /* ethernet-address, source */
- u16 length; /* 802.3 frame-length */
- u16 zero_dummy; /* dummy */
-};
-
-#define RFD_LAST 0x80 /* last: last rfd in the list */
-#define RFD_SUSP 0x40 /* last: suspend RU after */
-#define RFD_COMPL 0x80
-#define RFD_OK 0x20
-#define RFD_BUSY 0x40
-#define RFD_ERR_LEN 0x10 /* Length error (if length-checking is enabled) */
-#define RFD_ERR_CRC 0x08 /* CRC error */
-#define RFD_ERR_ALGN 0x04 /* Alignment error */
-#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
-#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-
-#define RFD_ERR_FTS 0x0080 /* Frame too short */
-#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
-#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
-#define RFD_MATCHADD 0x0002 /* status: destination address doesn't match IA (only 82596) */
-#define RFD_COLLDET 0x0001 /* Detected collision during reception */
-
-/*
- * Receive Buffer Descriptor (RBD)
- */
-struct rbd_struct
-{
- u16 status; /* status word, number of used bytes in buffer */
- u16 next; /* pointeroffset to next RBD */
- u32 buffer; /* receive buffer address pointer */
- u16 size; /* size of this buffer */
- u16 zero_dummy; /* dummy */
-};
-
-#define RBD_LAST 0x8000 /* last buffer */
-#define RBD_USED 0x4000 /* this buffer has data */
-#define RBD_MASK 0x3fff /* size-mask for length */
-
-/*
- * Status values for Commands/RFD
- */
-#define STAT_COMPL 0x8000 /* status: frame/command is complete */
-#define STAT_BUSY 0x4000 /* status: frame/command is busy */
-#define STAT_OK 0x2000 /* status: frame/command is ok */
-
-/*
- * Action-Commands
- */
-#define CMD_NOP 0x0000 /* NOP */
-#define CMD_IASETUP 0x0001 /* initial address setup command */
-#define CMD_CONFIGURE 0x0002 /* configure command */
-#define CMD_MCSETUP 0x0003 /* MC setup command */
-#define CMD_XMIT 0x0004 /* transmit command */
-#define CMD_TDR 0x0005 /* time domain reflectometer (TDR) command */
-#define CMD_DUMP 0x0006 /* dump command */
-#define CMD_DIAGNOSE 0x0007 /* diagnose command */
-
-/*
- * Action command bits
- */
-#define CMD_LAST 0x8000 /* indicates last command in the CBL */
-#define CMD_SUSPEND 0x4000 /* suspend CU after this CB */
-#define CMD_INT 0x2000 /* generate interrupt after execution */
-
-/*
- * NOP - command
- */
-struct nop_cmd_struct
-{
- u16 cmd_status; /* status of this command */
- u16 cmd_cmd; /* the command itself (+bits) */
- u16 cmd_link; /* offsetpointer to next command */
-};
-
-/*
- * IA Setup command
- */
-struct iasetup_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u8 iaddr[6];
-};
-
-/*
- * Configure command
- */
-struct configure_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u8 byte_cnt; /* size of the config-cmd */
- u8 fifo; /* fifo/recv monitor */
- u8 sav_bf; /* save bad frames (bit7=1)*/
- u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/
- u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */
- u8 ifs; /* inter frame spacing */
- u8 time_low; /* slot time low */
- u8 time_high; /* slot time high(0-2) and max. retries(4-7) */
- u8 promisc; /* promisc-mode(0) , et al (1-7) */
- u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */
- u8 fram_len; /* minimal frame len */
- u8 dummy; /* dummy */
-};
-
-/*
- * Multicast Setup command
- */
-struct mcsetup_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 mc_cnt; /* number of bytes in the MC-List */
- u8 mc_list[0][6]; /* pointer to 6 bytes entries */
-};
-
-/*
- * DUMP command
- */
-struct dump_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 dump_offset; /* pointeroffset to DUMP space */
-};
-
-/*
- * transmit command
- */
-struct transmit_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 tbd_offset; /* pointeroffset to TBD */
- u8 dest[6]; /* destination address of the frame */
- u16 length; /* user defined: 802.3 length / Ether type */
-};
-
-#define TCMD_ERRMASK 0x0fa0
-#define TCMD_MAXCOLLMASK 0x000f
-#define TCMD_MAXCOLL 0x0020
-#define TCMD_HEARTBEAT 0x0040
-#define TCMD_DEFERRED 0x0080
-#define TCMD_UNDERRUN 0x0100
-#define TCMD_LOSTCTS 0x0200
-#define TCMD_NOCARRIER 0x0400
-#define TCMD_LATECOLL 0x0800
-
-struct tdr_cmd_struct
-{
- u16 cmd_status;
- u16 cmd_cmd;
- u16 cmd_link;
- u16 status;
-};
-
-#define TDR_LNK_OK 0x8000 /* No link problem identified */
-#define TDR_XCVR_PRB 0x4000 /* indicates a transceiver problem */
-#define TDR_ET_OPN 0x2000 /* open, no correct termination */
-#define TDR_ET_SRT 0x1000 /* TDR detected a short circuit */
-#define TDR_TIMEMASK 0x07ff /* mask for the time field */
-
-/*
- * Transmit Buffer Descriptor (TBD)
- */
-struct tbd_struct
-{
- u16 size; /* size + EOF-Flag(15) */
- u16 next; /* pointeroffset to next TBD */
- u32 buffer; /* pointer to buffer */
-};
-
-#define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
-
-
-
-
diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c
deleted file mode 100644
index c9479e081b8..00000000000
--- a/drivers/net/ethernet/i825xx/znet.c
+++ /dev/null
@@ -1,928 +0,0 @@
-/* znet.c: A Zenith Z-Note ethernet driver for Linux. */
-
-/*
- Written by Donald Becker.
-
- The author may be reached as becker@scyld.com.
- This driver is based on the Linux skeleton driver. The copyright of the
- skeleton driver is held by the United States Government, as represented
- by DIRNSA, and it is released under the GPL.
-
- Thanks to Mike Hollick for alpha testing and suggestions.
-
- References:
- The Crynwr packet driver.
-
- "82593 CSMA/CD Core LAN Controller" Intel datasheet, 1992
- Intel Microcommunications Databook, Vol. 1, 1990.
- As usual with Intel, the documentation is incomplete and inaccurate.
- I had to read the Crynwr packet driver to figure out how to actually
- use the i82593, and guess at what register bits matched the loosely
- related i82586.
-
- Theory of Operation
-
- The i82593 used in the Zenith Z-Note series operates using two(!) slave
- DMA channels, one interrupt, and one 8-bit I/O port.
-
- While there are several ways to configure the '593 DMA system, I chose the one
- that seemed commensurate with the highest system performance in the face
- of moderate interrupt latency: Both DMA channels are configured as
- recirculating ring buffers, with one channel (#0) dedicated to Rx and
- the other channel (#1) to Tx and configuration. (Note that this is
- different than the Crynwr driver, where the Tx DMA channel is initialized
- before each operation. That approach simplifies operation and Tx error
- recovery, but requires additional I/O in normal operation and precludes
- transmit buffer chaining.)
-
- Both rings are set to 8192 bytes using {TX,RX}_RING_SIZE. This provides
- a reasonable ring size for Rx, while simplifying DMA buffer allocation --
- DMA buffers must not cross a 128K boundary. (In truth the size selection
- was influenced by my lack of '593 documentation. I thus was constrained
- to use the Crynwr '593 initialization table, which sets the Rx ring size
- to 8K.)
-
- Despite my usual low opinion about Intel-designed parts, I must admit
- that the bulk data handling of the i82593 is a good design for
- an integrated system, like a laptop, where using two slave DMA channels
- doesn't pose a problem. I still take issue with using only a single I/O
- port. In the same controlled environment there are essentially no
- limitations on I/O space, and using multiple locations would eliminate
- the need for multiple operations when looking at status registers,
- setting the Rx ring boundary, or switching to promiscuous mode.
-
- I also question Zenith's selection of the '593: one of the advertised
- advantages of earlier Intel parts was that if you figured out the magic
- initialization incantation you could use the same part on many different
- network types. Zenith's use of the "FriendlyNet" (sic) connector rather
- than an on-board transceiver leads me to believe that they were planning
- to take advantage of this. But, uhmmm, the '593 omits all but ethernet
- functionality from the serial subsystem.
- */
-
-/* 10/2002
-
- o Resurrected for Linux 2.5+ by Marc Zyngier <maz@wild-wind.fr.eu.org> :
-
- - Removed strange DMA snooping in znet_sent_packet, which led to
- TX buffer corruption on my laptop.
- - Use init_etherdev stuff.
- - Use kmalloc-ed DMA buffers.
- - Use as few global variables as possible.
- - Use proper resources management.
- - Use wireless/i82593.h as much as possible (structure, constants)
- - Compiles as module or build-in.
- - Now survives unplugging/replugging cable.
-
- Some code was taken from wavelan_cs.
-
- Tested on a vintage Zenith Z-Note 433Lnp+. Probably broken on
- anything else. Testers (and detailed bug reports) are welcome :-).
-
- o TODO :
-
- - Properly handle multicast
- - Understand why some traffic patterns add a 1s latency...
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/i82593.h>
-
-static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
-
-#ifndef ZNET_DEBUG
-#define ZNET_DEBUG 1
-#endif
-static unsigned int znet_debug = ZNET_DEBUG;
-module_param (znet_debug, int, 0);
-MODULE_PARM_DESC (znet_debug, "ZNet debug level");
-MODULE_LICENSE("GPL");
-
-/* The DMA modes we need aren't in <dma.h>. */
-#define DMA_RX_MODE 0x14 /* Auto init, I/O to mem, ++, demand. */
-#define DMA_TX_MODE 0x18 /* Auto init, Mem to I/O, ++, demand. */
-#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
-#define RX_BUF_SIZE 8192
-#define TX_BUF_SIZE 8192
-#define DMA_BUF_SIZE (RX_BUF_SIZE + 16) /* 8k + 16 bytes for trailers */
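-
-/* Illustrative note: dma_page_eq() compares bits 17 and up, i.e. it checks
- that both pointers lie in the same 128K (1 << 17) region, matching the
- "DMA buffers must not cross a 128K boundary" rule described above. */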
-
-#define TX_TIMEOUT (HZ/10)
-
-struct znet_private {
- int rx_dma, tx_dma;
- spinlock_t lock;
- short sia_base, sia_size, io_size;
- struct i82593_conf_block i593_init;
- /* The starting, current, and end pointers for the packet buffers. */
- ushort *rx_start, *rx_cur, *rx_end;
- ushort *tx_start, *tx_cur, *tx_end;
- ushort tx_buf_len; /* Tx buffer length, in words. */
-};
-
-/* Only one can be built-in;-> */
-static struct net_device *znet_dev;
-
-#define NETIDBLK_MAGIC "NETIDBLK"
-#define NETIDBLK_MAGIC_SIZE 8
-
-struct netidblk {
- char magic[NETIDBLK_MAGIC_SIZE]; /* The magic number (string) "NETIDBLK" */
- unsigned char netid[8]; /* The physical station address */
- char nettype, globalopt;
- char vendor[8]; /* The machine vendor and product name. */
- char product[8];
- char irq1, irq2; /* Interrupts, only one is currently used. */
- char dma1, dma2;
- short dma_mem_misc[8]; /* DMA buffer locations (unused in Linux). */
- short iobase1, iosize1;
- short iobase2, iosize2; /* Second iobase unused. */
- char driver_options; /* Misc. bits */
- char pad;
-};
-
-static int znet_open(struct net_device *dev);
-static netdev_tx_t znet_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t znet_interrupt(int irq, void *dev_id);
-static void znet_rx(struct net_device *dev);
-static int znet_close(struct net_device *dev);
-static void hardware_init(struct net_device *dev);
-static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset);
-static void znet_tx_timeout (struct net_device *dev);
-
-/* Request needed resources */
-static int znet_request_resources (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
-
- if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
- goto failed;
- if (request_dma (znet->rx_dma, "ZNet rx"))
- goto free_irq;
- if (request_dma (znet->tx_dma, "ZNet tx"))
- goto free_rx_dma;
- if (!request_region (znet->sia_base, znet->sia_size, "ZNet SIA"))
- goto free_tx_dma;
- if (!request_region (dev->base_addr, znet->io_size, "ZNet I/O"))
- goto free_sia;
-
- return 0; /* Happy ! */
-
- free_sia:
- release_region (znet->sia_base, znet->sia_size);
- free_tx_dma:
- free_dma (znet->tx_dma);
- free_rx_dma:
- free_dma (znet->rx_dma);
- free_irq:
- free_irq (dev->irq, dev);
- failed:
- return -1;
-}
-
-static void znet_release_resources (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
-
- release_region (znet->sia_base, znet->sia_size);
- release_region (dev->base_addr, znet->io_size);
- free_dma (znet->tx_dma);
- free_dma (znet->rx_dma);
- free_irq (dev->irq, dev);
-}
-
-/* Keep the magical SIA stuff in a single function... */
-static void znet_transceiver_power (struct net_device *dev, int on)
-{
- struct znet_private *znet = netdev_priv(dev);
- unsigned char v;
-
- /* Turn on/off the 82501 SIA, using zenith-specific magic. */
- /* Select LAN control register */
- outb(0x10, znet->sia_base);
-
- if (on)
- v = inb(znet->sia_base + 1) | 0x84;
- else
- v = inb(znet->sia_base + 1) & ~0x84;
-
- outb(v, znet->sia_base+1); /* Turn on/off LAN power (bit 2). */
-}
-
-/* Init the i82593, with current promisc/mcast configuration.
- Also used from hardware_init. */
-static void znet_set_multicast_list (struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
- short ioaddr = dev->base_addr;
- struct i82593_conf_block *cfblk = &znet->i593_init;
-
- memset(cfblk, 0x00, sizeof(struct i82593_conf_block));
-
- /* The configuration block. What an undocumented nightmare.
- The first set of values are those suggested (without explanation)
- for ethernet in the Intel 82586 databook. The rest appear to be
- completely undocumented, except for cryptic notes in the Crynwr
- packet driver. This driver uses the Crynwr values verbatim. */
-
- /* maz : Rewritten to take advantage of the wavelan includes.
- At least we have names, not just blind values */
-
- /* Byte 0 */
- cfblk->fifo_limit = 10; /* = 16 B rx and 80 B tx fifo thresholds */
- cfblk->forgnesi = 0; /* 0=82C501, 1=AMD7992B compatibility */
- cfblk->fifo_32 = 1;
- cfblk->d6mod = 0; /* Run in i82593 advanced mode */
- cfblk->throttle_enb = 1;
-
- /* Byte 1 */
- cfblk->throttle = 8; /* Continuous w/interrupts, 128-clock DMA. */
- cfblk->cntrxint = 0; /* enable continuous mode receive interrupts */
- cfblk->contin = 1; /* enable continuous mode */
-
- /* Byte 2 */
- cfblk->addr_len = ETH_ALEN;
- cfblk->acloc = 1; /* Disable source addr insertion by i82593 */
- cfblk->preamb_len = 2; /* 8 bytes preamble */
- cfblk->loopback = 0; /* Loopback off */
-
- /* Byte 3 */
- cfblk->lin_prio = 0; /* Default priorities & backoff methods. */
- cfblk->tbofstop = 0;
- cfblk->exp_prio = 0;
- cfblk->bof_met = 0;
-
- /* Byte 4 */
- cfblk->ifrm_spc = 6; /* 96 bit times interframe spacing */
-
- /* Byte 5 */
- cfblk->slottim_low = 0; /* 512 bit times slot time (low) */
-
- /* Byte 6 */
- cfblk->slottim_hi = 2; /* 512 bit times slot time (high) */
- cfblk->max_retr = 15; /* 15 collisions retries */
-
- /* Byte 7 */
- cfblk->prmisc = ((dev->flags & IFF_PROMISC) ? 1 : 0); /* Promiscuous mode */
- cfblk->bc_dis = 0; /* Enable broadcast reception */
- cfblk->crs_1 = 0; /* Don't transmit without carrier sense */
- cfblk->nocrc_ins = 0; /* i82593 generates CRC */
- cfblk->crc_1632 = 0; /* 32-bit Autodin-II CRC */
- cfblk->crs_cdt = 0; /* CD not to be interpreted as CS */
-
- /* Byte 8 */
- cfblk->cs_filter = 0; /* CS is recognized immediately */
- cfblk->crs_src = 0; /* External carrier sense */
- cfblk->cd_filter = 0; /* CD is recognized immediately */
-
- /* Byte 9 */
- cfblk->min_fr_len = ETH_ZLEN >> 2; /* Minimum frame length */
-
- /* Byte A */
- cfblk->lng_typ = 1; /* Type/length checks OFF */
- cfblk->lng_fld = 1; /* Disable 802.3 length field check */
- cfblk->rxcrc_xf = 1; /* Don't transfer CRC to memory */
- cfblk->artx = 1; /* Disable automatic retransmission */
- cfblk->sarec = 1; /* Disable source addr trig of CD */
- cfblk->tx_jabber = 0; /* Disable jabber jam sequence */
- cfblk->hash_1 = 1; /* Use bits 0-5 in mc address hash */
- cfblk->lbpkpol = 0; /* Loopback pin active high */
-
- /* Byte B */
- cfblk->fdx = 0; /* Disable full duplex operation */
-
- /* Byte C */
- cfblk->dummy_6 = 0x3f; /* all ones, Default multicast addresses & backoff. */
- cfblk->mult_ia = 0; /* No multiple individual addresses */
- cfblk->dis_bof = 0; /* Disable the backoff algorithm ?! */
-
- /* Byte D */
- cfblk->dummy_1 = 1; /* set to 1 */
- cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
- cfblk->mc_all = (!netdev_mc_empty(dev) ||
- (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
- cfblk->rcv_mon = 0; /* Monitor mode disabled */
- cfblk->frag_acpt = 0; /* Do not accept fragments */
- cfblk->tstrttrs = 0; /* No start transmission threshold */
-
- /* Byte E */
- cfblk->fretx = 1; /* FIFO automatic retransmission */
- cfblk->runt_eop = 0; /* drop "runt" packets */
- cfblk->hw_sw_pin = 0; /* ?? */
- cfblk->big_endn = 0; /* Big Endian ? no... */
- cfblk->syncrqs = 1; /* Synchronous DRQ deassertion... */
- cfblk->sttlen = 1; /* 6 byte status registers */
- cfblk->rx_eop = 0; /* Signal EOP on packet reception */
- cfblk->tx_eop = 0; /* Signal EOP on packet transmission */
-
- /* Byte F */
- cfblk->rbuf_size = RX_BUF_SIZE >> 12; /* Set receive buffer size */
- cfblk->rcvstop = 1; /* Enable Receive Stop Register */
-
- if (znet_debug > 2) {
- int i;
- unsigned char *c;
-
- for (i = 0, c = (char *) cfblk; i < sizeof (*cfblk); i++)
- printk ("%02X ", c[i]);
- printk ("\n");
- }
-
- *znet->tx_cur++ = sizeof(struct i82593_conf_block);
- memcpy(znet->tx_cur, cfblk, sizeof(struct i82593_conf_block));
- znet->tx_cur += sizeof(struct i82593_conf_block)/2;
- outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
-
- /* XXX FIXME maz : Add multicast addresses here, so having a
- * multicast address configured isn't equal to IFF_ALLMULTI */
-}
-
-static const struct net_device_ops znet_netdev_ops = {
- .ndo_open = znet_open,
- .ndo_stop = znet_close,
- .ndo_start_xmit = znet_send_packet,
- .ndo_set_rx_mode = znet_set_multicast_list,
- .ndo_tx_timeout = znet_tx_timeout,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* The Z-Note probe is pretty easy. The NETIDBLK exists in the safe-to-probe
- BIOS area. We just scan for the signature, and pull the vital parameters
- out of the structure. */
-
-static int __init znet_probe (void)
-{
- int i;
- struct netidblk *netinfo;
- struct znet_private *znet;
- struct net_device *dev;
- char *p;
- char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE);
- int err = -ENOMEM;
-
- /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */
- for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++)
- if (*p == 'N' &&
- strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0)
- break;
-
- if (p > plast) {
- if (znet_debug > 1)
- printk(KERN_INFO "No Z-Note ethernet adaptor found.\n");
- return -ENODEV;
- }
-
- dev = alloc_etherdev(sizeof(struct znet_private));
- if (!dev)
- return -ENOMEM;
-
- znet = netdev_priv(dev);
-
- netinfo = (struct netidblk *)p;
- dev->base_addr = netinfo->iobase1;
- dev->irq = netinfo->irq1;
-
- /* The station address is in the "netidblk" at 0x0f0000. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = netinfo->netid[i];
-
- printk(KERN_INFO "%s: ZNET at %#3lx, %pM"
- ", using IRQ %d DMA %d and %d.\n",
- dev->name, dev->base_addr, dev->dev_addr,
- dev->irq, netinfo->dma1, netinfo->dma2);
-
- if (znet_debug > 1) {
- printk(KERN_INFO "%s: vendor '%16.16s' IRQ1 %d IRQ2 %d DMA1 %d DMA2 %d.\n",
- dev->name, netinfo->vendor,
- netinfo->irq1, netinfo->irq2,
- netinfo->dma1, netinfo->dma2);
- printk(KERN_INFO "%s: iobase1 %#x size %d iobase2 %#x size %d net type %2.2x.\n",
- dev->name, netinfo->iobase1, netinfo->iosize1,
- netinfo->iobase2, netinfo->iosize2, netinfo->nettype);
- }
-
- if (znet_debug > 0)
- printk(KERN_INFO "%s", version);
-
- znet->rx_dma = netinfo->dma1;
- znet->tx_dma = netinfo->dma2;
- spin_lock_init(&znet->lock);
- znet->sia_base = 0xe6; /* Magic address for the 82501 SIA */
- znet->sia_size = 2;
- /* maz: Despite the '593 being advertised above as using a
- * single 8-bit I/O port, this driver does many 16-bit
- * accesses. So set io_size accordingly */
- znet->io_size = 2;
-
- if (!(znet->rx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
- goto free_dev;
- if (!(znet->tx_start = kmalloc (DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA)))
- goto free_rx;
-
- if (!dma_page_eq (znet->rx_start, znet->rx_start + (RX_BUF_SIZE/2-1)) ||
- !dma_page_eq (znet->tx_start, znet->tx_start + (TX_BUF_SIZE/2-1))) {
- printk (KERN_WARNING "tx/rx crossing DMA frontiers, giving up\n");
- goto free_tx;
- }
-
- znet->rx_end = znet->rx_start + RX_BUF_SIZE/2;
- znet->tx_buf_len = TX_BUF_SIZE/2;
- znet->tx_end = znet->tx_start + znet->tx_buf_len;
-
- /* The ZNET-specific entries in the device structure. */
- dev->netdev_ops = &znet_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
- err = register_netdev(dev);
- if (err)
- goto free_tx;
- znet_dev = dev;
- return 0;
-
- free_tx:
- kfree(znet->tx_start);
- free_rx:
- kfree(znet->rx_start);
- free_dev:
- free_netdev(dev);
- return err;
-}
-
-
-static int znet_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- if (znet_debug > 2)
- printk(KERN_DEBUG "%s: znet_open() called.\n", dev->name);
-
- /* These should never fail. You can't add devices to a sealed box! */
- if (znet_request_resources (dev)) {
- printk(KERN_WARNING "%s: Not opened -- resource busy?!?\n", dev->name);
- return -EBUSY;
- }
-
- znet_transceiver_power (dev, 1);
-
- /* According to the Crynwr driver we should wait 50 msec. for the
- LAN clock to stabilize. My experiments indicate that the '593 can
- be initialized immediately. The delay is probably needed for the
- DC-to-DC converter to come up to full voltage, and for the oscillator
- to be spot-on at 20 MHz before transmitting.
- Until this proves to be a problem we rely on the higher layers for the
- delay and save allocating a timer entry. */
-
- /* maz : Well, without the delay I get the following message
- * every time on a 486@33. This machine is much too
- * fast... :-) So maybe the Crynwr driver wasn't wrong after
- * all, even if the message is completely harmless on my
- * setup. */
- mdelay (50);
-
- /* This follows the packet driver's lead, and checks for success. */
- if (inb(ioaddr) != 0x10 && inb(ioaddr) != 0x00)
- printk(KERN_WARNING "%s: Problem turning on the transceiver power.\n",
- dev->name);
-
- hardware_init(dev);
- netif_start_queue (dev);
-
- return 0;
-}
-
-
-static void znet_tx_timeout (struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- ushort event, tx_status, rx_offset, state;
-
- outb (CR0_STATUS_0, ioaddr);
- event = inb (ioaddr);
- outb (CR0_STATUS_1, ioaddr);
- tx_status = inw (ioaddr);
- outb (CR0_STATUS_2, ioaddr);
- rx_offset = inw (ioaddr);
- outb (CR0_STATUS_3, ioaddr);
- state = inb (ioaddr);
- printk (KERN_WARNING "%s: transmit timed out, status %02x %04x %04x %02x,"
- " resetting.\n", dev->name, event, tx_status, rx_offset, state);
- if (tx_status == TX_LOST_CRS)
- printk (KERN_WARNING "%s: Tx carrier error, check transceiver cable.\n",
- dev->name);
- outb (OP0_RESET, ioaddr);
- hardware_init (dev);
- netif_wake_queue (dev);
-}
-
-static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
- short length = skb->len;
-
- if (znet_debug > 4)
- printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
-
- netif_stop_queue (dev);
-
- /* Check that the part hasn't reset itself, probably from suspend. */
- outb(CR0_STATUS_0, ioaddr);
- if (inw(ioaddr) == 0x0010 &&
- inw(ioaddr) == 0x0000 &&
- inw(ioaddr) == 0x0010) {
- if (znet_debug > 1)
- printk (KERN_WARNING "%s : waking up\n", dev->name);
- hardware_init(dev);
- znet_transceiver_power (dev, 1);
- }
-
- if (1) {
- unsigned char *buf = (void *)skb->data;
- ushort *tx_link = znet->tx_cur - 1;
- ushort rnd_len = (length + 1)>>1;
-
- dev->stats.tx_bytes+=length;
-
- if (znet->tx_cur >= znet->tx_end)
- znet->tx_cur = znet->tx_start;
- *znet->tx_cur++ = length;
- if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
- int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Convert to byte count. */
- memcpy(znet->tx_cur, buf, semi_cnt);
- rnd_len -= semi_cnt>>1;
- memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
- znet->tx_cur = znet->tx_start + rnd_len;
- } else {
- memcpy(znet->tx_cur, buf, skb->len);
- znet->tx_cur += rnd_len;
- }
- *znet->tx_cur++ = 0;
-
- spin_lock_irqsave(&znet->lock, flags);
- {
- *tx_link = OP0_TRANSMIT | CR0_CHNL;
- /* Is this always safe to do? */
- outb(OP0_TRANSMIT | CR0_CHNL, ioaddr);
- }
- spin_unlock_irqrestore (&znet->lock, flags);
-
- netif_start_queue (dev);
-
- if (znet_debug > 4)
- printk(KERN_DEBUG "%s: Transmitter queued, length %d.\n", dev->name, length);
- }
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* The ZNET interrupt handler. */
-static irqreturn_t znet_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct znet_private *znet = netdev_priv(dev);
- int ioaddr;
- int boguscnt = 20;
- int handled = 0;
-
- spin_lock (&znet->lock);
-
- ioaddr = dev->base_addr;
-
- outb(CR0_STATUS_0, ioaddr);
- do {
- ushort status = inb(ioaddr);
- if (znet_debug > 5) {
- ushort result, rx_ptr, running;
- outb(CR0_STATUS_1, ioaddr);
- result = inw(ioaddr);
- outb(CR0_STATUS_2, ioaddr);
- rx_ptr = inw(ioaddr);
- outb(CR0_STATUS_3, ioaddr);
- running = inb(ioaddr);
- printk(KERN_DEBUG "%s: interrupt, status %02x, %04x %04x %02x serial %d.\n",
- dev->name, status, result, rx_ptr, running, boguscnt);
- }
- if ((status & SR0_INTERRUPT) == 0)
- break;
-
- handled = 1;
-
- if ((status & SR0_EVENT_MASK) == SR0_TRANSMIT_DONE ||
- (status & SR0_EVENT_MASK) == SR0_RETRANSMIT_DONE ||
- (status & SR0_EVENT_MASK) == SR0_TRANSMIT_NO_CRC_DONE) {
- int tx_status;
- outb(CR0_STATUS_1, ioaddr);
- tx_status = inw(ioaddr);
- /* It's undocumented, but tx_status seems to match the i82586. */
- if (tx_status & TX_OK) {
- dev->stats.tx_packets++;
- dev->stats.collisions += tx_status & TX_NCOL_MASK;
- } else {
- if (tx_status & (TX_LOST_CTS | TX_LOST_CRS))
- dev->stats.tx_carrier_errors++;
- if (tx_status & TX_UND_RUN)
- dev->stats.tx_fifo_errors++;
- if (!(tx_status & TX_HRT_BEAT))
- dev->stats.tx_heartbeat_errors++;
- if (tx_status & TX_MAX_COL)
- dev->stats.tx_aborted_errors++;
- /* ...and the catch-all. */
- if ((tx_status | (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL)) != (TX_LOST_CRS | TX_LOST_CTS | TX_UND_RUN | TX_HRT_BEAT | TX_MAX_COL))
- dev->stats.tx_errors++;
-
- /* Transceiver may be stuck if cable
- * was removed while emitting a
- * packet. Flip it off, then on to
- * reset it. This is very empirical,
- * but it seems to work. */
-
- znet_transceiver_power (dev, 0);
- znet_transceiver_power (dev, 1);
- }
- netif_wake_queue (dev);
- }
-
- if ((status & SR0_RECEPTION) ||
- (status & SR0_EVENT_MASK) == SR0_STOP_REG_HIT) {
- znet_rx(dev);
- }
- /* Clear the interrupts we've handled. */
- outb(CR0_INT_ACK, ioaddr);
- } while (boguscnt--);
-
- spin_unlock (&znet->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static void znet_rx(struct net_device *dev)
-{
- struct znet_private *znet = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 1;
- short next_frame_end_offset = 0; /* Offset of next frame start. */
- short *cur_frame_end;
- short cur_frame_end_offset;
-
- outb(CR0_STATUS_2, ioaddr);
- cur_frame_end_offset = inw(ioaddr);
-
- if (cur_frame_end_offset == znet->rx_cur - znet->rx_start) {
- printk(KERN_WARNING "%s: Interrupted, but nothing to receive, offset %03x.\n",
- dev->name, cur_frame_end_offset);
- return;
- }
-
- /* Use same method as the Crynwr driver: construct a forward list in
- the same area of the backwards links we now have. This allows us to
- pass packets to the upper layers in the order they were received --
- important for fast-path sequential operations. */
- while (znet->rx_start + cur_frame_end_offset != znet->rx_cur &&
- ++boguscount < 5) {
- unsigned short hi_cnt, lo_cnt, hi_status, lo_status;
- int count, status;
-
- if (cur_frame_end_offset < 4) {
- /* Oh no, we have a special case: the frame trailer wraps around
- the end of the ring buffer. We've saved space at the end of
- the ring buffer for just this problem. */
- memcpy(znet->rx_end, znet->rx_start, 8);
- cur_frame_end_offset += (RX_BUF_SIZE/2);
- }
- cur_frame_end = znet->rx_start + cur_frame_end_offset - 4;
-
- lo_status = *cur_frame_end++;
- hi_status = *cur_frame_end++;
- status = ((hi_status & 0xff) << 8) + (lo_status & 0xff);
- lo_cnt = *cur_frame_end++;
- hi_cnt = *cur_frame_end++;
- count = ((hi_cnt & 0xff) << 8) + (lo_cnt & 0xff);
-
- if (znet_debug > 5)
- printk(KERN_DEBUG "Constructing trailer at location %03x, %04x %04x %04x %04x"
- " count %#x status %04x.\n",
- cur_frame_end_offset<<1, lo_status, hi_status, lo_cnt, hi_cnt,
- count, status);
- cur_frame_end[-4] = status;
- cur_frame_end[-3] = next_frame_end_offset;
- cur_frame_end[-2] = count;
- next_frame_end_offset = cur_frame_end_offset;
- cur_frame_end_offset -= ((count + 1)>>1) + 3;
- if (cur_frame_end_offset < 0)
- cur_frame_end_offset += RX_BUF_SIZE/2;
- }
-
- /* Now step forward through the list. */
- do {
- ushort *this_rfp_ptr = znet->rx_start + next_frame_end_offset;
- int status = this_rfp_ptr[-4];
- int pkt_len = this_rfp_ptr[-2];
-
- if (znet_debug > 5)
- printk(KERN_DEBUG "Looking at trailer ending at %04x status %04x length %03x"
- " next %04x.\n", next_frame_end_offset<<1, status, pkt_len,
- this_rfp_ptr[-3]<<1);
- /* Once again we must assume that the i82586 docs apply. */
- if ( ! (status & RX_RCV_OK)) { /* There was an error. */
- dev->stats.rx_errors++;
- if (status & RX_CRC_ERR) dev->stats.rx_crc_errors++;
- if (status & RX_ALG_ERR) dev->stats.rx_frame_errors++;
-#if 0
- if (status & 0x0200) dev->stats.rx_over_errors++; /* Wrong. */
- if (status & 0x0100) dev->stats.rx_fifo_errors++;
-#else
- /* maz : Wild guess... */
- if (status & RX_OVRRUN) dev->stats.rx_over_errors++;
-#endif
- if (status & RX_SRT_FRM) dev->stats.rx_length_errors++;
- } else if (pkt_len > 1536) {
- dev->stats.rx_length_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len);
- if (skb == NULL) {
- if (znet_debug)
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
-
- if (&znet->rx_cur[(pkt_len+1)>>1] > znet->rx_end) {
- int semi_cnt = (znet->rx_end - znet->rx_cur)<<1;
- memcpy(skb_put(skb,semi_cnt), znet->rx_cur, semi_cnt);
- memcpy(skb_put(skb,pkt_len-semi_cnt), znet->rx_start,
- pkt_len - semi_cnt);
- } else {
- memcpy(skb_put(skb,pkt_len), znet->rx_cur, pkt_len);
- if (znet_debug > 6) {
- unsigned int *packet = (unsigned int *) skb->data;
- printk(KERN_DEBUG "Packet data is %08x %08x %08x %08x.\n", packet[0],
- packet[1], packet[2], packet[3]);
- }
- }
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- znet->rx_cur = this_rfp_ptr;
- if (znet->rx_cur >= znet->rx_end)
- znet->rx_cur -= RX_BUF_SIZE/2;
- update_stop_hit(ioaddr, (znet->rx_cur - znet->rx_start)<<1);
- next_frame_end_offset = this_rfp_ptr[-3];
- if (next_frame_end_offset == 0) /* Read all the frames? */
- break; /* Done for now */
- this_rfp_ptr = znet->rx_start + next_frame_end_offset;
- } while (--boguscount);
-
- /* If any worth-while packets have been received, dev_rint()
- has done a mark_bh(INET_BH) for us and will work on them
- when we get to the bottom-half routine. */
-}
-
-/* The inverse routine to znet_open(). */
-static int znet_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- netif_stop_queue (dev);
-
- outb(OP0_RESET, ioaddr); /* CMD0_RESET */
-
- if (znet_debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
- /* Turn off transceiver power. */
- znet_transceiver_power (dev, 0);
-
- znet_release_resources (dev);
-
- return 0;
-}
-
-static void show_dma(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- unsigned char stat = inb (ioaddr);
- struct znet_private *znet = netdev_priv(dev);
- unsigned long flags;
- short dma_port = ((znet->tx_dma&3)<<2) + IO_DMA2_BASE;
- unsigned addr = inb(dma_port);
- short residue;
-
- addr |= inb(dma_port) << 8;
- residue = get_dma_residue(znet->tx_dma);
-
- if (znet_debug > 1) {
- flags=claim_dma_lock();
- printk(KERN_DEBUG "Stat:%02x Addr: %04x cnt:%3x\n",
- stat, addr<<1, residue);
- release_dma_lock(flags);
- }
-}
-
-/* Initialize the hardware. We have to do this when the board is open()ed
- or when we come out of suspend mode. */
-static void hardware_init(struct net_device *dev)
-{
- unsigned long flags;
- short ioaddr = dev->base_addr;
- struct znet_private *znet = netdev_priv(dev);
-
- znet->rx_cur = znet->rx_start;
- znet->tx_cur = znet->tx_start;
-
- /* Reset the chip, and start it up. */
- outb(OP0_RESET, ioaddr);
-
- flags=claim_dma_lock();
- disable_dma(znet->rx_dma); /* reset by an interrupting task. */
- clear_dma_ff(znet->rx_dma);
- set_dma_mode(znet->rx_dma, DMA_RX_MODE);
- set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start));
- set_dma_count(znet->rx_dma, RX_BUF_SIZE);
- enable_dma(znet->rx_dma);
- /* Now set up the Tx channel. */
- disable_dma(znet->tx_dma);
- clear_dma_ff(znet->tx_dma);
- set_dma_mode(znet->tx_dma, DMA_TX_MODE);
- set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start));
- set_dma_count(znet->tx_dma, znet->tx_buf_len<<1);
- enable_dma(znet->tx_dma);
- release_dma_lock(flags);
-
- if (znet_debug > 1)
- printk(KERN_DEBUG "%s: Initializing the i82593, rx buf %p tx buf %p\n",
- dev->name, znet->rx_start,znet->tx_start);
- /* Do an empty configure command, just like the Crynwr driver. This
- resets the chip to its default values. */
- *znet->tx_cur++ = 0;
- *znet->tx_cur++ = 0;
- show_dma(dev);
- outb(OP0_CONFIGURE | CR0_CHNL, ioaddr);
-
- znet_set_multicast_list (dev);
-
- *znet->tx_cur++ = 6;
- memcpy(znet->tx_cur, dev->dev_addr, 6);
- znet->tx_cur += 3;
- show_dma(dev);
- outb(OP0_IA_SETUP | CR0_CHNL, ioaddr);
- show_dma(dev);
-
- update_stop_hit(ioaddr, 8192);
- if (znet_debug > 1) printk(KERN_DEBUG "enabling Rx.\n");
- outb(OP0_RCV_ENABLE, ioaddr);
- netif_start_queue (dev);
-}
-
-static void update_stop_hit(short ioaddr, unsigned short rx_stop_offset)
-{
- outb(OP0_SWIT_TO_PORT_1 | CR0_CHNL, ioaddr);
- if (znet_debug > 5)
- printk(KERN_DEBUG "Updating stop hit with value %02x.\n",
- (rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE);
- outb((rx_stop_offset >> 6) | CR1_STOP_REG_UPDATE, ioaddr);
- outb(OP1_SWIT_TO_PORT_0, ioaddr);
-}
-
-static __exit void znet_cleanup (void)
-{
- if (znet_dev) {
- struct znet_private *znet = netdev_priv(znet_dev);
-
- unregister_netdev (znet_dev);
- kfree (znet->rx_start);
- kfree (znet->tx_start);
- free_netdev (znet_dev);
- }
-}
-
-module_init (znet_probe);
-module_exit (znet_cleanup);
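
Aside (not part of the patch): the znet tx and rx paths removed above copy each frame into a ring of 16-bit words and split the memcpy when a frame would run past the end of the ring, exactly as in znet_send_packet(). The following is a minimal standalone sketch of that split-copy idea; ring_put_frame(), struct word_ring and the buffer sizes are illustrative names chosen here, not kernel interfaces.

/* Sketch of the wrap-around copy used by the removed znet ring buffers. */
#include <stdio.h>
#include <string.h>

#define RING_WORDS 16				/* ring capacity in 16-bit words */

struct word_ring {
	unsigned short buf[RING_WORDS];
	unsigned short *cur;			/* next free slot */
};

/* Copy 'len' bytes into the ring, wrapping at the end at most once. */
static void ring_put_frame(struct word_ring *r, const unsigned char *data,
			   int len)
{
	unsigned short *end = r->buf + RING_WORDS;
	int words = (len + 1) >> 1;		/* round byte count up to words */

	if (r->cur + words > end) {
		int first = (end - r->cur) << 1;	/* bytes until the wrap point */

		memcpy(r->cur, data, first);
		memcpy(r->buf, data + first, len - first);
		r->cur = r->buf + (words - (first >> 1));
	} else {
		memcpy(r->cur, data, len);
		r->cur += words;
		if (r->cur == end)
			r->cur = r->buf;
	}
}

int main(void)
{
	struct word_ring ring;
	unsigned char frame[10] = "ABCDEFGHI";

	ring.cur = ring.buf + RING_WORDS - 3;	/* leave only 3 free words */

	/* This frame needs 5 words but only 3 remain, so the copy wraps. */
	ring_put_frame(&ring, frame, sizeof(frame));
	printf("cursor offset after wrap: %ld words\n",
	       (long)(ring.cur - ring.buf));
	return 0;
}

The driver does the same split for the rx trailer (see the memcpy of the last 8 bytes to rx_end above), which is why extra space is reserved past the end of the rx ring.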
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 19b64de7124..328f47c92e2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -76,16 +76,16 @@ MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
"port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
- "[2^x - 1], x = [6..14]. Default = "
+ "[2^x - 1], x = [7..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
"Default = 1");
@@ -1921,10 +1921,8 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
u64 hret;
ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
- if (!ehea_mcl_entry) {
- pr_err("no mem for mcl_entry\n");
+ if (!ehea_mcl_entry)
return;
- }
INIT_LIST_HEAD(&ehea_mcl_entry->list);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 27f881758d1..9b03033bb55 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -64,11 +64,10 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
}
queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
- if (!queue->queue_pages) {
- pr_err("no mem for queue_pages\n");
+ queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
+ GFP_KERNEL);
+ if (!queue->queue_pages)
return -ENOMEM;
- }
/*
* allocate pages for queue:
@@ -129,10 +128,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
void *vpage;
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- pr_err("no mem for cq\n");
+ if (!cq)
goto out_nomem;
- }
cq->attr.max_nr_of_cqes = nr_of_cqe;
cq->attr.cq_token = cq_token;
@@ -257,10 +254,8 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
struct ehea_eq *eq;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
- if (!eq) {
- pr_err("no mem for eq\n");
+ if (!eq)
return NULL;
- }
eq->adapter = adapter;
eq->attr.type = type;
@@ -428,10 +423,8 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
- if (!qp) {
- pr_err("no mem for qp\n");
+ if (!qp)
return NULL;
- }
qp->adapter = adapter;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 256bdb8e199..4989481c19f 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2190,11 +2190,10 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strcpy(info->driver, "ibm_emac");
- strcpy(info->version, DRV_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
- dev->cell_index, dev->ofdev->dev.of_node->full_name);
+ strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s",
+ dev->cell_index, dev->ofdev->dev.of_node->full_name);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 50ea12bfb57..1f7ecf57181 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -528,12 +528,9 @@ static int mal_probe(struct platform_device *ofdev)
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
- if (!mal) {
- printk(KERN_ERR
- "mal%d: out of memory allocating MAL structure!\n",
- index);
+ if (!mal)
return -ENOMEM;
- }
+
mal->index = index;
mal->ofdev = ofdev;
mal->version = of_device_is_compatible(ofdev->dev.of_node, "ibm,mcmal2") ? 2 : 1;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f2fdbb79837..c859771a990 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -637,7 +637,6 @@ static int ibmveth_open(struct net_device *netdev)
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to allocate bounce buffer\n");
rc = -ENOMEM;
goto err_out_free_irq;
}
@@ -722,9 +721,8 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
- strncpy(info->version, ibmveth_driver_version,
- sizeof(info->version) - 1);
+ strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
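
Aside (not part of the patch): the ibmveth and emac hunks above replace strncpy()/sprintf() with strlcpy()/snprintf() because strncpy() does not NUL-terminate when the source fills the destination. A minimal userspace sketch of the difference follows; bounded_copy() is a stand-in written here for demonstration and is not a kernel API.

#include <stdio.h>
#include <string.h>

/* Copy at most size-1 bytes and always NUL-terminate, like strlcpy(). */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;			/* length the caller wanted to copy */
}

int main(void)
{
	char a[8], b[8];

	/* strncpy fills all 8 bytes of 'a' and leaves no terminator... */
	strncpy(a, "ibmveth-driver", sizeof(a));
	/* ...so 'a' is not a valid C string here; bounded_copy truncates safely. */
	bounded_copy(b, "ibmveth-driver", sizeof(b));
	printf("bounded_copy result: \"%s\" (source was %zu bytes)\n",
	       b, strlen("ibmveth-driver"));
	return 0;
}

The old code worked around the problem with sizeof(...) - 1, but that still relies on the destination having been zeroed; strlcpy() makes the termination guarantee explicit.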
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index ddee4060948..3d5f6d46375 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -6,7 +6,7 @@ config NET_VENDOR_INTEL
bool "Intel devices"
default y
depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
- ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+ ARCH_ACORN || SNI_RM || SUN3 || \
GSC || BVME6000 || MVME16x || \
(ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
EXPERIMENTAL
@@ -74,6 +74,7 @@ config E1000E
tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
depends on PCI && (!SPARC32 || BROKEN)
select CRC32
+ select PTP_1588_CLOCK
---help---
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -94,6 +95,8 @@ config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
select PTP_1588_CLOCK
+ select I2C
+ select I2C_ALGOBIT
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -112,6 +115,17 @@ config IGB
To compile this driver as a module, choose M here. The module
will be called igb.
+config IGB_HWMON
+ bool "Intel(R) PCI-Express Gigabit adapters HWMON support"
+ default y
+ depends on IGB && HWMON && !(IGB=y && HWMON=m)
+ ---help---
+ Say Y if you want to expose thermal sensor data on Intel devices.
+
+ Some of our devices contain thermal sensors, both external and internal.
+ This data is available via the hwmon sysfs interface and exposes
+ the onboard sensors.
+
config IGB_DCA
bool "Direct Cache Access (DCA) Support"
default y
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index a59f0779e1c..ec800b093e7 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2928,8 +2928,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
- memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
err = -EAGAIN;
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 2b6cd02bfba..26d9cd59ec7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -81,68 +81,69 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#define E1000_MAX_INTR 10
+#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 256
-#define E1000_MIN_TXD 48
-#define E1000_MAX_82544_TXD 4096
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 256
-#define E1000_MIN_RXD 48
-#define E1000_MAX_82544_RXD 4096
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128 128 /* Used for packet split */
-#define E1000_RXBUFFER_256 256 /* Used for packet split */
-#define E1000_RXBUFFER_512 512
-#define E1000_RXBUFFER_1024 1024
-#define E1000_RXBUFFER_2048 2048
-#define E1000_RXBUFFER_4096 4096
-#define E1000_RXBUFFER_8192 8192
-#define E1000_RXBUFFER_16384 16384
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX 15
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 14e30515f6a..43462d596a4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
-
+ * and HALF_DUPLEX != DUPLEX_HALF
+ */
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
- ETH_TP_MDI_X :
- ETH_TP_MDI);
+ ETH_TP_MDI_X : ETH_TP_MDI);
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg;
else
hw->autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
+ eeprom->len);
kfree(eeprom_buff);
return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+ GFP_KERNEL);
if (!txdr)
goto err_alloc_tx;
- rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+ GFP_KERNEL);
if (!rxdr)
goto err_alloc_rx;
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
+ * then restore the new back again
+ */
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) {
-
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
-
} else {
-
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
}
value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
- /* NOTE: we don't test MSI interrupts here, yet */
- /* Hook up test interrupt handler just for this test */
+ /* NOTE: we don't test MSI interrupts here, yet
+ * Hook up test interrupt handler just for this test
+ */
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
- netdev))
+ netdev))
shared_int = false;
else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic if half
- * duplex link is detected. */
+ * duplex link is detected.
+ */
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
- 1024);
+ 1024);
if (!ret_val)
good_cnt++;
if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
e1000_check_for_link(hw);
if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
+ * so exclude FUNC_1 ports from having WoL enabled
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
+ * supported by this hardware
+ */
if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-/* BUG_ON(i != E1000_STATS_LEN); */
+/* BUG_ON(i != E1000_STATS_LEN); */
}
static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
-/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .set_phys_id = e1000_set_phys_id,
- .get_ethtool_stats = e1000_get_ethtool_stats,
- .get_sset_count = e1000_get_sset_count,
- .get_coalesce = e1000_get_coalesce,
- .set_coalesce = e1000_set_coalesce,
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = e1000_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8fedd245153..2879b9631e1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
if (hw->phy_init_script) {
msleep(20);
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of this routine. */
+ /* Save off the current value of register 0x2F5B to be restored
+ * at the end of this routine.
+ */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
case e1000_82541:
case e1000_82541_rev_2:
/* These controllers can't ack the 64-bit write when issuing the
- * reset, so use IO-mapping as a workaround to issue the reset */
+ * reset, so use IO-mapping as a workaround to issue the reset
+ */
E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
break;
}
- /* After MAC reset, force reload of EEPROM to restore power-on settings to
- * device. Later controllers reload the EEPROM automatically, so just wait
- * for reload to complete.
+ /* After MAC reset, force reload of EEPROM to restore power-on settings
+ * to device. Later controllers reload the EEPROM automatically, so
+ * just wait for reload to complete.
*/
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
msleep(5);
}
- /* Setup the receive address. This involves initializing all of the Receive
- * Address Registers (RARs 0 - 15).
+ /* Setup the receive address. This involves initializing all of the
+ * Receive Address Registers (RARs 0 - 15).
*/
e1000_init_rx_addrs(hw);
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
- * occurring when accessing our register space */
+ * occurring when accessing our register space
+ */
E1000_WRITE_FLUSH();
}
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
case e1000_82546_rev_3:
break;
default:
- /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ /* Workaround for PCI-X problem when BIOS sets MMRBC
+ * incorrectly.
+ */
if (hw->bus_type == e1000_bus_type_pcix
&& e1000_pcix_get_mmrbc(hw) > 2048)
e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity
- * error crash in a PCI slot. */
+ * error crash in a PCI slot.
+ */
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
}
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ew32(FCRTL, 0);
ew32(FCRTH, 0);
} else {
- /* We need to set up the Receive Threshold high and low water marks
- * as well as (optionally) enabling the transmission of XON frames.
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
*/
if (hw->fc_send_xon) {
ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e1000_config_collision_dist(hw);
/* Check for a software override of the flow control settings, and setup
- * the device accordingly. If auto-negotiation is enabled, then software
- * will have to set the "PAUSE" bits to the correct value in the Tranmsit
- * Config Word Register (TXCW) and re-start auto-negotiation. However, if
- * auto-negotiation is disabled, then software will have to manually
- * configure the two flow control enable bits in the CTRL register.
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
*
* The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames, but
- * not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do
- * not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
*/
switch (hw->fc) {
case E1000_FC_NONE:
- /* Flow control is completely disabled by a software over-ride. */
+ /* Flow ctrl is completely disabled by a software over-ride */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case E1000_FC_RX_PAUSE:
- /* RX Flow control is enabled and TX Flow control is disabled by a
- * software over-ride. Since there really isn't a way to advertise
- * that we are capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case E1000_FC_TX_PAUSE:
- /* TX Flow control is enabled, and RX Flow control is disabled, by a
- * software over-ride.
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case E1000_FC_FULL:
- /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
- /* Since auto-negotiation is enabled, take the link out of reset (the link
- * will be in reset, because we previously reset the chip). This will
- * restart auto-negotiation. If auto-negotiation is successful then the
- * link-up status bit will be set and the flow control enable bits (RFCE
- * and TFCE) will be set according to their negotiated value.
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
*/
e_dbg("Auto-negotiation enabled\n");
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
hw->txcw = txcw;
msleep(1);
- /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
- * indication in the Device Status Register. Time-out if a link isn't
- * seen in 500 milliseconds seconds (Auto-negotiation should complete in
- * less than 500 milliseconds even if the other end is doing it in SW).
- * For internal serdes, we just assume a signal is present, then poll.
+ /* If we have a signal (the cable is plugged in) then poll for a
+ * "Link-Up" indication in the Device Status Register. Time-out if a
+ * link isn't seen in 500 milliseconds (Auto-negotiation should
+ * complete in less than 500 milliseconds even if the other end is doing
+ * it in SW). For internal serdes, we just assume a signal is present,
+ * then poll.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
- * e1000_check_for_link. This routine will force the link up if
- * we detect a signal. This will allow us to communicate with
- * non-autonegotiating link partners.
+ * e1000_check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
- /* With 82543, we need to force speed and duplex on the MAC equal to what
- * the PHY speed and duplex configuration is. In addition, we need to
- * perform a hardware reset on the PHY to take it out of reset.
+ /* With 82543, we need to force speed and duplex on the MAC equal to
+ * what the PHY speed and duplex configuration is. In addition, we need
+ * to perform a hardware reset on the PHY to take it out of reset.
*/
if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default. */
+ * resolution as hardware default.
+ */
if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (hw->autoneg) {
/* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ * and perform autonegotiation
+ */
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
- * depending on value from forced_speed_duplex. */
+ * depending on value from forced_speed_duplex.
+ */
e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start
+ * auto-negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the
- *hw's ability to send PAUSE frames.
+ * hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Are we forcing Full or Half Duplex? */
if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_10_full) {
- /* We want to force full duplex so we SET the full duplex bits in the
- * Device and MII Control Registers.
+ /* We want to force full duplex so we SET the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n");
} else {
- /* We want to force half duplex so we CLEAR the full duplex bits in
- * the Device and MII Control Registers.
+ /* We want to force half duplex so we CLEAR the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed are duplex are forced.
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
+ * MDI forced whenever speed and duplex are forced.
*/
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ /* Wait for autoneg to complete or 4.5 seconds to expire */
for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
msleep(100);
}
if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
- /* We didn't get link. Reset the DSP and wait again for link. */
+ /* We didn't get link. Reset the DSP and wait again
+ * for link.
+ */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
- /* This loop will early-out if the link condition has been met. */
+ /* This loop will early-out if the link condition has been
+ * met
+ */
for (i = PHY_FORCE_TIME; i > 0; i--) {
if (mii_status_reg & MII_SR_LINK_STATUS)
break;
msleep(100);
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_m88) {
- /* Because we reset the PHY above, we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock. This value
- * defaults back to a 2.5MHz clock when the PHY is reset.
+ /* Because we reset the PHY above, we need to re-force TX_CLK in
+ * the Extended PHY Specific Control Register to 25MHz clock.
+ * This value defaults back to a 2.5MHz clock when the PHY is
+ * reset.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* In addition, because of the s/w reset above, we need to enable CRS on
- * TX. This must be set for both full and half duplex operation.
+ /* In addition, because of the s/w reset above, we need to
+ * enable CRS on Tx. This must be set for both full and half
+ * duplex operation.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
- * MAC speed/duplex configuration.*/
+ * MAC speed/duplex configuration.
+ */
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ &phy_data);
if (ret_val)
return ret_val;
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000;
else if ((phy_data & M88E1000_PSSR_SPEED) ==
- M88E1000_PSSR_100MBS)
+ M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100;
}
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register
- * (Address 4) and the Auto_Negotiation Base Page Ability
- * Register (Address 5) to determine how flow control was
- * negotiated.
+ * (Address 4) and the Auto_Negotiation Base Page
+ * Ability Register (Address 5) to determine how flow
+ * control was negotiated.
*/
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
+ /* Two bits in the Auto Negotiation Advertisement
+ * Register (Address 4) and two bits in the Auto
+ * Negotiation Base Page Ability Register (Address 5)
+ * determine flow control for both the PHY and the link
+ * partner. The following table, taken out of the IEEE
+ * 802.3ab/D6.0 dated March 25, 1999, describes these
+ * PAUSE resolution bits and how flow control is
+ * determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 0 | DC | DC | E1000_FC_NONE
* 0 | 1 | 0 | DC | E1000_FC_NONE
* 0 | 1 | 1 | 0 | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | DC | 1 | DC | E1000_FC_FULL
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ /* Now we need to check if the user selected Rx
+ * ONLY of pause frames. In this case, we had
+ * to advertise FULL flow control because we
+ * could not advertise Rx ONLY. Hence, we must
+ * now check to see if we need to turn OFF the
+ * TRANSMISSION of PAUSE frames.
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
*
*/
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
*
*/
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* Per the IEEE spec, at this point flow control should
+ * be disabled. However, we want to consider that we
+ * could be connected to a legacy switch that doesn't
+ * advertise desired flow control, but can be forced on
+ * the link partner. So if we advertised no flow
+ * control, that is what we will resolve to. If we
+ * advertised some kind of receive capability (Rx Pause
+ * Only or Full Flow Control) and the link partner
+ * advertised none, we will configure ourselves to
+ * enable Rx Flow Control only. We can do this safely
+ * for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx,
+ * no harm done since we won't be receiving any PAUSE
+ * frames anyway. If the intent on the link partner was
+ * to have flow control enabled, then by us enabling Rx
+ * only, we can at least receive pause frames and
+ * process them. This is a good idea because in most
+ * cases, since we are predominantly a server NIC, more
+ * often than not we will be asked to delay transmission
+ * of packets rather than to ask our link partner to
+ * pause transmission of frames.
*/
else if ((hw->original_fc == E1000_FC_NONE ||
hw->original_fc == E1000_FC_TX_PAUSE) ||
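As an editorial aside for readers tracing the PAUSE/ASM_DIR tables quoted above, the same resolution logic fits in a small stand-alone C sketch; the enum and function names below are illustrative and are not the driver's E1000_FC_* symbols.

#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Resolve flow control from the advertised PAUSE/ASM_DIR bits of the
 * local device and the link partner, per the table in the comments.
 */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause, bool lp_asm)
{
        if (loc_pause && lp_pause)
                return FC_FULL;         /* row 1 | DC | 1 | DC */
        if (!loc_pause && loc_asm && lp_pause && lp_asm)
                return FC_TX_PAUSE;     /* row 0 |  1 | 1 |  1 */
        if (loc_pause && loc_asm && !lp_pause && lp_asm)
                return FC_RX_PAUSE;     /* row 1 |  1 | 0 |  1 */
        return FC_NONE;                 /* every other combination */
}

As the comments note, the driver additionally downgrades the FULL result to Rx-only pause when the user originally asked for Rx pause only.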
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = false;
- /* Check if there was DownShift, must be checked immediately after
- * link-up */
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
e1000_check_downshift(hw);
/* If we are on 82544 or 82543 silicon and speed/duplex
- * are forced to 10H or 10F, then we will implement the polarity
- * reversal workaround. We disable interrupts first, and upon
- * returning, place the devices interrupt state to its previous
- * value except for the link status change interrupt which will
+ * are forced to 10H or 10F, then we will implement the
+ * polarity reversal workaround. We disable interrupts
+ * first, and upon returning, place the device's
+ * interrupt state to its previous value except for the
+ * link status change interrupt which will
* happen due to the execution of this workaround.
*/
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
}
- /* Configure Flow Control now that Auto-Neg has completed. First, we
- * need to restore the desired flow control settings because we may
- * have had to re-autoneg with a different link partner.
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control settings
+ * because we may have had to re-autoneg with a different link
+ * partner.
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
/* At this point we know that we are on copper and we have
- * auto-negotiated link. These are conditions for checking the link
- * partner capability register. We use the link speed to determine if
- * TBI compatibility needs to be turned on or off. If the link is not
- * at gigabit speed, then TBI compatibility is not needed. If we are
- * at gigabit speed, we turn on TBI compatibility.
+ * auto-negotiated link. These are conditions for checking the
+ * link partner capability register. We use the link speed to
+ * determine if TBI compatibility needs to be turned on or off.
+ * If the link is not at gigabit speed, then TBI compatibility
+ * is not needed. If we are at gigabit speed, we turn on TBI
+ * compatibility.
*/
if (hw->tbi_compatibility_en) {
u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
return ret_val;
}
if (speed != SPEED_1000) {
- /* If link speed is not set to gigabit speed, we do not need
- * to enable TBI compatibility.
+ /* If link speed is not set to gigabit speed, we
+ * do not need to enable TBI compatibility.
*/
if (hw->tbi_compatibility_on) {
- /* If we previously were in the mode, turn it off. */
+ /* If we previously were in the mode,
+ * turn it off.
+ */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP;
ew32(RCTL, rctl);
hw->tbi_compatibility_on = false;
}
} else {
- /* If TBI compatibility is was previously off, turn it on. For
- * compatibility with a TBI link partner, we will store bad
- * packets. Some frames have an additional byte on the end and
+ /* If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
* will look like CRC errors to the hardware.
*/
if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
*duplex = FULL_DUPLEX;
}
- /* IGP01 PHY may advertise full duplex operation after speed downgrade even
- * if it is operating at half duplex. Here we set the duplex settings to
- * match the duplex in the link partner's capabilities.
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade
+ * even if it is operating at half duplex. Here we set the duplex
+ * settings to match the duplex in the link partner's capabilities.
*/
if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
*/
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Raise the clock input to the Management Data Clock (by setting the MDC
- * bit), and then delay 10 microseconds.
+ /* Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
*/
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Lower the clock input to the Management Data Clock (by clearing the MDC
- * bit), and then delay 10 microseconds.
+ /* Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
while (mask) {
- /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
- * then raising and lowering the Management Data Clock. A "0" is
- * shifted out to the PHY by setting the MDIO bit to "0" and then
- * raising and lowering the clock.
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
*/
if (data & mask)
ctrl |= E1000_CTRL_MDIO;
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
u8 i;
/* In order to read a register from the PHY, we need to shift in a total
- * of 18 bits from the PHY. The first two bit (turnaround) times are used
- * to avoid contention on the MDIO pin when a read operation is performed.
- * These two bits are ignored by us and thrown away. Bits are "shifted in"
- * by raising the input to the Management Data Clock (setting the MDC bit),
- * and then reading the value of the MDIO bit.
+ * of 18 bits from the PHY. The first two bit (turnaround) times are
+ * used to avoid contention on the MDIO pin when a read operation is
+ * performed. These two bits are ignored by us and thrown away. Bits are
+ * "shifted in" by raising the input to the Management Data Clock
+ * (setting the MDC bit), and then reading the value of the MDIO bit.
*/
ctrl = er32(CTRL);
- /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO;
ew32(CTRL, ctrl);
E1000_WRITE_FLUSH();
- /* Raise and Lower the clock before reading in the data. This accounts for
- * the turnaround bits. The first clock occurred when we clocked out the
- * last bit of the Register Address.
+ /* Raise and Lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked out
+ * the last bit of the Register Address.
*/
e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl);
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
+ * Control register. The MAC will take care of interfacing with
+ * the PHY to retrieve the desired data.
*/
if (hw->mac_type == e1000_ce4100) {
mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*phy_data = (u16) mdic;
}
} else {
- /* We must first send a preamble through the MDIO pin to signal the
- * beginning of an MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We must first send a preamble through the MDIO pin to signal
+ * the beginning of an MII instruction. This is done by sending
+ * 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the next few fields that are required for a read
* operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine five different times. The format of
- * a MII read instruction consists of a shift out of 14 bits and is
- * defined as follows:
+ * e1000_shift_out_mdi_bits routine five different times. The
+ * format of a MII read instruction consists of a shift out of
+ * 14 bits and is defined as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
- * followed by a shift in of 18 bits. This first two bits shifted in
- * are TurnAround bits used to avoid contention on the MDIO pin when a
- * READ operation is performed. These two bits are thrown away
- * followed by a shift in of 16 bits which contains the desired data.
+ * followed by a shift in of 18 bits. The first two bits
+ * shifted in are TurnAround bits used to avoid contention on
+ * the MDIO pin when a READ operation is performed. These two
+ * bits are thrown away followed by a shift in of 16 bits which
+ * contains the desired data.
*/
mdic = ((reg_addr) | (phy_addr << 5) |
(PHY_OP_READ << 10) | (PHY_SOF << 12));
e1000_shift_out_mdi_bits(hw, mdic, 14);
- /* Now that we've shifted out the read command to the MII, we need to
- * "shift in" the 16-bit value (18 total bits) of the requested PHY
- * register address.
+ /* Now that we've shifted out the read command to the MII, we
+ * need to "shift in" the 16-bit value (18 total bits) of the
+ * requested PHY register address.
*/
*phy_data = e1000_shift_in_mdi_bits(hw);
}
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
}
} else {
- /* We'll need to use the SW defined pins to shift the write command
- * out to the PHY. We first send a preamble to the PHY to signal the
- * beginning of the MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
- /* Now combine the remaining required fields that will indicate a
- * write operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine for each field in the command. The
- * format of a MII write instruction is as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ /* Now combine the remaining required fields that will indicate
+ * a write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the
+ * command. The format of a MII write instruction is as follows:
+ * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
*/
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14));
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
- /* Read the device control register and assert the E1000_CTRL_PHY_RST
- * bit. Then, take it out of reset.
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
* For e1000 hardware, we delay for 10ms between the assert
- * and deassert.
+ * and de-assert.
*/
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH();
} else {
- /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
- * bit to put the PHY into reset. Then, take it out of reset.
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
*/
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
/* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ /* Local/Remote Receiver Information are only valid at 1000
+ * Mbps
+ */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
}
if (eeprom->type == e1000_eeprom_spi) {
- /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
- * 32KB (incremented by powers of 2).
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+ * 128B to 32KB (incremented by powers of 2).
*/
/* Set to default value for initial eeprom read. */
eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom_size =
(eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we
- * bump eeprom_size up one to ensure that "1" (which maps to 256B)
- * is never the result used in the shifting logic below. */
+ * bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+ */
if (eeprom_size)
eeprom_size++;
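For reference, the size mapping the comment spells out (a field of 0..8 covering 128 B through 32 KB in powers of two, counted in 16-bit words) reduces to a one-liner; this is an illustration of the arithmetic, not the driver's code.

/* 0 -> 64 words (128 B), 1 -> 128 words (256 B), ... 8 -> 16384 words (32 KB) */
static unsigned int spi_eeprom_words(unsigned int size_field)
{
        return 64u << size_field;
}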
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
*/
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait 50 microseconds.
*/
*eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
eecd |= E1000_EECD_DO;
}
do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK bit
+ * controls the clock input to the EEPROM). A "0" is shifted
+ * out to the EEPROM by setting "DI" to "0" and then raising and
+ * then lowering the clock.
*/
eecd &= ~E1000_EECD_DI;
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
/* In order to read a register from the EEPROM, we need to shift 'count'
* bits in from the EEPROM. Bits are "shifted in" by raising the clock
- * input to the EEPROM (setting the SK bit), and then reading the value of
- * the "DO" bit. During this "shifting in" process the "DI" bit should
- * always be clear.
+ * input to the EEPROM (setting the SK bit), and then reading the value
+ * of the "DO" bit. During this "shifting in" process the "DI" bit
+ * should always be clear.
*/
eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM;
/* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
- * acquired the EEPROM at this point, so any returns should release it */
+ * acquired the EEPROM at this point, so any returns should release it
+ */
if (eeprom->type == e1000_eeprom_spi) {
u16 word_in;
u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
eeprom->address_bits);
- /* Read the data. The address of the eeprom internally increments with
- * each byte (spi) being read, saving on the overhead of eeprom setup
- * and tear-down. The address counter will roll over if reading beyond
- * the size of the eeprom, thus allowing the entire memory to be read
- * starting from any offset. */
+ /* Read the data. The address of the eeprom internally
+ * increments with each byte (spi) being read, saving on the
+ * overhead of eeprom setup and tear-down. The address counter
+ * will roll over if reading beyond the size of the eeprom, thus
+ * allowing the entire memory to be read starting from any
+ * offset.
+ */
for (i = 0; i < words; i++) {
word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset + i),
eeprom->address_bits);
- /* Read the data. For microwire, each word requires the overhead
- * of eeprom setup and tear-down. */
+ /* Read the data. For microwire, each word requires the
+ * overhead of eeprom setup and tear-down.
+ */
data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw);
}
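The SPI read sequence the comments above describe, an opcode (with the optional A8 bit), one address, then a stream of byte-swapped words relying on the part's internal auto-increment, looks roughly like the sketch below; spi_shift_out()/spi_shift_in() and the opcode values are assumed stand-ins for the driver's e1000_shift_out_ee_bits()/e1000_shift_in_ee_bits() helpers and EEPROM_*_OPCODE_SPI defines.

#include <stddef.h>
#include <stdint.h>

#define RD_OPCODE       0x03    /* assumed SPI read opcode */
#define A8_OPCODE       0x08    /* assumed opcode bit carrying address bit 8 */

void spi_shift_out(uint16_t data, unsigned int bits);   /* assumed primitive */
uint16_t spi_shift_in(unsigned int bits);               /* assumed primitive */

static void spi_eeprom_read(uint16_t offset, uint16_t *data, size_t words,
                            unsigned int address_bits)
{
        uint8_t opcode = RD_OPCODE;
        size_t i;

        /* small parts fold the 9th address bit into the opcode */
        if (address_bits == 8 && offset >= 128)
                opcode |= A8_OPCODE;

        spi_shift_out(opcode, 8);
        spi_shift_out((uint16_t)(offset * 2), address_bits);    /* byte address */

        /* the internal address auto-increments, so the whole range streams
         * without re-issuing setup for every word
         */
        for (i = 0; i < words; i++) {
                uint16_t w = spi_shift_in(16);

                data[i] = (uint16_t)((w >> 8) | (w << 8));      /* byte swap */
        }
}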
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
/* Send the data */
- /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ /* Loop to allow for up to whole page write (32 bytes) of
+ * eeprom
+ */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_ee_bits(hw, word_out, 16);
widx++;
- /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
- * operation, while the smaller eeproms are capable of an 8-byte
- * PAGE WRITE operation. Break the inner loop to pass new address
+ /* Some larger eeprom sizes are capable of a 32-byte
+ * PAGE WRITE operation, while the smaller eeproms are
+ * capable of an 8-byte PAGE WRITE operation. Break the
+ * inner loop to pass new address
*/
if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw);
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Send the data */
e1000_shift_out_ee_bits(hw, data[words_written], 16);
- /* Toggle the CS line. This in effect tells the EEPROM to execute
- * the previous command.
+ /* Toggle the CS line. This in effect tells the EEPROM to
+ * execute the previous command.
*/
e1000_standby_eeprom(hw);
- /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
+ /* Read DO repeatedly until it is high (equal to '1'). The
+ * EEPROM will signal that the command has been completed by
+ * raising the DO signal. If DO does not go high in 10
+ * milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of the
- * manageability unit */
+ * manageability unit
+ */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
E1000_WRITE_FLUSH();
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
- /* This packet should not be counted as a CRC error. */
+ /* This packet should not be counted as a CRC error. */
stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
+ /* This packet does count as a Good Packet Received. */
stats->gprc++;
- /* Adjust the Good Octets received counters */
+ /* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
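The carry handling in this hunk, where gorcl/gorch form one 64-bit Good Octets counter split across two 32-bit halves, amounts to the following generic illustration (frame_len is always far below 2^31, so watching the top bit of the low half is sufficient).

#include <stdint.h>

static void add_good_octets(uint32_t *lo, uint32_t *hi, uint32_t frame_len)
{
        uint32_t carry_bit = *lo & 0x80000000u;

        *lo += frame_len;
        /* top bit was set and is now clear: the low half wrapped */
        if (carry_bit && !(*lo & 0x80000000u))
                (*hi)++;
}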
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
- * find the polarity status */
+ /* If speed is 1000 Mbps, must read the
+ * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
+ */
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity_reversed :
e1000_rev_polarity_normal;
} else {
- /* For 10 Mbps, read the polarity bit in the status register. (for
- * 100 Mbps this bit is always 0) */
+ /* For 10 Mbps, read the polarity bit in the status
+ * register. (for 100 Mbps this bit is always 0)
+ */
*polarity =
(phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
} else {
if (hw->dsp_config_state == e1000_dsp_config_activated) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
if (hw->ffe_config_state == e1000_ffe_config_active) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
- * from the lowest speeds starting from 10Mbps. The capability is used for
- * Dx transitions and states */
+ * from the lowest speeds starting from 10Mbps. The capability is used
+ * for Dx transitions and states
+ */
if (hw->mac_type == e1000_82541_rev_2
|| hw->mac_type == e1000_82547_rev_2) {
ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return ret_val;
}
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val =
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 294da56b824..8502c625dbe 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
-
static int __init e1000_init_module(void)
{
int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed
* from memory.
**/
-
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
- E1000_DESC_UNUSED(ring));
+ E1000_DESC_UNUSED(ring));
}
}
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset ***
- *
**/
-
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
- * settings across a power-down/up cycle */
+ * settings across a power-down/up cycle
+ */
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
- * (c) SoL/IDER session is active */
+ * (c) SoL/IDER session is active
+ */
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- /*
- * Setting DOWN must be after irq_disable to prevent
+ /* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling.
*/
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the tx fifo also stores 16 bytes of information about the tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break;
}
- /* if short on rx space, rx wins and must trump tx
- * adjustment or use Early Receive if available */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
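Putting the two comments above together, the packet-buffer rebalance is: take the Tx shortfall out of the Rx allocation, then clamp Rx back up to its own minimum. A simplified sketch, with uint32_t standing in for the driver's u32 and the mac-type-specific alignment step omitted:

#include <stdint.h>

static uint32_t rebalance_pba(uint32_t rx_pba, uint32_t tx_space,
                              uint32_t min_tx_space, uint32_t min_rx_space)
{
        if (tx_space < min_tx_space && (min_tx_space - tx_space) < rx_pba) {
                rx_pba -= min_tx_space - tx_space;

                /* if that leaves Rx short, Rx wins and trumps the Tx
                 * adjustment
                 */
                if (rx_pba < min_rx_space)
                        rx_pba = min_rx_space;
        }
        return rx_pba;
}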
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
- /*
- * flow control settings:
+ /* flow control settings:
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
- * can cause a loss of link at power off or driver unload */
+ * can cause a loss of link at power off or driver unload
+ */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable, make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /*
- * there is a workaround being applied below that limits
+ /* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
+ /* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* before reading the EEPROM, reset the controller to
- * put the device in a known good starting state */
+ * put the device in a known good starting state
+ */
e1000_reset_hw(hw);
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
- /*
- * set MAC address to all zeroes to invalidate and temporary
+ /* set MAC address to all zeroes to invalidate and temporarily
* disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the
@@ -1123,9 +1118,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -1170,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port */
+ * wake on lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
@@ -1178,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
@@ -1271,7 +1267,6 @@ err_pci_reg:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-
static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1307,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function
**/
-
static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1338,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
-
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1368,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-
static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1402,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
e1000_configure(adapter);
err = e1000_request_irq(adapter);
@@ -1445,7 +1438,6 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1460,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it) */
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
if ((hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@@ -1484,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
- * write location to cross 64k boundary due to errata 23 */
+ * write location to cross 64k boundary due to errata 23
+ */
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
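Errata 23, referenced above, forbids a buffer from straddling a 64 KB boundary on first-rev 82545/82546 parts. The check itself is simple; this is a generic illustration, not the driver's exact helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* two addresses sit in the same 64 KB window exactly when they agree in
 * every bit above bit 15
 */
static bool crosses_64k_boundary(uintptr_t begin, size_t len)
{
        if (len == 0)
                return false;
        return ((begin ^ (begin + len - 1)) >> 16) != 0;
}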
@@ -1501,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
*
* Return 0 on success, negative on failure
**/
-
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr)
{
@@ -1510,11 +1503,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -1578,7 +1568,6 @@ setup_tx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1603,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
*
* Configure the Tx unit of the MAC after a reset.
**/
-
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
@@ -1624,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0);
ew32(TDH, 0);
- adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
- adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1680,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
- * need this to apply a workaround later in the send path. */
+ * need this to apply a workaround later in the send path.
+ */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
@@ -1696,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
@@ -1705,11 +1695,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
desc_len = sizeof(struct e1000_rx_desc);
@@ -1778,7 +1765,6 @@ setup_rx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1847,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1869,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
*
* Configure the Rx unit of the MAC after a reset.
**/
-
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
@@ -1902,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
switch (adapter->num_rx_queues) {
case 1:
default:
@@ -1912,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
- adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
- adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDT : E1000_82542_RDT);
break;
}
@@ -1939,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*
* Free all transmit software resources
**/
-
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -1962,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
*
* Free all transmit software resources
**/
-
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -1997,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -2033,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2049,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
*
* Free all receive software resources
**/
-
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2072,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
*
* Free all receive software resources
**/
-
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2086,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2145,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2205,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2240,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-
static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2253,10 +2232,8 @@ static void e1000_set_rx_mode(struct net_device *netdev)
int mta_reg_count = E1000_NUM_MTA_REGISTERS;
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
- if (!mcarray) {
- e_err(probe, "memory allocation failed\n");
+ if (!mcarray)
return;
- }
/* Check for Promiscuous and All Multicast modes */
@@ -2326,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
}
/* write the hash table completely, write from bottom to avoid
- * both stupid write combining chipsets, and flushing each write */
+ * both stupid write combining chipsets, and flushing each write
+ */
for (i = mta_reg_count - 1; i >= 0 ; i--) {
- /*
- * If we are on an 82544 has an errata where writing odd
+ /* If we are on an 82544, there is an errata where writing odd
* offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always
* writing the odd offset first
@@ -2467,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
bool txb2b = true;
/* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
ctrl = er32(CTRL);
pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2542,7 +2519,8 @@ link_up:
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
@@ -2552,8 +2530,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
@@ -2668,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2705,10 +2680,11 @@ set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
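Two details of this hunk are easy to miss: the "bias towards Bulk" step only moves part-way toward a higher interrupt rate, and the ITR register is programmed as an inter-interrupt gap in 256 ns units. A minimal sketch of both, assuming itr values in interrupts per second as in the surrounding code:

#include <stdint.h>

/* when the target rate rises, step by a quarter of the target instead of
 * jumping, so short bursts don't immediately leave bulk-latency mode
 */
static uint32_t step_itr(uint32_t cur_itr, uint32_t new_itr)
{
        if (new_itr > cur_itr) {
                uint32_t stepped = cur_itr + (new_itr >> 2);

                new_itr = stepped < new_itr ? stepped : new_itr;
        }
        return new_itr;
}

/* interrupts/sec -> register value, matching the ew32(ITR, ...) write */
static uint32_t itr_reg_value(uint32_t itr)
{
        return 1000000000u / (itr * 256u);
}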
@@ -2870,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
+ * DMA'd to the controller
+ */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
tx_ring->last_tx_tso = false;
@@ -2878,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
+ * in TSO mode. Append 4-byte sentinel desc
+ */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
/* work-around for errata 10 and it applies
@@ -2891,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
+ * terminating buffers within evenly-aligned dwords.
+ */
if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
@@ -2903,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2934,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
size -= 4;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
- * dwords. */
+ * dwords.
+ */
bufend = (unsigned long)
page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1;
@@ -3003,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3044,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@@ -3099,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3114,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
}
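
The stop/recheck sequence above is the usual producer-side pattern: publish the stopped state, issue a full memory barrier so the store cannot be reordered with the re-read of the free-descriptor count, then check again in case the cleanup path freed descriptors in the meantime. A rough userspace analogue of that pattern, purely illustrative and not driver API (the stubbed queue flag and descriptor count are assumptions of the sketch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_stopped;
static atomic_int unused_descs;

static int maybe_stop_tx(int needed)
{
	if (atomic_load(&unused_descs) >= needed)
		return 0;

	atomic_store(&queue_stopped, 1);	/* netif_stop_queue() stand-in */

	/* full barrier, mirroring the smp_mb() above: make the stop visible
	 * before re-reading the descriptor count
	 */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&unused_descs) < needed)
		return -1;			/* really full, stay stopped */

	atomic_store(&queue_stopped, 0);	/* cleanup raced us, restart */
	return 0;
}

int main(void)
{
	atomic_store(&unused_descs, 8);
	printf("%d\n", maybe_stop_tx(4));	/* 0: enough descriptors */
	printf("%d\n", maybe_stop_tx(16));	/* -1: queue stays stopped */
	return 0;
}
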
static int e1000_maybe_stop_tx(struct net_device *netdev,
- struct e1000_tx_ring *tx_ring, int size)
+ struct e1000_tx_ring *tx_ring, int size)
{
if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -3138,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso;
unsigned int f;
- /* This goes back to the question of how to logically map a tx queue
+ /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
+ * if using multiple Tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again.
+ */
tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) {
@@ -3166,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
- * drops. */
+ * drops.
+ */
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
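
For reference, the min(mss << 2, max_per_txd) cap is what enforces the ceil(buffer len/mss) <= 4 assumption described in the comment above: no single buffer handed to the hardware can span more than four MSS-sized chunks. A tiny standalone illustration, with the MSS of 1460 and the 16 KB starting cap as assumed sample values:

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;		/* typical Ethernet MSS */
	unsigned int max_per_txd = 16384;	/* assumed starting cap */
	unsigned int capped;

	/* mirror max_per_txd = min(mss << 2, max_per_txd) */
	capped = (mss << 2) < max_per_txd ? (mss << 2) : max_per_txd;

	/* 1460 << 2 = 5840, and ceil(5840 / 1460) = 4 */
	printf("capped max_per_txd = %u = 4 * %u\n", capped, mss);
	return 0;
}
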
@@ -3182,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements
* NOTE: this is a TSO only workaround
* if end byte alignment not correct move us
- * into the next dword */
- if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ * into the next dword
+ */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1)
+ & 4)
break;
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
@@ -3231,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time */
+ * head, otherwise try next time
+ */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
@@ -3270,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
- nr_frags, mss);
+ nr_frags, mss);
if (count) {
netdev_sent_queue(netdev, skb->len);
@@ -3372,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */
e1000_regdump(adapter);
- /*
- * transmit dump
- */
+ /* transmit dump */
pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3435,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
}
rx_ring_summary:
- /*
- * receive dump
- */
+ /* receive dump */
pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format
@@ -3502,7 +3489,6 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-
static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3530,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog.
**/
-
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
/* only return the current stats */
@@ -3544,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3581,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs */
+ * however with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@@ -3617,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-
void e1000_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3628,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -3719,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -3773,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-
static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
@@ -3784,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
- /*
- * we might have caused the interrupt, but the above
+ /* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is
* down there is nothing to do so return handled
*/
@@ -3811,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi);
} else {
/* this really should not happen! if it does it is basically a
- * bug, but not a hard error, so enable ints and continue */
+ * bug, but not a hard error, so enable ints and continue
+ */
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
@@ -3825,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3916,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
@@ -3963,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data
**/
-
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
{
@@ -3999,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -4095,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4107,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
- * too */
+ * too
+ */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
@@ -4123,7 +4108,7 @@ process_skb:
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
@@ -4141,38 +4126,42 @@ process_skb:
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr, length);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += (skb->len - 4); /* don't count FCS */
if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4214,8 +4203,7 @@ next_desc:
return cleaned;
}
-/*
- * this should improve performance for small packets with large amounts
+/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
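
The idea hinted at by the comment above is the usual copybreak trade-off: when a received packet is small, copy it into a right-sized buffer and recycle the large ring buffer, spending one memcpy to avoid handing oversized buffers up the stack. A hedged userspace sketch of that idea (the 256-byte threshold and the buffer handling are assumptions of the sketch, not the driver's code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define COPYBREAK 256	/* assumed small-packet threshold for this sketch */

struct pkt {
	unsigned char *data;
	size_t len;
};

/* If the packet is small, copy it into a right-sized buffer so the large
 * ring buffer can be recycled immediately; otherwise hand it up as-is.
 */
static int check_copybreak(const struct pkt *ring_buf, struct pkt *out)
{
	if (ring_buf->len > COPYBREAK)
		return 0;

	out->data = malloc(ring_buf->len);
	if (!out->data)
		return 0;
	memcpy(out->data, ring_buf->data, ring_buf->len);
	out->len = ring_buf->len;
	return 1;
}

int main(void)
{
	unsigned char payload[64] = "small packet";
	struct pkt rx = { payload, sizeof(payload) };
	struct pkt copy = { NULL, 0 };

	printf("copybreak used: %d\n", check_copybreak(&rx, &copy));
	free(copy.data);
	return 0;
}
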
@@ -4319,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4386,10 +4374,9 @@ next_desc:
* @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
-
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring, int cleaned_count)
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -4430,7 +4417,7 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
+ buffer_info->page, 0,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4460,7 +4447,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
@@ -4470,7 +4458,6 @@ check_page:
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
-
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count)
@@ -4541,8 +4528,7 @@ map_skb:
break; /* while !buffer_info->skb */
}
- /*
- * XXX if it was allocated cleanly it will never map to a
+ /* XXX if it was allocated cleanly it will never map to a
* boundary crossing
*/
@@ -4580,7 +4566,8 @@ map_skb:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
@@ -4590,7 +4577,6 @@ map_skb:
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter:
**/
-
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4603,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
+ * we assume back-to-back
+ */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4616,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
adapter->smartspeed++;
if (!e1000_phy_setup_autoneg(hw) &&
!e1000_read_phy_reg(hw, PHY_CTRL,
- &phy_ctrl)) {
+ &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4647,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq:
* @cmd:
**/
-
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -4666,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq:
* @cmd:
**/
-
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
@@ -4928,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -5131,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 750fc0194f3..c9cde352b1c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
- .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD
}}
};
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_int_delay = opt.def;
}
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
break;
case 4:
e_dev_info("%s set to simplified "
- "(2000-8000) ints mode\n", opt.name);
+ "(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
- * used as control */
+ * used as control
+ */
adapter->itr_setting = adapter->itr & ~3;
break;
}
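
A short note on the `& ~3` above: the low two bits of the stored value are reserved for the dynamic-ITR control settings, so a literal rate supplied by the user is rounded down to a multiple of four before being kept in itr_setting and can never collide with those control values. Minimal illustration with assumed sample rates:

#include <stdio.h>

int main(void)
{
	unsigned int rates[] = { 8000, 9999, 70000 };	/* assumed samples */
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("user rate %u -> stored itr_setting %u\n",
		       rates[i], rates[i] & ~3u);
	return 0;
}
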
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on fiber adapters
**/
-
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on copper adapters
**/
-
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_100_HALF;
+ ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
- ADVERTISE_100_FULL |
- ADVERTISE_1000_FULL;
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL;
+ ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e73c2c35599..e0991388664 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,69 +32,6 @@
#include "e1000.h"
-#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
-#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
-#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
-#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
-
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
-#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
-#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
-
-#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
-#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
-#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
-
-#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
-#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
-
-#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
-#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
-
-#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
-#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
-
-/* GG82563 PHY Specific Status Register (Page 0, Register 16 */
-#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Disab. */
-#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
-#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
-#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
-#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
-
-/* PHY Specific Control Register 2 (Page 0, Register 26) */
-#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000
- /* 1=Reverse Auto-Negotiation */
-
-/* MAC Specific Control Register (Page 2, Register 21) */
-/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
-#define GG82563_MSCR_TX_CLK_MASK 0x0007
-#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
-#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
-#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
-
-#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
-
-/* DSP Distance Register (Page 5, Register 26) */
-#define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M
- 1 = 50-80M
- 2 = 80-110M
- 3 = 110-140M
- 4 = >140M
- */
-
-/* Kumeran Mode Control Register (Page 193, Register 16) */
-#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
-
-/* Max number of times Kumeran read/write should be validated */
-#define GG82563_MAX_KMRN_RETRY 0x5
-
-/* Power Management Control Register (Page 193, Register 20) */
-#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
- /* 1=Enable SERDES Electrical Idle */
-
-/* In-Band Control Register (Page 194, Register 18) */
-#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-
/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
@@ -111,11 +48,10 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
-static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
-static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 *data);
-static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
- u16 data);
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data);
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 data);
static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
/**
@@ -625,16 +561,16 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
e_dbg("GG82563 PSCR: %X\n", phy_data);
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
/* Reset the phy to commit changes. */
- phy_data |= MII_CR_RESET;
+ phy_data |= BMCR_RESET;
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -696,7 +632,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
u16 phy_data, index;
ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
@@ -774,6 +710,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
ret_val = e1000_acquire_phy_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
e1000_release_phy_80003es2lan(hw);
@@ -833,6 +772,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Setup link and flow control */
ret_val = mac->ops.setup_link(hw);
+ if (ret_val)
+ return ret_val;
/* Disable IBIST slave mode (far-end loopback) */
e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
@@ -1006,7 +947,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
/* SW Reset the PHY so all changes take effect */
- ret_val = e1000e_commit_phy(hw);
+ ret_val = hw->phy.ops.commit(hw);
if (ret_val) {
e_dbg("Error Resetting the PHY\n");
return ret_val;
@@ -1272,7 +1213,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 *data)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
+ s32 ret_val;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
@@ -1307,7 +1248,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
u16 data)
{
u32 kmrnctrlsta;
- s32 ret_val = 0;
+ s32 ret_val;
ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
if (ret_val)
@@ -1331,7 +1272,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
**/
static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
- s32 ret_val = 0;
+ s32 ret_val;
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1434,18 +1375,18 @@ static const struct e1000_phy_operations es2_phy_ops = {
.acquire = e1000_acquire_phy_80003es2lan,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
- .commit = e1000e_phy_sw_reset,
- .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
- .get_cfg_done = e1000_get_cfg_done_80003es2lan,
- .get_cable_length = e1000_get_cable_length_80003es2lan,
- .get_info = e1000e_get_phy_info_m88,
- .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
+ .commit = e1000e_phy_sw_reset,
+ .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
+ .get_cfg_done = e1000_get_cfg_done_80003es2lan,
+ .get_cable_length = e1000_get_cable_length_80003es2lan,
+ .get_info = e1000e_get_phy_info_m88,
+ .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
.release = e1000_release_phy_80003es2lan,
- .reset = e1000e_phy_hw_reset_generic,
- .set_d0_lplu_state = NULL,
- .set_d3_lplu_state = e1000e_set_d3_lplu_state,
- .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
- .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
+ .reset = e1000e_phy_hw_reset_generic,
+ .set_d0_lplu_state = NULL,
+ .set_d3_lplu_state = e1000e_set_d3_lplu_state,
+ .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
+ .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
};
static const struct e1000_nvm_operations es2_nvm_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
new file mode 100644
index 00000000000..90d363b2d28
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c77d010d5c5..2faffbde179 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,21 +44,6 @@
#include "e1000.h"
-#define ID_LED_RESERVED_F746 0xF746
-#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_OFF1_ON2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
-#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
-#define E1000_BASE1000T_STATUS 10
-#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
-#define E1000_RECEIVE_ERROR_COUNTER 21
-#define E1000_RECEIVE_ERROR_MAX 0xFFFF
-
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
@@ -67,9 +52,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
-static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
-static void e1000_clear_vfta_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
@@ -449,13 +432,13 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
break;
case e1000_82574:
case e1000_82583:
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -556,16 +539,14 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
s32 i = 0;
extcnf_ctrl = er32(EXTCNF_CTRL);
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
do {
+ extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
ew32(EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
- extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
usleep_range(2000, 4000);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
@@ -937,6 +918,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
@@ -1329,9 +1312,10 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
*/
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) &
- E1000_VFTA_ENTRY_MASK;
- vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ E1000_VFTA_ENTRY_MASK;
+ vfta_bit_in_reg =
+ 1 << (hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
@@ -1399,7 +1383,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
{
u16 status_1kbt = 0;
u16 receive_errors = 0;
- s32 ret_val = 0;
+ s32 ret_val;
/* Read PHY Receive Error counter first, if it is max - all F's then
 * read the Base1000T status register. If both are max then PHY is hung.
@@ -1544,13 +1528,12 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
- rxcw = er32(RXCW);
+ er32(RXCW);
/* SYNCH bit and IV bit are sticky */
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
-
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
@@ -1799,6 +1782,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
+ if (ret_val)
+ return ret_val;
}
}
@@ -1812,7 +1797,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
if (hw->mac.type == e1000_82571) {
- s32 ret_val = 0;
+ s32 ret_val;
/* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
@@ -1931,7 +1916,7 @@ static const struct e1000_phy_operations e82_phy_ops_igp = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_igp,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_m88 = {
@@ -1940,7 +1925,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = {
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
- .get_cfg_done = e1000e_get_cfg_done,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_m88,
@@ -1949,7 +1934,7 @@ static const struct e1000_phy_operations e82_phy_ops_m88 = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_m88,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_phy_operations e82_phy_ops_bm = {
@@ -1958,7 +1943,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = {
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
- .get_cfg_done = e1000e_get_cfg_done,
+ .get_cfg_done = e1000e_get_cfg_done_generic,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_bm2,
@@ -1967,7 +1952,7 @@ static const struct e1000_phy_operations e82_phy_ops_bm = {
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_bm2,
- .cfg_on_link_up = NULL,
+ .cfg_on_link_up = NULL,
};
static const struct e1000_nvm_operations e82571_nvm_ops = {
@@ -2044,6 +2029,7 @@ const struct e1000_info e1000_82574_info = {
| FLAG_HAS_MSIX
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
@@ -2065,6 +2051,7 @@ const struct e1000_info e1000_82583_info = {
.mac = e1000_82583,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_APME_IN_CTRL3
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
new file mode 100644
index 00000000000..85cb1a3b7cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -0,0 +1,58 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_82571_H_
+#define _E1000E_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_ON2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
+
+/* Intr Throttling - RW */
+#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n)))
+
+#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAC_MASK_82574 0x01F00000
+
+/* Manageability Operation Mode mask */
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
+
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
+bool e1000_check_phy_82574(struct e1000_hw *hw);
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 591b7132450..c2dcfcc1085 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -34,5 +34,5 @@ obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o
+ param.o ethtool.o netdev.o ptp.o
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 4dab6fc265a..fc3a4fe1ac7 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,25 +29,6 @@
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
-#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
-#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
-#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */
-#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
-#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */
-#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */
-#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */
-#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
-#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
-#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */
-#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */
-#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */
-#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */
-#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */
-#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */
-#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
-#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
-#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
-
/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
#define REQ_TX_DESCRIPTOR_MULTIPLE 8
#define REQ_RX_DESCRIPTOR_MULTIPLE 8
@@ -86,7 +67,6 @@
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
-#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -107,6 +87,7 @@
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
@@ -115,19 +96,19 @@
/* mask to determine if packets should be dropped due to frame errors */
#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
- E1000_RXD_ERR_CE | \
- E1000_RXD_ERR_SE | \
- E1000_RXD_ERR_SEQ | \
- E1000_RXD_ERR_CXE | \
- E1000_RXD_ERR_RXE)
+ E1000_RXD_ERR_CE | \
+ E1000_RXD_ERR_SE | \
+ E1000_RXD_ERR_SEQ | \
+ E1000_RXD_ERR_CXE | \
+ E1000_RXD_ERR_RXE)
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
@@ -242,9 +223,9 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-/* Bit definitions for the Management Data IO (MDIO) and Management Data
- * Clock (MDC) pins in the Device Control Register.
- */
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
/* Device Status */
#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */
@@ -260,8 +241,6 @@
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
-/* Constants used to interpret the masked PCI-X bus speed. */
-
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
@@ -274,14 +253,15 @@
#define ADVERTISE_1000_FULL 0x0020
/* 1000/H is not supported, nor spec-compliant. */
-#define E1000_ALL_SPEED_DUPLEX ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
- ADVERTISE_1000_FULL)
-#define E1000_ALL_NOT_GIG ( ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
- ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
-#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
-#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+#define E1000_ALL_SPEED_DUPLEX ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL | ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG ( \
+ ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \
+ ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX
@@ -319,6 +299,7 @@
#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */
#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
+#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
@@ -328,8 +309,6 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */
-/* Transmit Arbitration Count */
-
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -384,10 +363,15 @@
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
+/* Low Power IDLE Control */
+#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */
+
/* PBA constants */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
+#define E1000_PBA_RXA_MASK 0xFFFF
+
#define E1000_PBS_16K E1000_PBA_16K
/* Uncorrectable/correctable ECC Error counts and enable bits */
@@ -439,11 +423,11 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -542,6 +526,28 @@
#define E1000_RXCW_C 0x20000000 /* Receive config */
#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
+#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
+#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */
+#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */
+
+#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000
+
+#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000
+#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000
+
+#define E1000_TIMINCA_INCPERIOD_SHIFT 24
+#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF
+
/* PCI Express Control */
#define E1000_GCR_RXD_NO_SNOOP 0x00000001
#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002
@@ -557,66 +563,6 @@
E1000_GCR_TXDSCW_NO_SNOOP | \
E1000_GCR_TXDSCR_NO_SNOOP)
-/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
-#define MII_CR_POWER_DOWN 0x0800 /* Power down */
-#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
-#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_1000 0x0040
-#define MII_CR_SPEED_100 0x2000
-#define MII_CR_SPEED_10 0x0000
-
-/* PHY Status Register */
-#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
-#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
-
-/* Autoneg Advertisement Register */
-#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
-#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
-#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
-#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
-#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */
-#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
-
-/* Link Partner Ability Register (Base Page) */
-#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
-#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
-#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
-
-/* Autoneg Expansion Register */
-#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */
-
-/* 1000BASE-T Control Register */
-#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
-#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
- /* 0=DTE device */
-#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */
- /* 0=Configure PHY as Slave */
-#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */
- /* 0=Automatic Master/Slave config */
-
-/* 1000BASE-T Status Register */
-#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
-
-
-/* PHY 1000 MII Register/Bit Definitions */
-/* PHY Registers defined by IEEE */
-#define PHY_CONTROL 0x00 /* Control Register */
-#define PHY_STATUS 0x01 /* Status Register */
-#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */
-#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */
-#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
-#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
-#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */
-#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
-#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */
-
-#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */
-
/* NVM Control */
#define E1000_EECD_SK 0x00000001 /* NVM Clock */
#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */
@@ -648,6 +594,10 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_3GIO_3 0x001A
@@ -656,8 +606,6 @@
#define NVM_ALT_MAC_ADDR_PTR 0x0037
#define NVM_CHECKSUM_REG 0x003F
-#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
-
#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
@@ -766,9 +714,6 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
- * 0=Normal 10BASE-T Rx Threshold
- */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
@@ -804,11 +749,6 @@
/* BME1000 PHY Specific Control Register */
#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-
/* Bits...
* 15-5: page
* 4-0: register offset
@@ -855,8 +795,4 @@
/* SerDes Control */
#define E1000_GEN_POLL_TIMEOUT 640
-/* FW Semaphore */
-#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
-#define E1000_FWSM_WLOCK_MAC_SHIFT 7
-
#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 7e95f221d60..fcc758138b8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,11 @@
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
-
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/mii.h>
#include "hw.h"
struct e1000_info;
@@ -75,9 +79,6 @@ struct e1000_info;
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
-/* Early Receive defines */
-#define E1000_ERT_2048 0x100
-
#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
@@ -94,70 +95,6 @@ struct e1000_info;
#define DEFAULT_JUMBO 9234
-/* BM/HV Specific Registers */
-#define BM_PORT_CTRL_PAGE 769
-
-#define PHY_UPPER_SHIFT 21
-#define BM_PHY_REG(page, reg) \
- (((reg) & MAX_PHY_REG_ADDRESS) |\
- (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
- (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
-
-/* PHY Wakeup Registers and defines */
-#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
-#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
-#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
-#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
-#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
-#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
-
-#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
-#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
-#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
-#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
-#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
-#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
-#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
-
-#define HV_STATS_PAGE 778
-#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
-#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
-#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
-#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
-#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
-#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
-#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
-#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
-#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
-#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
-#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
-#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
-#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
-#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
-
-#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
-
-/* BM PHY Copper Specific Status */
-#define BM_CS_STATUS 17
-#define BM_CS_STATUS_LINK_UP 0x0400
-#define BM_CS_STATUS_RESOLVED 0x0800
-#define BM_CS_STATUS_SPEED_MASK 0xC000
-#define BM_CS_STATUS_SPEED_1000 0x8000
-
-/* 82577 Mobile Phy Status Register */
-#define HV_M_STATUS 26
-#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
-#define HV_M_STATUS_SPEED_MASK 0x0300
-#define HV_M_STATUS_SPEED_1000 0x0200
-#define HV_M_STATUS_LINK_UP 0x0040
-
-#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
-#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
-
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
@@ -355,6 +292,7 @@ struct e1000_adapter {
u64 gorc_old;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
+ u32 rx_hwtstamp_cleared;
unsigned int rx_ps_pages;
u16 rx_ps_bsize0;
@@ -368,7 +306,7 @@ struct e1000_adapter {
/* structs defined in e1000_hw.h */
struct e1000_hw hw;
- spinlock_t stats64_lock;
+ spinlock_t stats64_lock; /* protects statistics counters */
struct e1000_hw_stats stats;
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
@@ -404,6 +342,16 @@ struct e1000_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct delayed_work systim_overflow_work;
+ struct sk_buff *tx_hwtstamp_skb;
+ struct work_struct tx_hwtstamp_work;
+ spinlock_t systim_lock; /* protects SYSTIML/H registers */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
};
struct e1000_info {
@@ -418,6 +366,40 @@ struct e1000_info {
const struct e1000_nvm_operations *nvm_ops;
};
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
+
+/* The system time is maintained by a 64-bit counter comprised of the 32-bit
+ * SYSTIMH and SYSTIML registers. How the counter increments (and therefore
+ * its resolution) is based on the contents of the TIMINCA register - it
+ * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0).
+ * For the best accuracy, the incperiod should be as small as possible. The
+ * incvalue is scaled by a factor as large as possible (while still fitting
+ * in bits 23:0) so that relatively small clock corrections can be made.
+ *
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
+ * bits to count nanoseconds, leaving the rest for fractional nanoseconds.
+ */
+#define INCVALUE_96MHz 125
+#define INCVALUE_SHIFT_96MHz 17
+#define INCPERIOD_SHIFT_96MHz 2
+#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+
+#define INCVALUE_25MHz 40
+#define INCVALUE_SHIFT_25MHz 18
+#define INCPERIOD_25MHz 1
+
+/* Another drawback of scaling the incvalue by a large factor is the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
+ * by simply reading the clock before it overflows.
+ *
+ * Clock ns bits Overflows after
+ * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~
+ * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hours
+ * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
+ */
+#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+
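The incperiod/incvalue packing described above, and the ~9.77 hour overflow bound, can be reproduced with a small standalone calculation. The sketch below is illustrative only (plain userspace C, not driver code); it simply mirrors the 96MHz constants defined above and the bit layout given in the comment.

#include <stdio.h>
#include <stdint.h>

/* Mirrors the 96MHz constants defined above. */
#define INCVALUE_96MHZ		125
#define INCVALUE_SHIFT_96MHZ	17
#define INCPERIOD_SHIFT_96MHZ	2
#define INCPERIOD_96MHZ		(12 >> INCPERIOD_SHIFT_96MHZ)

int main(void)
{
	/* TIMINCA: incperiod in bits 31:24, scaled incvalue in bits 23:0 */
	uint32_t timinca = ((uint32_t)INCPERIOD_96MHZ << 24) |
			   ((uint32_t)INCVALUE_96MHZ << INCVALUE_SHIFT_96MHZ);

	/* 32 + 8 + (24 - shift) bits of SYSTIM count whole nanoseconds */
	unsigned int ns_bits = 32 + 8 + (24 - INCVALUE_SHIFT_96MHZ);

	/* Overflow after 2^(ns_bits - incperiod_shift) ns, shown in hours */
	double hours = (double)(1ULL << (ns_bits - INCPERIOD_SHIFT_96MHZ)) /
		       1e9 / 3600.0;

	printf("TIMINCA = 0x%08x\n", (unsigned int)timinca); /* 0x03fa0000 */
	printf("SYSTIM wraps after ~%.2f hours\n", hours);   /* ~9.77 */
	return 0;
}

With those numbers the overflow work scheduled every E1000_SYSTIM_OVERFLOW_PERIOD (4 hours) comfortably beats both the 9.77 and 19.55 hour wrap times.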
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
#define FLAG_HAS_FLASH (1 << 1)
@@ -433,7 +415,7 @@ struct e1000_info {
#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
#define FLAG_IS_QUAD_PORT_A (1 << 12)
#define FLAG_IS_QUAD_PORT (1 << 13)
-/* reserved bit14 */
+#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
#define FLAG_APME_IN_WUC (1 << 15)
#define FLAG_APME_IN_CTRL3 (1 << 16)
#define FLAG_APME_CHECK_PORT_B (1 << 17)
@@ -449,7 +431,7 @@ struct e1000_info {
#define FLAG_MSI_ENABLED (1 << 27)
/* reserved (1 << 28) */
#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RX_RESTART_NOW (1 << 30)
+#define FLAG_RESTART_NOW (1 << 30)
#define FLAG_MSI_TEST_FAILED (1 << 31)
#define FLAG2_CRC_STRIPPING (1 << 0)
@@ -465,6 +447,7 @@ struct e1000_info {
#define FLAG2_NO_DISABLE_RX (1 << 10)
#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
+#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -514,8 +497,6 @@ extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
-extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
-
extern const struct e1000_info e1000_82571_info;
extern const struct e1000_info e1000_82572_info;
extern const struct e1000_info e1000_82573_info;
@@ -529,138 +510,8 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_es2_info;
-extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-
-extern s32 e1000e_commit_phy(struct e1000_hw *hw);
-
-extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
-
-extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
-extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
-
-extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
-extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state);
-extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
-extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
-extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
-extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
-extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
-extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
-
-extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
-extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
-extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
-extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
-extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
-extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
-extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
-extern s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
-extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
-extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
-extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
-extern s32 e1000e_setup_link_generic(struct e1000_hw *hw);
-extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
-extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
-extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count);
-extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
-extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
-extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
-extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
-extern void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
-extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
-extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
-extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
-extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
-extern void e1000e_reset_adaptive(struct e1000_hw *hw);
-extern void e1000e_update_adaptive(struct e1000_hw *hw);
-
-extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
-extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
-extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
-extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
-extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
-extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
-extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
-extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
-extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
-extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
-extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
-extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
-extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
- u16 *phy_reg);
-extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
-extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
-extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success);
-extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
-extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
-extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
-extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000e_check_downshift(struct e1000_hw *hw);
-extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
-extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 *data);
-extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
-extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
- u16 data);
-extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
-extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
-extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
-
-extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
-extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
-extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
-extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
-extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+extern void e1000e_ptp_init(struct e1000_adapter *adapter);
+extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
@@ -687,20 +538,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
return hw->phy.ops.write_reg_locked(hw, offset, data);
}
-static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
-{
- return hw->phy.ops.get_cable_length(hw);
-}
-
-extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
-extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
-extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
-extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
-extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
-extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
-extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
@@ -735,10 +573,6 @@ static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
return hw->phy.ops.get_info(hw);
}
-extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
-extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
-extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
-
static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
{
return readl(hw->hw_addr + reg);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index fd4772a2691..2c1813737f6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
+#include <linux/mdio.h>
#include "e1000.h"
@@ -98,7 +99,6 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
E1000_STAT("tx_flow_control_xon", stats.xontxc),
E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
- E1000_STAT("rx_long_byte_count", stats.gorc),
E1000_STAT("rx_csum_offload_good", hw_csum_good),
E1000_STAT("rx_csum_offload_errors", hw_csum_err),
E1000_STAT("rx_header_split", rx_hdr_split),
@@ -108,6 +108,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
E1000_STAT("dropped_smbus", stats.mgpdc),
E1000_STAT("rx_dma_failed", rx_dma_failed),
E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
E1000_STAT("uncorr_ecc_errors", uncorr_errors),
E1000_STAT("corr_ecc_errors", corr_errors),
};
@@ -129,7 +130,6 @@ static int e1000_get_settings(struct net_device *netdev,
u32 speed;
if (hw->phy.media_type == e1000_media_type_copper) {
-
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -327,12 +327,12 @@ static int e1000_set_settings(struct net_device *netdev,
}
/* reset the link */
-
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
- } else
+ } else {
e1000e_reset(adapter);
+ }
clear_bit(__E1000_RESETTING, &adapter->state);
return 0;
@@ -417,7 +417,7 @@ static void e1000_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-static int e1000_get_regs_len(struct net_device *netdev)
+static int e1000_get_regs_len(struct net_device __always_unused *netdev)
{
#define E1000_REGS_LEN 32 /* overestimate */
return E1000_REGS_LEN * sizeof(u32);
@@ -471,10 +471,10 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[22] = adapter->phy_stats.receive_errors;
regs_buff[23] = regs_buff[13]; /* mdix mode */
}
- regs_buff[21] = 0; /* was idle_errors */
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
- regs_buff[24] = (u32)phy_data; /* phy local receiver status */
- regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+ regs_buff[21] = 0; /* was idle_errors */
+ e1e_rphy(hw, MII_STAT1000, &phy_data);
+ regs_buff[24] = (u32)phy_data; /* phy local receiver status */
+ regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -761,8 +761,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
(test[pat] & write));
val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
if (val != (test[pat] & write & mask)) {
- e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
- reg + offset, val, (test[pat] & write & mask));
+ e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
+ reg + (offset << 2), val,
+ (test[pat] & write & mask));
*data = reg;
return 1;
}
@@ -777,7 +778,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
return 1;
@@ -885,12 +886,20 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
E1000_FWSM_WLOCK_MAC_SHIFT;
for (i = 0; i < mac->rar_entry_count; i++) {
- /* Cannot test write-protected SHRAL[n] registers */
- if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
- continue;
+ if (mac->type == e1000_pch_lpt) {
+ /* Cannot test write-protected SHRAL[n] registers */
+ if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
+ continue;
- REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
- mask, 0xFFFFFFFF);
+ /* SHRAH[9] different than the others */
+ if (i == 10)
+ mask |= (1 << 30);
+ else
+ mask &= ~(1 << 30);
+ }
+
+ REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
+ 0xFFFFFFFF);
}
for (i = 0; i < mac->mta_reg_count; i++)
@@ -924,7 +933,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static irqreturn_t e1000_test_intr(int irq, void *data)
+static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = (struct net_device *) data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1274,7 +1283,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
if (hw->phy.type == e1000_phy_ife) {
/* force 100, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x6100);
+ e1e_wphy(hw, MII_BMCR, 0x6100);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@@ -1297,9 +1306,9 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* Auto-MDI/MDIX Off */
e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
- e1e_wphy(hw, PHY_CONTROL, 0x9140);
+ e1e_wphy(hw, MII_BMCR, 0x9140);
/* autoneg off */
- e1e_wphy(hw, PHY_CONTROL, 0x8140);
+ e1e_wphy(hw, MII_BMCR, 0x8140);
break;
case e1000_phy_gg82563:
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
@@ -1311,7 +1320,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
phy_reg |= 0x006;
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
/* Assert SW reset for above settings to take effect */
- e1000e_commit_phy(hw);
+ hw->phy.ops.commit(hw);
mdelay(1);
/* Force Full Duplex */
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
@@ -1345,7 +1354,6 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
/* Enable loopback on the PHY */
-#define I82577_PHY_LBK_CTRL 19
e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001);
break;
default:
@@ -1353,7 +1361,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
}
/* force 1000, set loopback */
- e1e_wphy(hw, PHY_CONTROL, 0x4140);
+ e1e_wphy(hw, MII_BMCR, 0x4140);
mdelay(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
@@ -1395,7 +1403,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 ctrl = er32(CTRL);
- int link = 0;
+ int link;
/* special requirements for 82571/82572 fiber adapters */
@@ -1528,11 +1536,12 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->mac.autoneg = 1;
if (hw->phy.type == e1000_phy_gg82563)
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180);
- e1e_rphy(hw, PHY_CONTROL, &phy_reg);
- if (phy_reg & MII_CR_LOOPBACK) {
- phy_reg &= ~MII_CR_LOOPBACK;
- e1e_wphy(hw, PHY_CONTROL, phy_reg);
- e1000e_commit_phy(hw);
+ e1e_rphy(hw, MII_BMCR, &phy_reg);
+ if (phy_reg & BMCR_LOOPBACK) {
+ phy_reg &= ~BMCR_LOOPBACK;
+ e1e_wphy(hw, MII_BMCR, phy_reg);
+ if (hw->phy.ops.commit)
+ hw->phy.ops.commit(hw);
}
break;
}
@@ -1694,7 +1703,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
return *data;
}
-static int e1000e_get_sset_count(struct net_device *netdev, int sset)
+static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
+ int sset)
{
switch (sset) {
case ETH_SS_TEST:
@@ -1957,7 +1967,7 @@ static int e1000_nway_reset(struct net_device *netdev)
}
static void e1000_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
+ struct ethtool_stats __always_unused *stats,
u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1986,8 +1996,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
}
-static void e1000_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+static void e1000_get_strings(struct net_device __always_unused *netdev,
+ u32 stringset, u8 *data)
{
u8 *p = data;
int i;
@@ -2007,7 +2017,8 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
static int e1000_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *info, u32 *rule_locs)
+ struct ethtool_rxnfc *info,
+ u32 __always_unused *rule_locs)
{
info->data = 0;
@@ -2053,6 +2064,171 @@ static int e1000_get_rxnfc(struct net_device *netdev,
}
}
+static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl;
+ u32 status, ret_val;
+
+ if (!(adapter->flags & FLAG_IS_ICH) ||
+ !(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ cap_addr = I82579_EEE_CAPABILITY;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ lpa_addr = I82579_EEE_LP_ABILITY;
+ pcs_stat_addr = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ cap_addr = I217_EEE_CAPABILITY;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ lpa_addr = I217_EEE_LP_ABILITY;
+ pcs_stat_addr = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return -EBUSY;
+
+ /* EEE Capability */
+ ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
+
+ /* EEE Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE Link Partner Advertised */
+ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
+ if (ret_val)
+ goto release;
+ edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+
+ /* EEE PCS Status */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (hw->phy.type == e1000_phy_82579)
+ phy_data <<= 8;
+
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return -ENODATA;
+
+ e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
+ status = er32(STATUS);
+
+ /* Result of the EEE auto-negotiation - no register reports the
+ * outcome of the EEE negotiation, so make a best guess based on
+ * whether both Tx and Rx LPI indications have been received, or on
+ * the link speed, the EEE speeds advertised by both ends and the
+ * speeds on which EEE is enabled locally.
+ */
+ if (((phy_data & E1000_EEE_TX_LPI_RCVD) &&
+ (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (edata->advertised & ADVERTISED_100baseT_Full) &&
+ (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
+ (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
+ ((status & E1000_STATUS_SPEED_1000) &&
+ (edata->advertised & ADVERTISED_1000baseT_Full) &&
+ (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
+ (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
+ edata->tx_lpi_enabled = true;
+ edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
+
+ return 0;
+}
+
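e1000e_get_eee() above leans on mmd_eee_cap_to_ethtool_sup_t() and mmd_eee_adv_to_ethtool_adv_t() to turn the raw EEE words read over EMI into ethtool ADVERTISED_* bits. The following is a simplified standalone sketch of that translation for the two speeds the 82579/I217 parts care about; the EEE_ADV_* bit positions are assumed here to follow the IEEE 802.3 EEE advertisement layout, and eee_adv_to_ethtool() is an illustrative name, not a driver helper.

#include <stdio.h>
#include <stdint.h>

/* Assumed IEEE 802.3 EEE advertisement bit layout (registers 7.60/7.61):
 * bit 1 = 100BASE-TX EEE, bit 2 = 1000BASE-T EEE.
 */
#define EEE_ADV_100TX		(1 << 1)
#define EEE_ADV_1000T		(1 << 2)

/* ethtool link mode bits, numerically as in linux/ethtool.h */
#define ADV_100BASET_FULL	(1 << 3)
#define ADV_1000BASET_FULL	(1 << 5)

/* Simplified equivalent of mmd_eee_adv_to_ethtool_adv_t() for the two
 * speeds relevant here.
 */
static uint32_t eee_adv_to_ethtool(uint16_t eee_adv)
{
	uint32_t adv = 0;

	if (eee_adv & EEE_ADV_100TX)
		adv |= ADV_100BASET_FULL;
	if (eee_adv & EEE_ADV_1000T)
		adv |= ADV_1000BASET_FULL;
	return adv;
}

int main(void)
{
	/* e.g. a link partner advertising EEE at both 100 and 1000 Mb/s */
	printf("0x%04x -> 0x%08x\n", 0x0006u,
	       (unsigned int)eee_adv_to_ethtool(0x0006));
	return 0;
}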
+static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_IS_ICH) ||
+ !(adapter->flags2 & FLAG2_HAS_EEE))
+ return -EOPNOTSUPP;
+
+ ret_val = e1000e_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.advertised != edata->advertised) {
+ e_err("Setting EEE advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ e_err("Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) {
+ e_err("Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+ /* reset the link */
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int e1000e_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ ethtool_op_get_ts_info(netdev, info);
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return 0;
+
+ info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+
+ return 0;
+}
+
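Once .get_ts_info is pointed at e1000e_get_ts_info(), userspace can query the PHC index and the advertised timestamping capabilities through the standard ETHTOOL_GET_TS_INFO ioctl. A minimal sketch, assuming an interface named eth0 and with error handling trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed ifname */
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("phc_index: %d\n", info.phc_index);
		printf("so_timestamping: 0x%x\n", info.so_timestamping);
		printf("rx_filters: 0x%x\n", info.rx_filters);
	}
	close(fd);
	return 0;
}

A phc_index of -1 means no PTP hardware clock was registered (e.g. FLAG_HAS_HW_TIMESTAMP not set); otherwise the clock is reachable as /dev/ptp<phc_index>.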
static const struct ethtool_ops e1000_ethtool_ops = {
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
@@ -2080,7 +2256,9 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_rxnfc = e1000_get_rxnfc,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = e1000e_get_ts_info,
+ .get_eee = e1000e_get_eee,
+ .set_eee = e1000e_set_eee,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b88676ff3d8..1e6b889aee8 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,332 +29,10 @@
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
-#include <linux/types.h>
-
-struct e1000_hw;
-struct e1000_adapter;
-
+#include "regs.h"
#include "defines.h"
-enum e1e_registers {
- E1000_CTRL = 0x00000, /* Device Control - RW */
- E1000_STATUS = 0x00008, /* Device Status - RO */
- E1000_EECD = 0x00010, /* EEPROM/Flash Control - RW */
- E1000_EERD = 0x00014, /* EEPROM Read - RW */
- E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
- E1000_FLA = 0x0001C, /* Flash Access - RW */
- E1000_MDIC = 0x00020, /* MDI Control - RW */
- E1000_SCTL = 0x00024, /* SerDes Control - RW */
- E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */
- E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */
- E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
- E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
- E1000_FCT = 0x00030, /* Flow Control Type - RW */
- E1000_VET = 0x00038, /* VLAN Ether Type - RW */
- E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
- E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
- E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
- E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
- E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
- E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
- E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
- E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
- E1000_IVAR = 0x000E4, /* Interrupt Vector Allocation - RW */
- E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
-#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + (_n << 2))
- E1000_RCTL = 0x00100, /* Rx Control - RW */
- E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
- E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
- E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
- E1000_TCTL = 0x00400, /* Tx Control - RW */
- E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
- E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
- E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
- E1000_LEDCTL = 0x00E00, /* LED Control - RW */
- E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
- E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
- E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
-#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
- E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
- E1000_PBS = 0x01008, /* Packet Buffer Size */
- E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
- E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
- E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
- E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
- E1000_PBA_ECC = 0x01100, /* PBA ECC Register */
- E1000_ERT = 0x02008, /* Early Rx Threshold - RW */
- E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
- E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
- E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
-/* Convenience macros
- *
- * Note: "_n" is the queue number of the register to be written to.
- *
- * Example usage:
- * E1000_RDBAL(current_rx_queue)
- */
- E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
-#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
- E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
-#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
- E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
-#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
- E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
-#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
- E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
-#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
- E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
- E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
-#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
- E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
-
- E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
- E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
-#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
- E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
-#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
- E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
-#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
- E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
-#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
- E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
-#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
- E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
- E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
-#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
- E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
- E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
-#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
- E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
- E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
- E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
- E1000_RXERRC = 0x0400C, /* Receive Error Count - R/clr */
- E1000_MPC = 0x04010, /* Missed Packet Count - R/clr */
- E1000_SCC = 0x04014, /* Single Collision Count - R/clr */
- E1000_ECOL = 0x04018, /* Excessive Collision Count - R/clr */
- E1000_MCC = 0x0401C, /* Multiple Collision Count - R/clr */
- E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
- E1000_COLC = 0x04028, /* Collision Count - R/clr */
- E1000_DC = 0x04030, /* Defer Count - R/clr */
- E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
- E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
- E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
- E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
- E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
- E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
- E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
- E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
- E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
- E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
- E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
- E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
- E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
- E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
- E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
- E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
- E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
- E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
- E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
- E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
- E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
- E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
- E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
- E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
- E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
- E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
- E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
- E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
- E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
- E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
- E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
- E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
- E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
- E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
- E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
- E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
- E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
- E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
- E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
- E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
- E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
- E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
- E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
- E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
- E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
- E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
- E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
- E1000_IAC = 0x04100, /* Interrupt Assertion Count */
- E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
- E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
- E1000_ICTXPTC = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
- E1000_ICTXATC = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
- E1000_ICTXQEC = 0x04118, /* Irq Cause Tx Queue Empty Count */
- E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
- E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
- E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
- E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
- E1000_RFCTL = 0x05008, /* Receive Filter Control */
- E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
- E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
-#define E1000_RAL(_n) (E1000_RAL_BASE + ((_n) * 8))
-#define E1000_RA (E1000_RAL(0))
- E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
-#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
- E1000_SHRAL_PCH_LPT_BASE = 0x05408,
-#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
- E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
-#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
- E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
-#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
- E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
-#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
- E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
- E1000_WUC = 0x05800, /* Wakeup Control - RW */
- E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
- E1000_WUS = 0x05810, /* Wakeup Status - RO */
- E1000_MRQC = 0x05818, /* Multiple Receive Control - RW */
- E1000_MANC = 0x05820, /* Management Control - RW */
- E1000_FFLT = 0x05F00, /* Flexible Filter Length Table - RW Array */
- E1000_HOST_IF = 0x08800, /* Host Interface */
-
- E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
- E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
- E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
-#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
- E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
- E1000_GCR = 0x05B00, /* PCI-Ex Control */
- E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
- E1000_FACTPS = 0x05B30, /* Function Active and Power State to MNG */
- E1000_SWSM = 0x05B50, /* SW Semaphore */
- E1000_FWSM = 0x05B54, /* FW Semaphore */
- E1000_SWSM2 = 0x05B58, /* Driver-only SW semaphore */
- E1000_RETA_BASE = 0x05C00, /* Redirection Table - RW */
-#define E1000_RETA(_n) (E1000_RETA_BASE + ((_n) * 4))
- E1000_RSSRK_BASE = 0x05C80, /* RSS Random Key - RW */
-#define E1000_RSSRK(_n) (E1000_RSSRK_BASE + ((_n) * 4))
- E1000_FFLT_DBG = 0x05F04, /* Debug Register */
- E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
-#define E1000_PCH_RAICC(_n) (E1000_PCH_RAICC_BASE + ((_n) * 4))
-#define E1000_CRC_OFFSET E1000_PCH_RAICC_BASE
- E1000_HICR = 0x08F00, /* Host Interface Control */
-};
-
-#define E1000_MAX_PHY_ADDR 4
-
-/* IGP01E1000 Specific Registers */
-#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
-#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
-#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
-#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
-#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
-#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
-#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
-#define IGP_PAGE_SHIFT 5
-#define PHY_REG_MASK 0x1F
-
-#define BM_WUC_PAGE 800
-#define BM_WUC_ADDRESS_OPCODE 0x11
-#define BM_WUC_DATA_OPCODE 0x12
-#define BM_WUC_ENABLE_PAGE 769
-#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
-
-#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
-#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
-#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
-
-#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
-#define IGP01E1000_PHY_POLARITY_MASK 0x0078
-
-#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
-#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
-
-#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
-
-#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
-#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
-#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
-
-#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
-
-#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
-#define IGP01E1000_PSSR_MDIX 0x0800
-#define IGP01E1000_PSSR_SPEED_MASK 0xC000
-#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
-
-#define IGP02E1000_PHY_CHANNEL_NUM 4
-#define IGP02E1000_PHY_AGC_A 0x11B1
-#define IGP02E1000_PHY_AGC_B 0x12B1
-#define IGP02E1000_PHY_AGC_C 0x14B1
-#define IGP02E1000_PHY_AGC_D 0x18B1
-
-#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */
-#define IGP02E1000_AGC_LENGTH_MASK 0x7F
-#define IGP02E1000_AGC_RANGE 15
-
-/* manage.c */
-#define E1000_VFTA_ENTRY_SHIFT 5
-#define E1000_VFTA_ENTRY_MASK 0x7F
-#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-
-#define E1000_HICR_EN 0x01 /* Enable bit - RO */
-/* Driver sets this bit when done to put command in RAM */
-#define E1000_HICR_C 0x02
-#define E1000_HICR_FW_RESET_ENABLE 0x40
-#define E1000_HICR_FW_RESET 0x80
-
-#define E1000_FWSM_MODE_MASK 0xE
-#define E1000_FWSM_MODE_SHIFT 1
-
-#define E1000_MNG_IAMT_MODE 0x3
-#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
-#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
-#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
-#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
-#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
-#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
-
-/* nvm.c */
-#define E1000_STM_OPCODE 0xDB00
-
-#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
-#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
-#define E1000_KMRNCTRLSTA_REN 0x00200000
-#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
-#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
-#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
-#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
-#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
-#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
-#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
-#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
-#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
-
-#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
-#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */
-#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
-#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
-
-/* IFE PHY Extended Status Control */
-#define IFE_PESC_POLARITY_REVERSED 0x0100
-
-/* IFE PHY Special Control */
-#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
-#define IFE_PSC_FORCE_POLARITY 0x0020
-
-/* IFE PHY Special Control and LED Control */
-#define IFE_PSCL_PROBE_MODE 0x0020
-#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
-#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
-
-/* IFE PHY MDIX Control */
-#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
-#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
-#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
-
-#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+struct e1000_hw;
#define E1000_DEV_ID_82571EB_COPPER 0x105E
#define E1000_DEV_ID_82571EB_FIBER 0x105F
@@ -374,13 +52,11 @@ enum e1e_registers {
#define E1000_DEV_ID_82573L 0x109A
#define E1000_DEV_ID_82574L 0x10D3
#define E1000_DEV_ID_82574LA 0x10F6
-#define E1000_DEV_ID_82583V 0x150C
-
+#define E1000_DEV_ID_82583V 0x150C
#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096
#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098
#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA
#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB
-
#define E1000_DEV_ID_ICH8_82567V_3 0x1501
#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049
#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A
@@ -415,12 +91,12 @@ enum e1e_registers {
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
-#define E1000_REVISION_4 4
+#define E1000_REVISION_4 4
-#define E1000_FUNC_1 1
+#define E1000_FUNC_1 1
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
-#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
enum e1000_mac_type {
e1000_82571,
@@ -525,16 +201,6 @@ enum e1000_serdes_link_state {
e1000_serdes_link_forced_up
};
-/* Receive Descriptor */
-struct e1000_rx_desc {
- __le64 buffer_addr; /* Address of the descriptor's data buffer */
- __le16 length; /* Length of data DMAed into data buffer */
- __le16 csum; /* Packet checksum */
- u8 status; /* Descriptor status */
- u8 errors; /* Descriptor Errors */
- __le16 special;
-};
-
/* Receive Descriptor - Extended */
union e1000_rx_desc_extended {
struct {
@@ -657,7 +323,7 @@ struct e1000_data_desc {
struct {
u8 status; /* Descriptor status */
u8 popts; /* Packet Options */
- __le16 special; /* */
+ __le16 special;
} fields;
} upper;
};
@@ -753,7 +419,7 @@ struct e1000_host_command_header {
u8 checksum;
};
-#define E1000_HI_MAX_DATA_LENGTH 252
+#define E1000_HI_MAX_DATA_LENGTH 252
struct e1000_host_command_info {
struct e1000_host_command_header command_header;
u8 command_data[E1000_HI_MAX_DATA_LENGTH];
@@ -768,13 +434,18 @@ struct e1000_host_mng_command_header {
u16 command_length;
};
-#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header;
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
};
-/* Function pointers and static data for the MAC. */
+#include "mac.h"
+#include "phy.h"
+#include "nvm.h"
+#include "manage.h"
+
+/* Function pointers for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
s32 (*blink_led)(struct e1000_hw *);
@@ -1003,4 +674,8 @@ struct e1000_hw {
} dev_spec;
};
+#include "82571.h"
+#include "80003es2lan.h"
+#include "ich8lan.h"
+
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 24d9f61956f..dff7bff8b8e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,147 +57,6 @@
#include "e1000.h"
-#define ICH_FLASH_GFPREG 0x0000
-#define ICH_FLASH_HSFSTS 0x0004
-#define ICH_FLASH_HSFCTL 0x0006
-#define ICH_FLASH_FADDR 0x0008
-#define ICH_FLASH_FDATA0 0x0010
-#define ICH_FLASH_PR0 0x0074
-
-#define ICH_FLASH_READ_COMMAND_TIMEOUT 500
-#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
-#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
-#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
-#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
-
-#define ICH_CYCLE_READ 0
-#define ICH_CYCLE_WRITE 2
-#define ICH_CYCLE_ERASE 3
-
-#define FLASH_GFPREG_BASE_MASK 0x1FFF
-#define FLASH_SECTOR_ADDR_SHIFT 12
-
-#define ICH_FLASH_SEG_SIZE_256 256
-#define ICH_FLASH_SEG_SIZE_4K 4096
-#define ICH_FLASH_SEG_SIZE_8K 8192
-#define ICH_FLASH_SEG_SIZE_64K 65536
-
-
-#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
-/* FW established a valid mode */
-#define E1000_ICH_FWSM_FW_VALID 0x00008000
-
-#define E1000_ICH_MNG_IAMT_MODE 0x2
-
-#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_OFF2 << 8) | \
- (ID_LED_DEF1_ON2 << 4) | \
- (ID_LED_DEF1_DEF2))
-
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
-
-#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
-
-#define E1000_FEXTNVM_SW_CONFIG 1
-#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
-
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
-#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
-
-#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
-#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
-
-#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
-
-#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
-#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
-
-#define PHY_PAGE_SHIFT 5
-#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
- ((reg) & MAX_PHY_REG_ADDRESS))
-#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
-#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
-
-#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
-#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
-#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
-
-#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
-
-#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
-
-/* SMBus Control Phy Register */
-#define CV_SMB_CTRL PHY_REG(769, 23)
-#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
-
-/* SMBus Address Phy Register */
-#define HV_SMB_ADDR PHY_REG(768, 26)
-#define HV_SMB_ADDR_MASK 0x007F
-#define HV_SMB_ADDR_PEC_EN 0x0200
-#define HV_SMB_ADDR_VALID 0x0080
-#define HV_SMB_ADDR_FREQ_MASK 0x1100
-#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
-#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
-
-/* PHY Power Management Control */
-#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
-
-/* PHY Low Power Idle Control */
-#define I82579_LPI_CTRL PHY_REG(772, 20)
-#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
-#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
-
-/* EMI Registers */
-#define I82579_EMI_ADDR 0x10
-#define I82579_EMI_DATA 0x11
-#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
-#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
-#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
-#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
-#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
-#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
-
-/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
-#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
-#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
-#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
-#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
-
-/* Strapping Option Register - RO */
-#define E1000_STRAP 0x0000C
-#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
-#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
-#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
-#define E1000_STRAP_SMT_FREQ_SHIFT 12
-
-/* OEM Bits Phy Register */
-#define HV_OEM_BITS PHY_REG(768, 25)
-#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
-#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
-#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
-
-#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
-#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
-
-/* KMRN Mode Control */
-#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
-#define HV_KMRN_MDIO_SLOW 0x0400
-
-/* KMRN FIFO Control and Status */
-#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
-#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
-
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
@@ -252,7 +111,6 @@ union ich8_flash_protected_range {
u32 regval;
};
-static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
@@ -264,9 +122,7 @@ static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
u16 *data);
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
-static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
-static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
@@ -278,7 +134,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
-static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
@@ -330,12 +186,12 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
u16 retry_count;
for (retry_count = 0; retry_count < 2; retry_count++) {
- ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF))
continue;
phy_id = (u32)(phy_reg << 16);
- ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
if (ret_val || (phy_reg == 0xFFFF)) {
phy_id = 0;
continue;
@@ -378,10 +234,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
s32 ret_val;
u16 phy_reg;
+ /* Gate automatic PHY configuration by hardware on managed and
+ * non-managed 82579 and newer adapters.
+ */
+ e1000_gate_hw_phy_config_ich8lan(hw, true);
+
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
e_dbg("Failed to initialize PHY flow\n");
- return ret_val;
+ goto out;
}
/* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
@@ -402,13 +263,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
- /* Gate automatic PHY configuration by hardware on
- * non-managed 82579
- */
- if ((hw->mac.type == e1000_pch2lan) &&
- !(fwsm & E1000_ICH_FWSM_FW_VALID))
- e1000_gate_hw_phy_config_ich8lan(hw, true);
-
if (e1000_phy_is_accessible_pchlan(hw)) {
if (hw->mac.type == e1000_pch_lpt) {
/* Unforce SMBus mode in PHY */
@@ -443,6 +297,15 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode,
+ * so ensure that the MAC is also out of SMBus mode.
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
/* Toggle LANPHYPC Value bit */
mac_reg = er32(CTRL);
mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -476,6 +339,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
ret_val = e1000e_phy_hw_reset_generic(hw);
+out:
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
@@ -495,7 +359,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
+ s32 ret_val;
phy->addr = 1;
phy->reset_delay_us = 100;
@@ -778,68 +642,143 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /* Gate automatic PHY configuration by hardware on managed
- * 82579 and i217
- */
- if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
- (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
- e1000_gate_hw_phy_config_ich8lan(hw, true);
-
return 0;
}
/**
+ * __e1000_access_emi_reg_locked - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @address: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ *
+ * This helper function assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
+ if (ret_val)
+ return ret_val;
+
+ if (read)
+ ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
+ else
+ ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg_locked - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, data, true);
+}
+
+/**
+ * e1000_write_emi_reg_locked - Write Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be written to the EMI address
+ *
+ * Assumes the SW/FW/HW Semaphore is already acquired.
+ **/
+static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+{
+ return __e1000_access_emi_reg_locked(hw, addr, &data, false);
+}
+
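For illustration, a minimal caller of the new locked EMI accessors might look like the sketch below (a hypothetical wrapper, not part of the patch); the *_locked suffix means the SW/FW/HW semaphore must be held around the call, and the helpers hide the I82579_EMI_ADDR/I82579_EMI_DATA indirection:

    static s32 example_read_mse_threshold(struct e1000_hw *hw, u16 *mse)
    {
    	s32 ret_val;

    	/* take the PHY semaphore required by the *_locked helpers */
    	ret_val = hw->phy.ops.acquire(hw);
    	if (ret_val)
    		return ret_val;

    	/* the address is latched into I82579_EMI_ADDR and the data is
    	 * moved through I82579_EMI_DATA inside the helper
    	 */
    	ret_val = e1000_read_emi_reg_locked(hw, I82579_MSE_THRESHOLD, mse);

    	hw->phy.ops.release(hw);

    	return ret_val;
    }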
+/**
* e1000_set_eee_pchlan - Enable/disable EEE support
* @hw: pointer to the HW structure
*
- * Enable/disable EEE based on setting in dev_spec structure. The bits in
- * the LPI Control register will remain set only if/when link is up.
+ * Enable/disable EEE based on setting in dev_spec structure, the duplex of
+ * the link and the EEE capabilities of the link partner. The LPI Control
+ * register bits will remain set only if/when link is up.
**/
static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
- s32 ret_val = 0;
- u16 phy_reg;
+ s32 ret_val;
+ u16 lpi_ctrl;
if ((hw->phy.type != e1000_phy_82579) &&
(hw->phy.type != e1000_phy_i217))
return 0;
- ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+ ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- if (dev_spec->eee_disable)
- phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
- else
- phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
-
- ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+ ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
if (ret_val)
- return ret_val;
+ goto release;
+
+ /* Clear bits that enable EEE in various speeds */
+ lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
+
+ /* Enable EEE if not disabled by user */
+ if (!dev_spec->eee_disable) {
+ u16 lpa, pcs_status, data;
- if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
/* Save off link partner's EEE ability */
- ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
- return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I217_EEE_LP_ABILITY);
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto release;
+ }
+ ret_val = e1000_read_emi_reg_locked(hw, lpa,
+ &dev_spec->eee_lp_ability);
if (ret_val)
goto release;
- e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
- /* EEE is not supported in 100Half, so ignore partner's EEE
- * in 100 ability if full-duplex is not advertised.
+ /* Enable EEE only for speeds in which the link partner is
+ * EEE capable.
*/
- e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
- if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
- dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
-release:
- hw->phy.ops.release(hw);
+ if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+ if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ e1e_rphy_locked(hw, MII_LPA, &data);
+ if (data & LPA_100FULL)
+ lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+ else
+ /* EEE is not supported in 100Half, so ignore
+ * partner's EEE in 100 ability if full-duplex
+ * is not advertised.
+ */
+ dev_spec->eee_lp_ability &=
+ ~I82579_EEE_100_SUPPORTED;
+ }
+
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
}
- return 0;
+ ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
+release:
+ hw->phy.ops.release(hw);
+
+ return ret_val;
}
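Condensed, the enable decision implemented above maps the link partner's EEE ability and advertised duplex onto the LPI Control enable bits; a hypothetical helper expressing just that logic:

    /* sketch only: lp_ability is the saved EEE LP ability word,
     * mii_lpa is the partner's MII_LPA register
     */
    static u16 example_lpi_enable_bits(u16 lp_ability, u16 mii_lpa)
    {
    	u16 bits = 0;

    	if (lp_ability & I82579_EEE_1000_SUPPORTED)
    		bits |= I82579_LPI_CTRL_1000_ENABLE;

    	/* EEE is not defined for 100Half, so 100Mb EEE only counts
    	 * when the partner also offers 100BASE-TX full-duplex
    	 */
    	if ((lp_ability & I82579_EEE_100_SUPPORTED) && (mii_lpa & LPA_100FULL))
    		bits |= I82579_LPI_CTRL_100_ENABLE;

    	return bits;
    }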
/**
@@ -1017,7 +956,7 @@ static DEFINE_MUTEX(nvm_mutex);
*
* Acquires the mutex for performing NVM operations.
**/
-static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
mutex_lock(&nvm_mutex);
@@ -1030,7 +969,7 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
*
* Releases the mutex used while performing NVM operations.
**/
-static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
{
mutex_unlock(&nvm_mutex);
}
@@ -1322,7 +1261,7 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
u32 strap = er32(STRAP);
u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
E1000_STRAP_SMT_FREQ_SHIFT;
- s32 ret_val = 0;
+ s32 ret_val;
strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1558,7 +1497,7 @@ release:
**/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
- s32 ret_val = 0;
+ s32 ret_val;
u32 ctrl_reg = 0;
u32 ctrl_ext = 0;
u32 reg = 0;
@@ -1727,7 +1666,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
*/
if (hw->phy.revision < 2) {
e1000e_phy_sw_reset(hw);
- ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+ ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
}
}
@@ -1757,6 +1696,11 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
+ if (ret_val)
+ goto release;
+
+ /* set MSE higher to enable link to stay up when noise is high */
+ ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
release:
hw->phy.ops.release(hw);
@@ -1983,22 +1927,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
/* Set MDIO slow mode before any other MDIO access */
ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (ret_val)
+ return ret_val;
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
- if (ret_val)
- goto release;
/* set MSE higher to enable link to stay up when noise is high */
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
- if (ret_val)
- goto release;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
if (ret_val)
goto release;
/* drop link after 5 times MSE threshold was reached */
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
+ ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
release:
hw->phy.ops.release(hw);
@@ -2172,10 +2112,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I82579_LPI_UPDATE_TIMER);
- if (!ret_val)
- ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
+ ret_val = e1000_write_emi_reg_locked(hw,
+ I82579_LPI_UPDATE_TIMER,
+ 0x1387);
hw->phy.ops.release(hw);
}
@@ -2219,7 +2158,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
{
- s32 ret_val = 0;
+ s32 ret_val;
u16 oem_reg;
ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
@@ -2277,6 +2216,8 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+ if (ret_val)
+ return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
@@ -2949,19 +2890,32 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
s32 ret_val;
u16 data;
+ u16 word;
+ u16 valid_csum_mask;
- /* Read 0x19 and check bit 6. If this bit is 0, the checksum
- * needs to be fixed. This bit is an indication that the NVM
- * was prepared by OEM software and did not calculate the
- * checksum...a likely scenario.
+ /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+ * the checksum needs to be fixed. This bit is an indication that
+ * the NVM was prepared by OEM software and did not calculate
+ * the checksum...a likely scenario.
*/
- ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+ switch (hw->mac.type) {
+ case e1000_pch_lpt:
+ word = NVM_COMPAT;
+ valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+ break;
+ default:
+ word = NVM_FUTURE_INIT_WORD1;
+ valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+ break;
+ }
+
+ ret_val = e1000_read_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
- if (!(data & 0x40)) {
- data |= 0x40;
- ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+ if (!(data & valid_csum_mask)) {
+ data |= valid_csum_mask;
+ ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
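The word/bit selection introduced here replaces the old hard-coded 0x19/0x40 pair; restated as a sketch (names as used in the hunk above, values for the default case taken from the removed literals):

    /* sketch: which NVM word and valid-checksum bit to test per MAC type */
    static void example_csum_word(enum e1000_mac_type type, u16 *word, u16 *mask)
    {
    	if (type == e1000_pch_lpt) {
    		*word = NVM_COMPAT;
    		*mask = NVM_COMPAT_VALID_CSUM;
    	} else {
    		*word = NVM_FUTURE_INIT_WORD1;            /* was 0x19 */
    		*mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; /* was 0x40 */
    	}
    }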
@@ -3975,8 +3929,7 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
if (ret_val)
return;
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
- ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- reg_data);
+ e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
}
/**
@@ -4011,19 +3964,20 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if (!dev_spec->eee_disable) {
u16 eee_advert;
- ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
- I217_EEE_ADVERTISEMENT);
+ ret_val =
+ e1000_read_emi_reg_locked(hw,
+ I217_EEE_ADVERTISEMENT,
+ &eee_advert);
if (ret_val)
goto release;
- e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
/* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link.
*/
- if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+ if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
(dev_spec->eee_lp_ability &
- I217_EEE_100_SUPPORTED) &&
+ I82579_EEE_100_SUPPORTED) &&
(hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
E1000_PHY_CTRL_NOND0A_LPLU);
@@ -4037,7 +3991,6 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* The SMBus release must also be disabled on LCD reset.
*/
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
-
/* Enable proxy to reset only on power good. */
e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
@@ -4298,7 +4251,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
u32 bank = 0;
u32 status;
- e1000e_get_cfg_done(hw);
+ e1000e_get_cfg_done_generic(hw);
/* Wait for indication from h/w that it has completed basic config */
if (hw->mac.type >= e1000_ich10lan) {
@@ -4427,7 +4380,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
.reset_hw = e1000_reset_hw_ich8lan,
.init_hw = e1000_init_hw_ich8lan,
.setup_link = e1000_setup_link_ich8lan,
- .setup_physical_interface= e1000_setup_copper_link_ich8lan,
+ .setup_physical_interface = e1000_setup_copper_link_ich8lan,
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
@@ -4449,7 +4402,7 @@ static const struct e1000_phy_operations ich8_phy_ops = {
static const struct e1000_nvm_operations ich8_nvm_ops = {
.acquire = e1000_acquire_nvm_ich8lan,
- .read = e1000_read_nvm_ich8lan,
+ .read = e1000_read_nvm_ich8lan,
.release = e1000_release_nvm_ich8lan,
.reload = e1000e_reload_nvm_generic,
.update = e1000_update_nvm_checksum_ich8lan,
@@ -4531,6 +4484,7 @@ const struct e1000_info e1000_pch2_info = {
.mac = e1000_pch2lan,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4539,7 +4493,7 @@ const struct e1000_info e1000_pch2_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = 9018,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
@@ -4550,6 +4504,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.mac = e1000_pch_lpt,
.flags = FLAG_IS_ICH
| FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_AMT
| FLAG_HAS_FLASH
@@ -4558,7 +4513,7 @@ const struct e1000_info e1000_pch_lpt_info = {
.flags2 = FLAG2_HAS_PHY_STATS
| FLAG2_HAS_EEE,
.pba = 26,
- .max_hw_frame_size = DEFAULT_JUMBO,
+ .max_hw_frame_size = 9018,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
new file mode 100644
index 00000000000..b6d3174d7d2
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -0,0 +1,268 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_ICH8LAN_H_
+#define _E1000E_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG 0x0000
+#define ICH_FLASH_HSFSTS 0x0004
+#define ICH_FLASH_HSFCTL 0x0006
+#define ICH_FLASH_FADDR 0x0008
+#define ICH_FLASH_FDATA0 0x0010
+#define ICH_FLASH_PR0 0x0074
+
+/* Requires up to 10 seconds when MNG might be accessing part. */
+#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000
+#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT 10
+
+#define ICH_CYCLE_READ 0
+#define ICH_CYCLE_WRITE 2
+#define ICH_CYCLE_ERASE 3
+
+#define FLASH_GFPREG_BASE_MASK 0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT 12
+
+#define ICH_FLASH_SEG_SIZE_256 256
+#define ICH_FLASH_SEG_SIZE_4K 4096
+#define ICH_FLASH_SEG_SIZE_8K 8192
+#define ICH_FLASH_SEG_SIZE_64K 65536
+
+#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID 0x00008000
+#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000
+
+#define E1000_ICH_MNG_IAMT_MODE 0x2
+
+#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT 7
+
+/* Shared Receive Address Registers */
+#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8))
+#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8))
+
+#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
+ (ID_LED_OFF1_OFF2 << 8) | \
+ (ID_LED_OFF1_ON2 << 4) | \
+ (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD 0x13
+#define E1000_ICH_NVM_SIG_MASK 0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
+#define E1000_ICH_NVM_SIG_VALUE 0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
+
+#define E1000_FEXTNVM_SW_CONFIG 1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
+
+#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES 7
+#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+ ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
+
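PHY_REG() packs the page number above the 5-bit register field; for example, assuming MAX_PHY_REG_ADDRESS is 0x1F as elsewhere in the driver:

    /* PHY_REG(772, 20) == (772 << 5) | (20 & 0x1F)
     *                  == 0x6080 | 0x14
     *                  == 0x6094   (i.e. I82579_LPI_CTRL below)
     */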
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */
+#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */
+
+#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
+#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
+#define HV_STATS_PAGE 778
+/* Half-duplex collision counts */
+#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */
+#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */
+#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */
+#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */
+#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */
+#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK 0x007F
+#define HV_SMB_ADDR_PEC_EN 0x0200
+#define HV_SMB_ADDR_VALID 0x0080
+#define HV_SMB_ADDR_FREQ_MASK 0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP 0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT 12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW 0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+
+#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL PHY_REG(772, 20)
+#define I82579_LPI_CTRL_100_ENABLE 0x2000
+#define I82579_LPI_CTRL_1000_ENABLE 0x4000
+#define I82579_LPI_CTRL_ENABLE_MASK 0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
+
+/* Extended Management Interface (EMI) Registers */
+#define I82579_EMI_ADDR 0x10
+#define I82579_EMI_DATA 0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
+#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
+#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
+#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
+#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
+#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
+#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
+#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */
+#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */
+#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */
+#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */
+#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
+
+#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Rx LP idle received */
+#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Tx LP idle received */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
+#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
+#define I217_CGFREG PHY_REG(772, 29)
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
+#define I217_MEMPWR PHY_REG(772, 26)
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
+
+/* Receive Address Initial CRC Calculation */
+#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4))
+
+/* Latency Tolerance Reporting */
+#define E1000_LTRV 0x000F8
+#define E1000_LTRV_SCALE_MAX 5
+#define E1000_LTRV_SCALE_FACTOR 5
+#define E1000_LTRV_REQ_SHIFT 15
+#define E1000_LTRV_NOSNOOP_SHIFT 16
+#define E1000_LTRV_SEND (1 << 30)
+
+/* Proprietary Latency Tolerance Reporting PCI Capability */
+#define E1000_PCI_LTR_CAP_LPT 0xA8
+
+/* OBFF Control & Threshold Defines */
+#define E1000_SVCR_OFF_EN 0x00000001
+#define E1000_SVCR_OFF_MASKINT 0x00001000
+#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
+#define E1000_SVCR_OFF_TIMER_SHIFT 16
+#define E1000_SVT_OFF_HWM_MASK 0x0000001F
+
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+ bool state);
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 54d9dafaf12..b78e0217460 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -165,7 +165,7 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
u32 i;
- s32 ret_val = 0;
+ s32 ret_val;
u16 offset, nvm_alt_mac_addr_offset, nvm_data;
u8 alt_mac_addr[ETH_ALEN];
@@ -1021,6 +1021,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
@@ -1052,14 +1053,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
- ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+ ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
if (ret_val)
return ret_val;
- if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+ if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
e_dbg("Copper PHY and Auto Neg has not completed.\n");
return ret_val;
}
@@ -1070,11 +1071,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* Page Ability Register (Address 5) to determine how
* flow control was negotiated.
*/
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
if (ret_val)
return ret_val;
- ret_val =
- e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+ ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
if (ret_val)
return ret_val;
@@ -1111,8 +1111,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* 1 | DC | 1 | DC | E1000_fc_full
*
*/
- if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+ if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
/* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
@@ -1134,10 +1134,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*-------|---------|-------|---------|--------------------
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
*/
- else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
hw->fc.current_mode = e1000_fc_tx_pause;
e_dbg("Flow Control = Tx PAUSE frames only.\n");
}
@@ -1148,10 +1148,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*-------|---------|-------|---------|--------------------
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*/
- else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
- (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
- !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
- (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+ else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
+ (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
+ !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
+ (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
@@ -1185,6 +1185,130 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = er32(PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ e_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto-Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = er32(PCS_ANADV);
+ pcs_lp_ability_reg = er32(PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ e_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ e_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ e_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ e_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = er32(PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ ew32(PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = e1000e_force_mac_fc(hw);
+ if (ret_val) {
+ e_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
+
return 0;
}
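The SerDes branch added above resolves flow control from the PAUSE/ASM_DIR bits exactly as the table describes; reduced to a hypothetical helper for clarity:

    /* sketch: lcl/lp are the PCS_ANADV/PCS_LPAB words */
    static enum e1000_fc_mode example_resolve_fc(u32 lcl, u32 lp,
    					     enum e1000_fc_mode requested)
    {
    	bool l_pause = lcl & E1000_TXCW_PAUSE, l_asm = lcl & E1000_TXCW_ASM_DIR;
    	bool p_pause = lp & E1000_TXCW_PAUSE, p_asm = lp & E1000_TXCW_ASM_DIR;

    	if (l_pause && p_pause)	/* symmetric pause */
    		return (requested == e1000_fc_full) ? e1000_fc_full
    						    : e1000_fc_rx_pause;
    	if (!l_pause && l_asm && p_pause && p_asm)
    		return e1000_fc_tx_pause;
    	if (l_pause && l_asm && !p_pause && p_asm)
    		return e1000_fc_rx_pause;
    	return e1000_fc_none;
    }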
@@ -1231,8 +1355,8 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
* Sets the speed and duplex to gigabit full duplex (the only possible option)
* for fiber/serdes links.
**/
-s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed,
- u16 *duplex)
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
+ *hw, u16 *speed, u16 *duplex)
{
*speed = SPEED_1000;
*duplex = FULL_DUPLEX;
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
new file mode 100644
index 00000000000..a61fee404eb
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MAC_H_
+#define _E1000E_MAC_H_
+
+s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex);
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw,
+ u16 *speed, u16 *duplex);
+s32 e1000e_id_led_init_generic(struct e1000_hw *hw);
+s32 e1000e_led_on_generic(struct e1000_hw *hw);
+s32 e1000e_led_off_generic(struct e1000_hw *hw);
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+ u8 *mc_addr_list, u32 mc_addr_count);
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+s32 e1000e_setup_link_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw);
+
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000e_reset_adaptive(struct e1000_hw *hw);
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+void e1000e_update_adaptive(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 6dc47beb3ad..e4b0f1ef92f 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,19 +28,6 @@
#include "e1000.h"
-enum e1000_mng_mode {
- e1000_mng_mode_none = 0,
- e1000_mng_mode_asf,
- e1000_mng_mode_pt,
- e1000_mng_mode_ipmi,
- e1000_mng_mode_host_if_only
-};
-
-#define E1000_FACTPS_MNGCG 0x20000000
-
-/* Intel(R) Active Management Technology signature */
-#define E1000_IAMT_SIGNATURE 0x544D4149
-
/**
* e1000_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
new file mode 100644
index 00000000000..326897c29ea
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_MANAGE_H_
+#define _E1000E_MANAGE_H_
+
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+ e1000_mng_mode_none = 0,
+ e1000_mng_mode_asf,
+ e1000_mng_mode_pt,
+ e1000_mng_mode_ipmi,
+ e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG 0x20000000
+
+#define E1000_FWSM_MODE_MASK 0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2
+
+#define E1000_VFTA_ENTRY_SHIFT 5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+#define E1000_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C 0x02
+#define E1000_HICR_SV 0x04 /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET 0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE 0x544D4149
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 643c883dd79..a177b8b65c4 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -42,7 +42,6 @@
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
@@ -56,7 +55,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
+#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -87,20 +86,7 @@ struct e1000_reg_info {
char *name;
};
-#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
-#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
-#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
-#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
-#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
-
-#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
-#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
-#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
-#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
-#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
-
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
-
/* General Registers */
{E1000_CTRL, "CTRL"},
{E1000_STATUS, "STATUS"},
@@ -488,20 +474,87 @@ static int e1000_desc_unused(struct e1000_ring *ring)
}
/**
+ * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
+ * @adapter: board private structure
+ * @hwtstamps: time stamp structure to update
+ * @systim: unsigned 64bit system time value.
+ *
+ * Convert the system time value stored in the RX/TXSTMP registers into a
+ * hwtstamp which can be used by the upper level time stamping functions.
+ *
+ * The 'systim_lock' spinlock is used to protect the consistency of the
+ * system time value. This is needed because reading the 64 bit time
+ * value involves reading two 32 bit registers. The first read latches the
+ * value.
+ **/
+static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
+{
+ u64 ns;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_cyc2time(&adapter->tc, systim);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
+ * @adapter: board private structure
+ * @status: descriptor extended error and status field
+ * @skb: particular skb to include time stamp
+ *
+ * If the time stamp is valid, convert it into the timecounter ns value
+ * and store that result into the shhwtstamps structure which is passed
+ * up the network stack.
+ **/
+static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
+ struct sk_buff *skb)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u64 rxstmp;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
+ !(status & E1000_RXDEXT_STATERR_TST) ||
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
+
+ /* The Rx time stamp registers contain the time stamp. No other
+ * received packet will be time stamped until the Rx time stamp
+ * registers are read. Because only one packet can be time stamped
+ * at a time, the register values must belong to this packet and
+ * therefore none of the other additional attributes need to be
+ * compared.
+ */
+ rxstmp = (u64)er32(RXSTMPL);
+ rxstmp |= (u64)er32(RXSTMPH) << 32;
+ e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);
+
+ adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
+}
+
+/**
* e1000_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
- * @status: descriptor status field as written by hardware
+ * @staterr: descriptor extended error and status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
**/
static void e1000_receive_skb(struct e1000_adapter *adapter,
struct net_device *netdev, struct sk_buff *skb,
- u8 status, __le16 vlan)
+ u32 staterr, __le16 vlan)
{
u16 tag = le16_to_cpu(vlan);
+
+ e1000e_rx_hwtstamp(adapter, staterr, skb);
+
skb->protocol = eth_type_trans(skb, netdev);
- if (status & E1000_RXD_STAT_VP)
+ if (staterr & E1000_RXD_STAT_VP)
__vlan_hwaccel_put_tag(skb, tag);
napi_gro_receive(&adapter->napi, skb);
@@ -765,7 +818,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
struct e1000_buffer *buffer_info;
struct sk_buff *skb;
unsigned int i;
- unsigned int bufsz = 256 - 16 /* for skb_reserve */;
+ unsigned int bufsz = 256 - 16; /* for skb_reserve */
i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i];
@@ -1050,9 +1103,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
adapter->tx_hang_recheck = false;
netif_stop_queue(netdev);
- e1e_rphy(hw, PHY_STATUS, &phy_status);
- e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
- e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
+ e1e_rphy(hw, MII_BMSR, &phy_status);
+ e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
+ e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);
pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
@@ -1092,6 +1145,41 @@ static void e1000_print_hw_hang(struct work_struct *work)
}
/**
+ * e1000e_tx_hwtstamp_work - check for Tx time stamp
+ * @work: pointer to work struct
+ *
+ * This work function polls the TSYNCTXCTL valid bit to determine when a
+ * timestamp has been taken for the current stored skb. The timestamp must
+ * be for this skb because only one such packet is allowed in the queue.
+ */
+static void e1000e_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ tx_hwtstamp_work);
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (!adapter->tx_hwtstamp_skb)
+ return;
+
+ if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ u64 txstmp;
+
+ txstmp = er32(TXSTMPL);
+ txstmp |= (u64)er32(TXSTMPH) << 32;
+
+ e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
+
+ skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ } else {
+ /* reschedule to check later */
+ schedule_work(&adapter->tx_hwtstamp_work);
+ }
+}
+
+/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: Tx descriptor ring
*
@@ -1345,8 +1433,8 @@ copydone:
cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
- e1000_receive_skb(adapter, netdev, skb,
- staterr, rx_desc->wb.middle.vlan);
+ e1000_receive_skb(adapter, netdev, skb, staterr,
+ rx_desc->wb.middle.vlan);
next_desc:
rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
@@ -1645,7 +1733,7 @@ static void e1000e_downshift_workaround(struct work_struct *work)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr_msi(int irq, void *data)
+static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1671,7 +1759,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
/* disable receives */
u32 rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
+ adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -1711,7 +1799,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr(int irq, void *data)
+static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1751,7 +1839,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
/* disable receives */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- adapter->flags |= FLAG_RX_RESTART_NOW;
+ adapter->flags |= FLAG_RESTART_NOW;
}
/* guard against interrupt when we're going down */
if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -1786,7 +1874,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t e1000_msix_other(int irq, void *data)
+static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1818,8 +1906,7 @@ no_link_interrupt:
return IRQ_HANDLED;
}
-
-static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
+static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1837,7 +1924,7 @@ static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
+static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1924,7 +2011,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
/* Auto-Mask Other interrupts upon ICR read */
-#define E1000_EIAC_MASK_82574 0x01F00000
ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
ctrl_ext |= E1000_CTRL_EXT_EIAME;
ew32(CTRL_EXT, ctrl_ext);
@@ -2394,9 +2480,7 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
* while increasing bulk throughput. This functionality is controlled
* by the InterruptThrottleRate module parameter.
**/
-static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
- u16 itr_setting, int packets,
- int bytes)
+static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
{
unsigned int retval = itr_setting;
@@ -2441,7 +2525,6 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
static void e1000_set_itr(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
u16 current_itr;
u32 new_itr = adapter->itr;
@@ -2457,18 +2540,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2504,10 +2585,7 @@ set_itr_now:
if (adapter->msix_entries)
adapter->rx_ring->set_itr = 1;
else
- if (new_itr)
- ew32(ITR, 1000000000 / (new_itr * 256));
- else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, new_itr);
}
}
@@ -3049,7 +3127,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
/* just started the receive unit, no need to restart */
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ adapter->flags &= ~FLAG_RESTART_NOW;
}
/**
@@ -3144,18 +3222,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rxcsum &= ~E1000_RXCSUM_TUOFL;
ew32(RXCSUM, rxcsum);
- if (adapter->hw.mac.type == e1000_pch2lan) {
- /* With jumbo frames, excessive C-state transition
- * latencies result in dropped transactions.
- */
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ /* With jumbo frames, excessive C-state transition latencies result
+ * in dropped transactions.
+ */
+ if (adapter->netdev->mtu > ETH_DATA_LEN) {
+ u32 lat =
+ ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
+ adapter->max_frame_size) * 8 / 1000;
+
+ if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
ew32(RXDCTL(0), rxdctl | 0x3);
- pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
- } else {
- pm_qos_update_request(&adapter->netdev->pm_qos_req,
- PM_QOS_DEFAULT_VALUE);
}
+
+ pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
+ } else {
+ pm_qos_update_request(&adapter->netdev->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
}
/* Enable Receives */
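The lat value computed above is effectively the time, in microseconds, needed to fill the unused Rx packet-buffer space at gigabit line rate, and it becomes the PM QoS DMA-latency bound. A worked example with hypothetical numbers:

    /* hypothetical: Rx allocation 24 KB, jumbo max_frame_size 9018
     *   24 * 1024 - 9018   = 15558 bytes of headroom
     *   15558 * 8 / 1000   = 124  -> ~124 us latency budget
     */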
@@ -3344,6 +3427,241 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
}
/**
+ * e1000e_get_base_timinca - get default SYSTIM time increment attributes
+ * @adapter: board private structure
+ * @timinca: pointer to returned time increment attributes
+ *
+ * Get attributes for incrementing the System Time Register SYSTIML/H at
+ * the default base frequency, and set the cyclecounter shift value.
+ **/
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 incvalue, incperiod, shift;
+
+ /* Make sure clock is enabled on I217 before checking the frequency */
+ if ((hw->mac.type == e1000_pch_lpt) &&
+ !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
+ !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
+ u32 fextnvm7 = er32(FEXTNVM7);
+
+ if (!(fextnvm7 & (1 << 0))) {
+ ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ e1e_flush();
+ }
+ }
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ /* On I217, the clock frequency is 25MHz or 96MHz as
+ * indicated by the System Clock Frequency Indication
+ */
+ if ((hw->mac.type != e1000_pch_lpt) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ /* Stable 96MHz frequency */
+ incperiod = INCPERIOD_96MHz;
+ incvalue = INCVALUE_96MHz;
+ shift = INCVALUE_SHIFT_96MHz;
+ adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
+ ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
+
+ return 0;
+}
+
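Schematically, the returned timinca packs an increment period and a scaled increment value so that SYSTIM advances by (incvalue << shift) once every incperiod cycles of the selected 25 MHz or 96 MHz clock; adapter->cc.shift is set to match so the later cyclecounter conversion cancels the same scaling (a restatement of the code above, not additional hardware detail):

    /* timinca layout as built above:
     *   timinca = (incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
     *             ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK);
     */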
+/**
+ * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
+ * @adapter: board private structure
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of level 2 or 4".
+ **/
+static int e1000e_config_hwtstamp(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct hwtstamp_config *config = &adapter->hwtstamp_config;
+ u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
+ u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
+ u32 rxmtrl = 0;
+ u16 rxudp = 0;
+ bool is_l4 = false;
+ bool is_l2 = false;
+ u32 regval;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return -EINVAL;
+
+ /* flags reserved for future extensions - must be zero */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tsync_tx_ctl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tsync_rx_ctl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
+ rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ /* Also time stamps V2 L2 Path Delay Request/Response */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ /* Also time stamps V2 L2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* Hardware cannot filter just V2 L4 Sync messages;
+ * fall-through to V2 (both L2 and L4) Sync.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* Hardware cannot filter just V2 L4 Delay Request messages;
+ * fall-through to V2 (both L2 and L4) Delay Request.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Also time stamps V2 Path Delay Request/Response. */
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
+ rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ /* Hardware cannot filter just V2 L4 or L2 Event messages;
+ * fall-through to all V2 (both L2 and L4) Events.
+ */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ is_l2 = true;
+ is_l4 = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* For V1, the hardware can only filter Sync messages or
+ * Delay Request messages but not both so fall-through to
+ * time stamp all packets.
+ */
+ case HWTSTAMP_FILTER_ALL:
+ is_l2 = true;
+ is_l4 = true;
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* enable/disable Tx h/w time stamping */
+ regval = er32(TSYNCTXCTL);
+ regval &= ~E1000_TSYNCTXCTL_ENABLED;
+ regval |= tsync_tx_ctl;
+ ew32(TSYNCTXCTL, regval);
+ if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
+ (regval & E1000_TSYNCTXCTL_ENABLED)) {
+ e_err("Timesync Tx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* enable/disable Rx h/w time stamping */
+ regval = er32(TSYNCRXCTL);
+ regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
+ regval |= tsync_rx_ctl;
+ ew32(TSYNCRXCTL, regval);
+ if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK)) !=
+ (regval & (E1000_TSYNCRXCTL_ENABLED |
+ E1000_TSYNCRXCTL_TYPE_MASK))) {
+ e_err("Timesync Rx Control register not set as expected\n");
+ return -EAGAIN;
+ }
+
+ /* L2: define ethertype filter for time stamped packets */
+ if (is_l2)
+ rxmtrl |= ETH_P_1588;
+
+ /* define which PTP packets get time stamped */
+ ew32(RXMTRL, rxmtrl);
+
+ /* Filter by destination port */
+ if (is_l4) {
+ rxudp = PTP_EV_PORT;
+ cpu_to_be16s(&rxudp);
+ }
+ ew32(RXUDP, rxudp);
+
+ e1e_flush();
+
+ /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
+ er32(RXSTMPH);
+ er32(TXSTMPH);
+
+ /* Get and set the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &regval);
+ if (ret_val)
+ return ret_val;
+ ew32(TIMINCA, regval);
+
+ /* reset the ns time counter */
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+/**
* e1000_configure - configure the hardware for Rx and Tx
* @adapter: private board structure
**/
@@ -3509,14 +3827,17 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
case e1000_pch2lan:
case e1000_pch_lpt:
- fc->high_water = 0x05C20;
- fc->low_water = 0x05048;
- fc->pause_time = 0x0650;
fc->refresh_time = 0x0400;
- if (adapter->netdev->mtu > ETH_DATA_LEN) {
- pba = 14;
- ew32(PBA, pba);
+
+ if (adapter->netdev->mtu <= ETH_DATA_LEN) {
+ fc->high_water = 0x05C20;
+ fc->low_water = 0x05048;
+ fc->pause_time = 0x0650;
+ break;
}
+
+ fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
+ fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
break;
}
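/* Illustrative sketch only, not driver code: with jumbo frames the hunk above
 * derives the flow-control watermarks directly from the packet buffer
 * allocation - pause (XOFF) at 90% fill and resume (XON) at 80%.  The pba
 * value below is an assumed example, not a real allocation.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pba = 26;			/* assumed Rx packet buffer, KB */
	unsigned int bytes = pba << 10;		/* 26624 bytes */

	printf("high_water = %u\n", bytes * 9 / 10);	/* 23961 */
	printf("low_water  = %u\n", bytes * 8 / 10);	/* 21299 */
	return 0;
}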
@@ -3569,6 +3890,9 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
+ /* initialize systim and reset the ns time counter */
+ e1000e_config_hwtstamp(adapter);
+
if (!netif_running(adapter->netdev) &&
!test_bit(__E1000_TESTING, &adapter->state)) {
e1000_power_down_phy(adapter);
@@ -3705,6 +4029,24 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
}
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+ struct e1000_hw *hw = &adapter->hw;
+ cycle_t systim;
+
+ /* latch SYSTIMH on read of SYSTIML */
+ systim = (cycle_t)er32(SYSTIML);
+ systim |= (cycle_t)er32(SYSTIMH) << 32;
+
+ return systim;
+}
+
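/* Illustrative sketch only, not driver code: the cyclecounter registered in
 * e1000_sw_init() converts the raw 64-bit SYSTIM value read above into
 * nanoseconds as (cycles * mult) >> shift, and the timecounter layered on top
 * accumulates those deltas from the epoch given to timecounter_init().  With
 * mult = 1, the shift chosen in e1000e_get_base_timinca() does all the
 * scaling; the shift below is a placeholder, not the driver's real value.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* e.g. an assumed 8-bit shift: 256 raw SYSTIM units per nanosecond */
	printf("%llu ns\n", (unsigned long long)cyc2ns(256000, 1, 8));	/* 1000 ns */
	return 0;
}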
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
@@ -3730,6 +4072,17 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
if (e1000_alloc_queues(adapter))
return -ENOMEM;
+ /* Setup hardware time stamping cyclecounter */
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ adapter->cc.read = e1000e_cyclecounter_read;
+ adapter->cc.mask = CLOCKSOURCE_MASK(64);
+ adapter->cc.mult = 1;
+ /* cc.shift set in e1000e_get_base_timinca() */
+
+ spin_lock_init(&adapter->systim_lock);
+ INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
+ }
+
/* Explicitly disable IRQ since the NIC can be in any state. */
e1000_irq_disable(adapter);
@@ -3742,7 +4095,7 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3913,10 +4266,8 @@ static int e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to work around jumbo issue */
- if (adapter->hw.mac.type == e1000_pch2lan)
- pm_qos_add_request(&adapter->netdev->pm_qos_req,
- PM_QOS_CPU_DMA_LATENCY,
- PM_QOS_DEFAULT_VALUE);
+ pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -4024,8 +4375,7 @@ static int e1000_close(struct net_device *netdev)
!test_bit(__E1000_TESTING, &adapter->state))
e1000e_release_hw_control(adapter);
- if (adapter->hw.mac.type == e1000_pch2lan)
- pm_qos_remove_request(&adapter->netdev->pm_qos_req);
+ pm_qos_remove_request(&adapter->netdev->pm_qos_req);
pm_runtime_put_sync(&pdev->dev);
@@ -4312,14 +4662,14 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
- ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
- ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
- ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
- ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
- ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
- ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
+ ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
+ ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
+ ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
+ ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
+ ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
+ ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
+ ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
} else {
@@ -4346,9 +4696,8 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
+ pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name, adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
(ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
(ctrl & E1000_CTRL_RFCE) ? "Rx" :
@@ -4401,11 +4750,11 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
{
/* make sure the receive unit is started */
if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW)) {
+ (adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
ew32(RCTL, rctl | E1000_RCTL_EN);
- adapter->flags &= ~FLAG_RX_RESTART_NOW;
+ adapter->flags &= ~FLAG_RESTART_NOW;
}
}
@@ -4481,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
+
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
+
/* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
@@ -4492,9 +4848,9 @@ static void e1000_watchdog_task(struct work_struct *work)
(adapter->link_duplex == HALF_DUPLEX)) {
u16 autoneg_exp;
- e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+ e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
- if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+ if (!(autoneg_exp & EXPANSION_NWAY))
e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
@@ -4567,15 +4923,22 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->link_speed = 0;
adapter->link_duplex = 0;
/* Link status message must follow this format */
- printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
- adapter->netdev->name);
+ pr_info("%s NIC Link is Down\n", adapter->netdev->name);
netif_carrier_off(netdev);
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- if (adapter->flags & FLAG_RX_NEEDS_RESTART)
- schedule_work(&adapter->reset_task);
+ /* The link is lost so the controller stops DMA.
+ * If there is queued Tx work that cannot be done
+ * or if this is an 8000ES2LAN, which needs an Rx packet
+ * buffer work-around on a link-down event, reset the
+ * controller to flush the Tx/Rx packet buffers.
+ * (Do the reset outside of interrupt context).
+ */
+ if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
+ (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+ adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
LINK_TIMEOUT);
@@ -4597,20 +4960,14 @@ link_up:
adapter->gotc_old = adapter->stats.gotc;
spin_unlock(&adapter->stats64_lock);
- e1000e_update_adaptive(&adapter->hw);
-
- if (!netif_carrier_ok(netdev) &&
- (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
+ if (adapter->flags & FLAG_RESTART_NOW) {
schedule_work(&adapter->reset_task);
/* return immediately since reset is imminent */
return;
}
+ e1000e_update_adaptive(&adapter->hw);
+
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (adapter->itr_setting == 4) {
/* Symmetric Tx/Rx gets a reduced ITR=2000;
@@ -4647,6 +5004,17 @@ link_up:
if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
e1000e_check_82574_phy_workaround(adapter);
+ /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
+ if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
+ er32(RXSTMPH);
+ adapter->rx_hwtstamp_cleared++;
+ } else {
+ adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
+ }
+ }
+
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4658,6 +5026,7 @@ link_up:
#define E1000_TX_FLAGS_TSO 0x00000004
#define E1000_TX_FLAGS_IPV4 0x00000008
#define E1000_TX_FLAGS_NO_FCS 0x00000010
+#define E1000_TX_FLAGS_HWTSTAMP 0x00000020
#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT 16
@@ -4916,6 +5285,11 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
txd_lower &= ~(E1000_TXD_CMD_IFCS);
+ if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
+ txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+ txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
+ }
+
i = tx_ring->next_to_use;
do {
@@ -4964,12 +5338,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
struct e1000_hw *hw = &adapter->hw;
u16 length, offset;
- if (vlan_tx_tag_present(skb)) {
- if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
- (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
- return 0;
- }
+ if (vlan_tx_tag_present(skb) &&
+ !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
+ (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+ return 0;
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
@@ -5140,7 +5513,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
nr_frags);
if (count) {
- skb_tx_timestamp(skb);
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !adapter->tx_hwtstamp_skb)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
+ adapter->tx_hwtstamp_skb = skb_get(skb);
+ schedule_work(&adapter->tx_hwtstamp_work);
+ } else {
+ skb_tx_timestamp(skb);
+ }
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
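/* Illustrative sketch only (userspace): the SKBTX_HW_TSTAMP branch added above
 * fires when a socket has requested hardware Tx time stamps.  A sender enables
 * that with SO_TIMESTAMPING; the resulting stamp comes back on the socket
 * error queue (MSG_ERRQUEUE) as an SCM_TIMESTAMPING control message, which is
 * not shown here.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

int main(void)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |	/* stamp outgoing packets in hardware */
		    SOF_TIMESTAMPING_RAW_HARDWARE;	/* report the raw hardware stamp */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");

	/* ...send a packet, then recvmsg(fd, ..., MSG_ERRQUEUE) to collect the stamp */
	return 0;
}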
@@ -5180,10 +5561,9 @@ static void e1000_reset_task(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
- (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ if (!(adapter->flags & FLAG_RESTART_NOW)) {
e1000e_dump(adapter);
- e_err("Reset adapter\n");
+ e_err("Reset adapter unexpectedly\n");
}
e1000e_reinit_locked(adapter);
}
@@ -5369,6 +5749,61 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
return 0;
}
+/**
+ * e1000e_hwtstamp_ioctl - control hardware time stamping
+ * @netdev: network interface device structure
+ * @ifreq: interface request
+ *
+ * Outgoing time stamping can be enabled and disabled. Play nice and
+ * disable it when requested, although it shouldn't cause any overhead
+ * when no packet needs it. At most one packet in the queue may be
+ * marked for time stamping, otherwise it would be impossible to tell
+ * for sure to which packet the hardware time stamp belongs.
+ *
+ * Incoming time stamping has to be configured via the hardware filters.
+ * Not all combinations are supported, in particular event type has to be
+ * specified. Matching the kind of event packet is not supported, with the
+ * exception of "all V2 events regardless of level 2 or 4".
+ **/
+static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int ret_val;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ adapter->hwtstamp_config = config;
+
+ ret_val = e1000e_config_hwtstamp(adapter);
+ if (ret_val)
+ return ret_val;
+
+ config = adapter->hwtstamp_config;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* With V2 type filters which specify a Sync or Delay Request,
+ * Path Delay Request/Response messages are also time stamped
+ * by hardware, so notify the caller that the requested
+ * packets plus some others are time stamped.
+ */
+ config.rx_filter = HWTSTAMP_FILTER_SOME;
+ break;
+ default:
+ break;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -5376,6 +5811,8 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return e1000_mii_ioctl(netdev, ifr, cmd);
+ case SIOCSHWTSTAMP:
+ return e1000e_hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
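/* Illustrative sketch only (userspace): exercising the SIOCSHWTSTAMP path
 * wired up above with the standard hwtstamp_config interface.  The interface
 * name "eth0" is an assumed example.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* the driver may rewrite rx_filter (e.g. to HWTSTAMP_FILTER_SOME) */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	return 0;
}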
@@ -5386,7 +5823,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
struct e1000_hw *hw = &adapter->hw;
u32 i, mac_reg;
u16 phy_reg, wuc_enable;
- int retval = 0;
+ int retval;
/* copy MAC RARs to PHY RARs */
e1000_copy_rx_addrs_to_phy_ich8lan(hw);
@@ -5600,14 +6037,21 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ u16 aspm_ctl = 0;
+
+ if (state & PCIE_LINK_STATE_L0S)
+ aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
if (pdev->bus->self)
pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- state);
+ aspm_ctl);
}
#endif
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
@@ -5792,7 +6236,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
#ifdef CONFIG_NET_POLL_CONTROLLER
-static irqreturn_t e1000_intr_msix(int irq, void *data)
+static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
{
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5956,7 +6400,6 @@ static void e1000_io_resume(struct pci_dev *pdev)
*/
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
-
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
@@ -6114,8 +6557,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
+ pci_select_bars(pdev, IORESOURCE_MEM),
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -6274,11 +6717,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->perm_addr);
+ netdev->dev_addr);
err = -EIO;
goto err_eeprom;
}
@@ -6364,6 +6806,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
@@ -6412,6 +6857,8 @@ static void e1000_remove(struct pci_dev *pdev)
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
+ e1000e_ptp_remove(adapter);
+
/* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
@@ -6426,6 +6873,14 @@ static void e1000_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->update_phy_task);
cancel_work_sync(&adapter->print_hang_task);
+ if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
+ cancel_work_sync(&adapter->tx_hwtstamp_work);
+ if (adapter->tx_hwtstamp_skb) {
+ dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ adapter->tx_hwtstamp_skb = NULL;
+ }
+ }
+
if (!(netdev->flags & IFF_UP))
e1000_power_down_phy(adapter);
@@ -6578,7 +7033,7 @@ static int __init e1000_init_module(void)
int ret;
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index b6468804cb2..84fecc26816 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -359,7 +359,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/* A check for invalid values: offset too large, too many words,
@@ -371,16 +371,18 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
return -E1000_ERR_NVM;
}
- ret_val = nvm->ops.acquire(hw);
- if (ret_val)
- return ret_val;
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = e1000_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = e1000_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
e1000_standby_nvm(hw);
@@ -413,12 +415,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
+ usleep_range(10000, 20000);
+ nvm->ops.release(hw);
}
- usleep_range(10000, 20000);
-release:
- nvm->ops.release(hw);
-
return ret_val;
}
@@ -464,8 +464,8 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
if (nvm_data != NVM_PBA_PTR_GUARD) {
e_dbg("NVM PBA number is not stored as string\n");
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
+ /* make sure the caller's buffer is big enough to store the PBA */
+ if (pba_num_size < E1000_PBANUM_LENGTH) {
e_dbg("PBA string buffer too small\n");
return E1000_ERR_NO_SPACE;
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
new file mode 100644
index 00000000000..45fc6956162
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 89d536dd7ff..98da75dff93 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -53,8 +53,7 @@ MODULE_PARM_DESC(copybreak,
*/
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int X[E1000_MAX_NIC+1] \
- = E1000_PARAM_INIT; \
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
@@ -447,8 +446,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
e1000_validate_option(&spd, &opt, adapter);
- if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
- && spd)
+ if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 28b38ff37e8..0930c136aa3 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -28,16 +28,12 @@
#include "e1000.h"
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
-static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
u16 *data, bool read, bool page_set);
static u32 e1000_get_phy_addr_for_hv_page(u32 page);
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read);
+ u16 *data, bool read);
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
@@ -57,48 +53,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
-#define BM_PHY_REG_PAGE(offset) \
- ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
-#define BM_PHY_REG_NUM(offset) \
- ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
- (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
- ~MAX_PHY_REG_ADDRESS)))
-
-#define HV_INTC_FC_PAGE_START 768
-#define I82578_ADDR_REG 29
-#define I82577_ADDR_REG 16
-#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
-#define I82577_CTRL_REG 23
-
-/* 82577 specific PHY registers */
-#define I82577_PHY_CTRL_2 18
-#define I82577_PHY_STATUS_2 26
-#define I82577_PHY_DIAG_STATUS 31
-
-/* I82577 PHY Status 2 */
-#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
-#define I82577_PHY_STATUS2_MDIX 0x0800
-#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
-#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
-
-/* I82577 PHY Control 2 */
-#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
-#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
-#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
-
-/* I82577 PHY Diagnostics Status */
-#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
-#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
-
-/* BM PHY Copper Specific Control 1 */
-#define BM_CS_CTRL1 16
-
-#define HV_MUX_DATA_CTRL PHY_REG(776, 16)
-#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400
-#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004
-
/**
* e1000e_check_reset_block_generic - Check if PHY reset is blocked
* @hw: pointer to the HW structure
@@ -135,13 +89,13 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
return 0;
while (retry_count < 2) {
- ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
- ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+ ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -645,31 +599,31 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
u16 phy_data;
/* Resolve Master/Slave mode */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data);
if (ret_val)
return ret_val;
/* load defaults for future use */
- hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
- ((phy_data & CR_1000T_MS_VALUE) ?
+ hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ?
+ ((phy_data & CTL1000_AS_MASTER) ?
e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
switch (hw->phy.ms_type) {
case e1000_ms_force_master:
- phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+ phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
break;
case e1000_ms_force_slave:
- phy_data |= CR_1000T_MS_ENABLE;
- phy_data &= ~(CR_1000T_MS_VALUE);
+ phy_data |= CTL1000_ENABLE_MASTER;
+ phy_data &= ~(CTL1000_AS_MASTER);
break;
case e1000_ms_auto:
- phy_data &= ~CR_1000T_MS_ENABLE;
+ phy_data &= ~CTL1000_ENABLE_MASTER;
/* fall-through */
default:
break;
}
- return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
+ return e1e_wphy(hw, MII_CTRL1000, phy_data);
}
/**
@@ -792,7 +746,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Commit the changes. */
- ret_val = e1000e_commit_phy(hw);
+ ret_val = phy->ops.commit(hw);
if (ret_val) {
e_dbg("Error committing the PHY changes\n");
return ret_val;
@@ -848,10 +802,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
}
/* Commit the changes. */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val) {
- e_dbg("Error committing the PHY changes\n");
- return ret_val;
+ if (phy->ops.commit) {
+ ret_val = phy->ops.commit(hw);
+ if (ret_val) {
+ e_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
}
if (phy->type == e1000_phy_82578) {
@@ -895,10 +851,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
msleep(100);
/* disable lplu d0 during driver init */
- ret_val = e1000_set_d0_lplu_state(hw, false);
- if (ret_val) {
- e_dbg("Error Disabling LPLU D0\n");
- return ret_val;
+ if (hw->phy.ops.set_d0_lplu_state) {
+ ret_val = hw->phy.ops.set_d0_lplu_state(hw, false);
+ if (ret_val) {
+ e_dbg("Error Disabling LPLU D0\n");
+ return ret_val;
+ }
}
/* Configure mdi-mdix settings */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
@@ -943,12 +901,12 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
return ret_val;
/* Set auto Master/Slave resolution process */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &data);
if (ret_val)
return ret_val;
- data &= ~CR_1000T_MS_ENABLE;
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+ data &= ~CTL1000_ENABLE_MASTER;
+ ret_val = e1e_wphy(hw, MII_CTRL1000, data);
if (ret_val)
return ret_val;
}
@@ -978,13 +936,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
phy->autoneg_advertised &= phy->autoneg_mask;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
- ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+ ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
/* Read the MII 1000Base-T Control Register (Address 9). */
- ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+ ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
}
@@ -1000,36 +958,35 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
- mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
- NWAY_AR_100TX_HD_CAPS |
- NWAY_AR_10T_FD_CAPS |
- NWAY_AR_10T_HD_CAPS);
- mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+ mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF |
+ ADVERTISE_10FULL | ADVERTISE_10HALF);
+ mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
/* Do we want to advertise 10 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
e_dbg("Advertise 10mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10HALF;
}
/* Do we want to advertise 10 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
e_dbg("Advertise 10mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_10FULL;
}
/* Do we want to advertise 100 Mb Half Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
e_dbg("Advertise 100mb Half duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100HALF;
}
/* Do we want to advertise 100 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
e_dbg("Advertise 100mb Full duplex\n");
- mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+ mii_autoneg_adv_reg |= ADVERTISE_100FULL;
}
/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
@@ -1039,14 +996,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Do we want to advertise 1000 Mb Full Duplex? */
if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
e_dbg("Advertise 1000mb Full duplex\n");
- mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+ mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
}
/* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+ * Advertisement Register (MII_ADVERTISE) and re-start auto-
* negotiation.
*
* The possible values of the "fc" parameter are:
@@ -1064,7 +1021,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
/* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
- mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg &=
+ ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
case e1000_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
@@ -1076,34 +1034,36 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* (in e1000e_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
case e1000_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
- mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
- mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+ mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
+ mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
break;
case e1000_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
- mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+ mii_autoneg_adv_reg |=
+ (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
break;
default:
e_dbg("Flow control param set incorrectly\n");
return -E1000_ERR_CONFIG;
}
- ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+ ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
if (phy->autoneg_mask & ADVERTISE_1000_FULL)
- ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+ ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg);
return ret_val;
}
@@ -1145,12 +1105,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
if (ret_val)
return ret_val;
- phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
if (ret_val)
return ret_val;
@@ -1196,7 +1156,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
* depending on user settings.
*/
e_dbg("Forcing Speed and Duplex\n");
- ret_val = e1000_phy_force_speed_duplex(hw);
+ ret_val = hw->phy.ops.force_speed_duplex(hw);
if (ret_val) {
e_dbg("Error Forcing Speed and Duplex\n");
return ret_val;
@@ -1237,13 +1197,13 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -1315,20 +1275,22 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
e_dbg("M88E1000 PSCR: %X\n", phy_data);
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
/* Reset the phy to commit changes. */
- ret_val = e1000e_commit_phy(hw);
- if (ret_val)
- return ret_val;
+ if (hw->phy.ops.commit) {
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ return ret_val;
+ }
if (phy->autoneg_wait_to_complete) {
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
@@ -1406,13 +1368,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
u16 data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+ ret_val = e1e_wphy(hw, MII_BMCR, data);
if (ret_val)
return ret_val;
@@ -1456,13 +1418,13 @@ s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
/**
* e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
* @hw: pointer to the HW structure
- * @phy_ctrl: pointer to current value of PHY_CONTROL
+ * @phy_ctrl: pointer to current value of MII_BMCR
*
* Forces speed and duplex on the PHY by doing the following: disable flow
* control, force speed/duplex on the MAC, disable auto speed detection,
* disable auto-negotiation, configure duplex, configure speed, configure
* the collision distance, write configuration to CTRL register. The
- * caller must write to the PHY_CONTROL register for these settings to
+ * caller must write to the MII_BMCR register for these settings to
* take effect.
**/
void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
@@ -1482,29 +1444,28 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
ctrl &= ~E1000_CTRL_ASDE;
/* Disable autoneg on the phy */
- *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+ *phy_ctrl &= ~BMCR_ANENABLE;
/* Forcing Full or Half Duplex? */
if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
ctrl &= ~E1000_CTRL_FD;
- *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+ *phy_ctrl &= ~BMCR_FULLDPLX;
e_dbg("Half Duplex\n");
} else {
ctrl |= E1000_CTRL_FD;
- *phy_ctrl |= MII_CR_FULL_DUPLEX;
+ *phy_ctrl |= BMCR_FULLDPLX;
e_dbg("Full Duplex\n");
}
/* Forcing 10mb or 100mb? */
if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
ctrl |= E1000_CTRL_SPD_100;
- *phy_ctrl |= MII_CR_SPEED_100;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+ *phy_ctrl |= BMCR_SPEED100;
+ *phy_ctrl &= ~BMCR_SPEED1000;
e_dbg("Forcing 100mb\n");
} else {
ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
- *phy_ctrl |= MII_CR_SPEED_10;
- *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+ *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100);
e_dbg("Forcing 10mb\n");
}
@@ -1745,13 +1706,13 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ if (phy_status & BMSR_ANEGCOMPLETE)
break;
msleep(100);
}
@@ -1778,21 +1739,21 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
- /* Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
udelay(usec_interval);
- ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+ ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & MII_SR_LINK_STATUS)
+ if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
mdelay(usec_interval/1000);
@@ -1962,21 +1923,19 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
- ret_val = e1000_get_cable_length(hw);
+ ret_val = hw->phy.ops.get_cable_length(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data);
if (ret_val)
return ret_val;
- phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (phy_data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (phy_data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
/* Set values to "undefined" */
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2026,21 +1985,19 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- ret_val = e1000_get_cable_length(hw);
+ ret_val = phy->ops.get_cable_length(hw);
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val)
return ret_val;
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -2114,12 +2071,12 @@ s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl);
if (ret_val)
return ret_val;
- phy_ctrl |= MII_CR_RESET;
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+ phy_ctrl |= BMCR_RESET;
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl);
if (ret_val)
return ret_val;
@@ -2166,17 +2123,17 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
phy->ops.release(hw);
- return e1000_get_phy_cfg_done(hw);
+ return phy->ops.get_cfg_done(hw);
}
/**
- * e1000e_get_cfg_done - Generic configuration done
+ * e1000e_get_cfg_done_generic - Generic configuration done
* @hw: pointer to the HW structure
*
* Generic function to wait 10 milliseconds for configuration to complete
* and return success.
**/
-s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw)
{
mdelay(10);
@@ -2266,38 +2223,6 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
return 0;
}
-/* Internal function pointers */
-
-/**
- * e1000_get_phy_cfg_done - Generic PHY configuration done
- * @hw: pointer to the HW structure
- *
- * Return success if silicon family did not implement a family specific
- * get_cfg_done function.
- **/
-static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
-{
- if (hw->phy.ops.get_cfg_done)
- return hw->phy.ops.get_cfg_done(hw);
-
- return 0;
-}
-
-/**
- * e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
- * @hw: pointer to the HW structure
- *
- * When the silicon family has not implemented a forced speed/duplex
- * function for the PHY, simply return 0.
- **/
-static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
-{
- if (hw->phy.ops.force_speed_duplex)
- return hw->phy.ops.force_speed_duplex(hw);
-
- return 0;
-}
-
/**
* e1000e_get_phy_type_from_id - Get PHY type from id
* @phy_id: phy_id read from the phy
@@ -2549,7 +2474,6 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
hw->phy.addr = 1;
if (offset > MAX_PHY_MULTI_PAGE_REG) {
-
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
page);
@@ -2672,7 +2596,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
**/
s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
{
- s32 ret_val = 0;
+ s32 ret_val;
/* Select Port Control Registers page */
ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
@@ -2781,9 +2705,9 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, PHY_CONTROL, &mii_reg);
- mii_reg &= ~MII_CR_POWER_DOWN;
- e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg &= ~BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
}
/**
@@ -2799,50 +2723,13 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
u16 mii_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, PHY_CONTROL, &mii_reg);
- mii_reg |= MII_CR_POWER_DOWN;
- e1e_wphy(hw, PHY_CONTROL, mii_reg);
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg |= BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
}
/**
- * e1000e_commit_phy - Soft PHY reset
- * @hw: pointer to the HW structure
- *
- * Performs a soft PHY reset on those that apply. This is a function pointer
- * entry point called by drivers.
- **/
-s32 e1000e_commit_phy(struct e1000_hw *hw)
-{
- if (hw->phy.ops.commit)
- return hw->phy.ops.commit(hw);
-
- return 0;
-}
-
-/**
- * e1000_set_d0_lplu_state - Sets low power link up state for D0
- * @hw: pointer to the HW structure
- * @active: boolean used to enable/disable lplu
- *
- * Success returns 0, Failure returns 1
- *
- * The low power link up (lplu) state is set to the power management level D0
- * and SmartSpeed is disabled when active is true, else clear lplu for D0
- * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
- * is used during Dx states where the power conservation is most important.
- * During driver activity, SmartSpeed should be enabled so performance is
- * maintained. This is a function pointer entry point called by drivers.
- **/
-static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
-{
- if (hw->phy.ops.set_d0_lplu_state)
- return hw->phy.ops.set_d0_lplu_state(hw, active);
-
- return 0;
-}
-
-/**
* __e1000_read_phy_reg_hv - Read HV PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
@@ -3104,8 +2991,8 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
u16 *data, bool read)
{
s32 ret_val;
- u32 addr_reg = 0;
- u32 data_reg = 0;
+ u32 addr_reg;
+ u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
addr_reg = (hw->phy.type == e1000_phy_82578) ?
@@ -3154,8 +3041,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, PHY_CONTROL, &data);
- if (data & PHY_CONTROL_LB)
+ e1e_rphy(hw, MII_BMCR, &data);
+ if (data & BMCR_LOOPBACK)
return 0;
/* check if link is up and at 1Gbps */
@@ -3173,8 +3060,9 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
msleep(200);
/* flush the packets in the fifo buffer */
- ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
- HV_MUX_DATA_CTRL_FORCE_SPEED);
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
if (ret_val)
return ret_val;
@@ -3218,13 +3106,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
u16 phy_data;
bool link;
- ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
if (ret_val)
return ret_val;
e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
- ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
if (ret_val)
return ret_val;
@@ -3292,17 +3180,15 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
if (ret_val)
return ret_val;
- phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
- phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
- ? e1000_1000t_rx_status_ok
- : e1000_1000t_rx_status_not_ok;
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
} else {
phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
phy->local_rx = e1000_1000t_rx_status_undefined;
@@ -3333,7 +3219,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
I82577_DSTATUS_CABLE_LENGTH_SHIFT;
if (length == E1000_CABLE_LENGTH_UNDEFINED)
- ret_val = -E1000_ERR_PHY;
+ return -E1000_ERR_PHY;
phy->cable_length = length;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
new file mode 100644
index 00000000000..f4f71b9991e
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_PHY_H_
+#define _E1000E_PHY_H_
+
+s32 e1000e_check_downshift(struct e1000_hw *hw);
+s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw);
+s32 e1000e_get_phy_id(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ u32 usec_interval, bool *success);
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg);
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data);
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR 8
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */
+#define IGP_PAGE_SHIFT 5
+#define PHY_REG_MASK 0x1F
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE 769
+#define BM_WUC_PAGE 800
+#define BM_WUC_ADDRESS_OPCODE 0x11
+#define BM_WUC_DATA_OPCODE 0x12
+#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
+#define BM_WUC_ENABLE_REG 17
+#define BM_WUC_ENABLE_BIT (1 << 2)
+#define BM_WUC_HOST_WU_BIT (1 << 4)
+#define BM_WUC_ME_WU_BIT (1 << 5)
+
+#define PHY_UPPER_SHIFT 21
+#define BM_PHY_REG(page, reg) \
+ (((reg) & MAX_PHY_REG_ADDRESS) |\
+ (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+ (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
+#define BM_PHY_REG_PAGE(offset) \
+ ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+ ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+ (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+ ~MAX_PHY_REG_ADDRESS)))
+
+#define HV_INTC_FC_PAGE_START 768
+#define I82578_ADDR_REG 29
+#define I82577_ADDR_REG 16
+#define I82577_CFG_REG 22
+#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CTRL_REG 23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2 18
+#define I82577_PHY_LBK_CTRL 19
+#define I82577_PHY_STATUS_2 26
+#define I82577_PHY_DIAG_STATUS 31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY 0x0400
+#define I82577_PHY_STATUS2_MDIX 0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK 0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH 0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1 16
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS 17
+#define BM_CS_STATUS_LINK_UP 0x0400
+#define BM_CS_STATUS_RESOLVED 0x0800
+#define BM_CS_STATUS_SPEED_MASK 0xC000
+#define BM_CS_STATUS_SPEED_1000 0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS 26
+#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000
+#define HV_M_STATUS_SPEED_MASK 0x0300
+#define HV_M_STATUS_SPEED_1000 0x0200
+#define HV_M_STATUS_LINK_UP 0x0040
+
+#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK 0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX 0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
+
+#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX 0x0800
+#define IGP01E1000_PSSR_SPEED_MASK 0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM 4
+#define IGP02E1000_PHY_AGC_A 0x11B1
+#define IGP02E1000_PHY_AGC_B 0x12B1
+#define IGP02E1000_PHY_AGC_C 0x14B1
+#define IGP02E1000_PHY_AGC_D 0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK 0x7F
+#define IGP02E1000_AGC_RANGE 15
+
+#define E1000_CABLE_LENGTH_UNDEFINED 0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16
+#define E1000_KMRNCTRLSTA_REN 0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */
+#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED 0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010
+#define IFE_PSC_FORCE_POLARITY 0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE 0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */
+
+#endif
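
The BM_PHY_REG(), BM_PHY_REG_PAGE() and BM_PHY_REG_NUM() macros above pack a PHY page and register number into a single 32-bit offset and recover them again. The following standalone sketch (not part of the patch) shows the round trip; it assumes MAX_PHY_REG_ADDRESS is 0x1F and PHY_PAGE_SHIFT is 5, values defined elsewhere in the driver.

#include <stdio.h>
#include <stdint.h>

#define MAX_PHY_REG_ADDRESS	0x1F	/* assumed, defined elsewhere in the driver */
#define PHY_PAGE_SHIFT		5	/* assumed, defined elsewhere in the driver */
#define PHY_UPPER_SHIFT		21

#define BM_PHY_REG(page, reg) \
	(((reg) & MAX_PHY_REG_ADDRESS) |\
	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
#define BM_PHY_REG_PAGE(offset) \
	((uint16_t)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
#define BM_PHY_REG_NUM(offset) \
	((uint16_t)(((offset) & MAX_PHY_REG_ADDRESS) |\
		    (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
		     ~MAX_PHY_REG_ADDRESS)))

int main(void)
{
	/* Pack page 800 (BM_WUC_PAGE), register 0x11 (BM_WUC_ADDRESS_OPCODE). */
	uint32_t off = BM_PHY_REG(800, 0x11);

	/* Prints: offset=0x6411 page=800 reg=0x11 */
	printf("offset=0x%x page=%u reg=0x%x\n",
	       (unsigned)off, BM_PHY_REG_PAGE(off), BM_PHY_REG_NUM(off));
	return 0;
}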
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
new file mode 100644
index 00000000000..b477fa53ec9
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -0,0 +1,277 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* PTP 1588 Hardware Clock (PHC)
+ * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
+ * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
+ */
+
+#include "e1000.h"
+
+/**
+ * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired frequency change in parts per billion
+ *
+ * Adjust the frequency of the PHC cycle counter by the indicated delta from
+ * the base frequency.
+ **/
+static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ struct e1000_hw *hw = &adapter->hw;
+ bool neg_adj = false;
+ u64 adjustment;
+ u32 timinca, incvalue;
+ s32 ret_val;
+
+ if ((delta > ptp->max_adj) || (delta <= -1000000000))
+ return -EINVAL;
+
+ if (delta < 0) {
+ neg_adj = true;
+ delta = -delta;
+ }
+
+ /* Get the System Time Register SYSTIM base frequency */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (ret_val)
+ return ret_val;
+
+ incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
+
+ adjustment = incvalue;
+ adjustment *= delta;
+ adjustment = div_u64(adjustment, 1000000000);
+
+ incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+
+ timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
+ timinca |= incvalue;
+
+ ew32(TIMINCA, timinca);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_adjtime - Shift the time of the hardware clock
+ * @ptp: ptp clock structure
+ * @delta: Desired change in nanoseconds
+ *
+ * Adjust the timer by resetting the timecounter structure.
+ **/
+static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ s64 now;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ now = timecounter_read(&adapter->tc);
+ now += delta;
+ timecounter_init(&adapter->tc, &adapter->cc, now);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter, convert the nanosecond value into a struct timespec
+ * and return it.
+ **/
+static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u32 remainder;
+ u64 ns;
+
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ ns = timecounter_read(&adapter->tc);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder);
+ ts->tv_nsec = remainder;
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_settime - Set the current time on the hardware clock
+ * @ptp: ptp clock structure
+ * @ts: timespec containing the new time for the cycle counter
+ *
+ * Reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ **/
+static int e1000e_phc_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 ns;
+
+ ns = ts->tv_sec * NSEC_PER_SEC;
+ ns += ts->tv_nsec;
+
+ /* reset the timecounter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc, ns);
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ return 0;
+}
+
+/**
+ * e1000e_phc_enable - enable or disable an ancillary feature
+ * @ptp: ptp clock structure
+ * @request: Desired resource to enable or disable
+ * @on: Caller passes one to enable or zero to disable
+ *
+ * Enable (or disable) ancillary features of the PHC subsystem.
+ * Currently, no ancillary features are supported.
+ **/
+static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp,
+ struct ptp_clock_request __always_unused *request,
+ int __always_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+static void e1000e_systim_overflow_work(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
+ systim_overflow_work.work);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec ts;
+
+ adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts);
+
+ e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+}
+
+static const struct ptp_clock_info e1000e_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = e1000e_phc_adjfreq,
+ .adjtime = e1000e_phc_adjtime,
+ .gettime = e1000e_phc_gettime,
+ .settime = e1000e_phc_settime,
+ .enable = e1000e_phc_enable,
+};
+
+/**
+ * e1000e_ptp_init - initialize PTP for devices which support it
+ * @adapter: board private structure
+ *
+ * This function performs the required steps for enabling PTP support.
+ * If the hardware does not support timestamping, the function returns
+ * without registering a PHC.
+ **/
+void e1000e_ptp_init(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ adapter->ptp_clock = NULL;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ adapter->ptp_clock_info = e1000e_ptp_clock_info;
+
+ snprintf(adapter->ptp_clock_info.name,
+ sizeof(adapter->ptp_clock_info.name), "%pm",
+ adapter->netdev->perm_addr);
+
+ switch (hw->mac.type) {
+ case e1000_pch2lan:
+ case e1000_pch_lpt:
+ if ((hw->mac.type != e1000_pch_lpt) ||
+ (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ adapter->ptp_clock_info.max_adj = 24000000 - 1;
+ break;
+ }
+ /* fall-through */
+ case e1000_82574:
+ case e1000_82583:
+ adapter->ptp_clock_info.max_adj = 600000000 - 1;
+ break;
+ default:
+ break;
+ }
+
+ INIT_DELAYED_WORK(&adapter->systim_overflow_work,
+ e1000e_systim_overflow_work);
+
+ schedule_delayed_work(&adapter->systim_overflow_work,
+ E1000_SYSTIM_OVERFLOW_PERIOD);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ adapter->ptp_clock = NULL;
+ e_err("ptp_clock_register failed\n");
+ } else {
+ e_info("registered PHC clock\n");
+ }
+}
+
+/**
+ * e1000e_ptp_remove - disable PTP device and stop the overflow check
+ * @adapter: board private structure
+ *
+ * Stop the PTP support, and cancel the delayed work.
+ **/
+void e1000e_ptp_remove(struct e1000_adapter *adapter)
+{
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ cancel_delayed_work_sync(&adapter->systim_overflow_work);
+
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ e_info("removed PHC\n");
+ }
+}
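
e1000e_phc_adjfreq() above scales the TIMINCA increment value by the requested delta in parts per billion: adjustment = incvalue * |delta| / 10^9, which is then added to or subtracted from incvalue. A minimal standalone arithmetic sketch, with a made-up increment value for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t incvalue = 16000000;	/* hypothetical TIMINCA increment value */
	int32_t delta = -250;		/* requested adjustment in parts per billion */
	int neg_adj = 0;
	uint64_t adjustment;

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	/* A ppb fraction of the current increment value. */
	adjustment = (uint64_t)incvalue * (uint32_t)delta;
	adjustment /= 1000000000ULL;

	incvalue = neg_adj ? incvalue - (uint32_t)adjustment
			   : incvalue + (uint32_t)adjustment;

	/* Prints: new incvalue = 15999996 (16000000 slowed by 250 ppb) */
	printf("new incvalue = %u\n", (unsigned)incvalue);
	return 0;
}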
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
new file mode 100644
index 00000000000..794fe149766
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -0,0 +1,252 @@
+/*******************************************************************************
+
+ Intel PRO/1000 Linux driver
+ Copyright(c) 1999 - 2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ Linux NICS <linux.nics@intel.com>
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_REGS_H_
+#define _E1000E_REGS_H_
+
+#define E1000_CTRL 0x00000 /* Device Control - RW */
+#define E1000_STATUS 0x00008 /* Device Status - RO */
+#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */
+#define E1000_EERD 0x00014 /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
+#define E1000_FLA 0x0001C /* Flash Access - RW */
+#define E1000_MDIC 0x00020 /* MDI Control - RW */
+#define E1000_SCTL 0x00024 /* SerDes Control - RW */
+#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
+#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
+#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
+#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
+#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FCT 0x00030 /* Flow Control Type - RW */
+#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
+#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
+#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
+#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
+#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
+#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
+#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
+#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */
+#define E1000_SVCR 0x000F0
+#define E1000_SVT 0x000F4
+#define E1000_LPIC 0x000FC /* Low Power IDLE control */
+#define E1000_RCTL 0x00100 /* Rx Control - RW */
+#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */
+#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */
+#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */
+#define E1000_TCTL 0x00400 /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */
+#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */
+#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL 0x00E00 /* LED Control - RW */
+#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */
+#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */
+#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
+#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
+#define E1000_PBS 0x01008 /* Packet Buffer Size */
+#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
+#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
+#define E1000_FLOP 0x0103C /* FLASH Opcode Register */
+#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */
+#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */
+#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
+#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
+#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */
+
+#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */
+#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */
+#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */
+#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */
+#define E1000_RFCTL 0x05008 /* Receive Filter Control */
+#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
+#define E1000_RA 0x05400 /* Receive Address - RW Array */
+#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC 0x05800 /* Wakeup Control - RW */
+#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */
+#define E1000_WUS 0x05810 /* Wakeup Status - RO */
+#define E1000_MANC 0x05820 /* Management Control - RW */
+#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF 0x08800 /* Host Interface */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
+/* Management Decision Filters */
+#define E1000_MDEF(_n) (0x05890 + (4 * (_n)))
+#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */
+#define E1000_GCR 0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */
+#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM 0x05B50 /* SW Semaphore */
+#define E1000_FWSM 0x05B54 /* FW Semaphore */
+/* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_SWSM2 0x05B58
+#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
+#define E1000_HICR 0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
+#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
+#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */
+#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */
+
+#endif
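
The queue-indexed convenience macros above (E1000_RDBAL(_n), E1000_TDT(_n), ...) select one of two register banks: queues 0-3 sit in the legacy block spaced 0x100 apart, higher queues in a second block spaced 0x40 apart. A small standalone sketch of how E1000_RDBAL() resolves:

#include <stdio.h>

#define E1000_RDBAL(_n)	((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
			 (0x0C000 + ((_n) * 0x40)))

int main(void)
{
	/* Prints: RDBAL(0)=0x02800 RDBAL(3)=0x02b00 RDBAL(5)=0x0c140 */
	printf("RDBAL(0)=0x%05x RDBAL(3)=0x%05x RDBAL(5)=0x%05x\n",
	       E1000_RDBAL(0), E1000_RDBAL(3), E1000_RDBAL(5));
	return 0;
}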
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 624476cfa72..f19700e285b 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -34,4 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o
+ e1000_i210.o igb_ptp.o igb_hwmon.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fdaaf2709d0..84e7e0909de 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/if_ether.h>
+#include <linux/i2c.h>
#include "e1000_mac.h"
#include "e1000_82575.h"
@@ -110,184 +111,168 @@ static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
return ext_mdio;
}
-static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+/**
+ * igb_init_phy_params_82575 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
- struct e1000_nvm_info *nvm = &hw->nvm;
- struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
- u32 eecd;
- s32 ret_val;
- u16 size;
- u32 ctrl_ext = 0;
+ s32 ret_val = 0;
+ u32 ctrl_ext;
- switch (hw->device_id) {
- case E1000_DEV_ID_82575EB_COPPER:
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- mac->type = e1000_82575;
- break;
- case E1000_DEV_ID_82576:
- case E1000_DEV_ID_82576_NS:
- case E1000_DEV_ID_82576_NS_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- case E1000_DEV_ID_82576_SERDES_QUAD:
- mac->type = e1000_82576;
- break;
- case E1000_DEV_ID_82580_COPPER:
- case E1000_DEV_ID_82580_FIBER:
- case E1000_DEV_ID_82580_QUAD_FIBER:
- case E1000_DEV_ID_82580_SERDES:
- case E1000_DEV_ID_82580_SGMII:
- case E1000_DEV_ID_82580_COPPER_DUAL:
- case E1000_DEV_ID_DH89XXCC_SGMII:
- case E1000_DEV_ID_DH89XXCC_SERDES:
- case E1000_DEV_ID_DH89XXCC_BACKPLANE:
- case E1000_DEV_ID_DH89XXCC_SFP:
- mac->type = e1000_82580;
- break;
- case E1000_DEV_ID_I350_COPPER:
- case E1000_DEV_ID_I350_FIBER:
- case E1000_DEV_ID_I350_SERDES:
- case E1000_DEV_ID_I350_SGMII:
- mac->type = e1000_i350;
- break;
- case E1000_DEV_ID_I210_COPPER:
- case E1000_DEV_ID_I210_COPPER_OEM1:
- case E1000_DEV_ID_I210_COPPER_IT:
- case E1000_DEV_ID_I210_FIBER:
- case E1000_DEV_ID_I210_SERDES:
- case E1000_DEV_ID_I210_SGMII:
- mac->type = e1000_i210;
- break;
- case E1000_DEV_ID_I211_COPPER:
- mac->type = e1000_i211;
- break;
- default:
- return -E1000_ERR_MAC_INIT;
- break;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
}
- /* Set media type */
- /*
- * The 82575 uses bits 22:23 for link mode. The mode can be changed
- * based on the EEPROM. We cannot rely upon device ID. There
- * is no distinguishable difference between fiber and internal
- * SerDes mode on the 82575. There can be an external PHY attached
- * on the SGMII interface. For this, we'll set sgmii_active to true.
- */
- phy->media_type = e1000_media_type_copper;
- dev_spec->sgmii_active = false;
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
ctrl_ext = rd32(E1000_CTRL_EXT);
- switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
- case E1000_CTRL_EXT_LINK_MODE_SGMII:
- dev_spec->sgmii_active = true;
- break;
- case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
- case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
- hw->phy.media_type = e1000_media_type_internal_serdes;
- break;
- default:
- break;
+
+ if (igb_sgmii_active_82575(hw)) {
+ phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = igb_phy_hw_reset;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
}
- /* Set mta register count */
- mac->mta_reg_count = 128;
- /* Set rar entry count */
- switch (mac->type) {
- case e1000_82576:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ igb_reset_mdicnfg_82580(hw);
+
+ if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ phy->ops.read_reg = igb_read_phy_reg_82580;
+ phy->ops.write_reg = igb_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = igb_read_phy_reg_gs40g;
+ phy->ops.write_reg = igb_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = igb_read_phy_reg_igp;
+ phy->ops.write_reg = igb_write_phy_reg_igp;
+ }
+ }
+
+ /* set lan id */
+ hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ E1000_STATUS_FUNC_SHIFT;
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = igb_get_phy_id_82575(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID)
+ phy->ops.get_cable_length =
+ igb_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = igb_get_cable_length_m88;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
- case e1000_82580:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ case IGP03E1000_E_PHY_ID:
+ phy->type = e1000_phy_igp_3;
+ phy->ops.get_phy_info = igb_get_phy_info_igp;
+ phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
break;
- case e1000_i350:
- mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+ case I82580_I_PHY_ID:
+ case I350_I_PHY_ID:
+ phy->type = e1000_phy_82580;
+ phy->ops.force_speed_duplex =
+ igb_phy_force_speed_duplex_82580;
+ phy->ops.get_cable_length = igb_get_cable_length_82580;
+ phy->ops.get_phy_info = igb_get_phy_info_82580;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
break;
- default:
- mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+ case I210_I_PHY_ID:
+ phy->type = e1000_phy_i210;
+ phy->ops.check_polarity = igb_check_polarity_m88;
+ phy->ops.get_phy_info = igb_get_phy_info_m88;
+ phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+ phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+ phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
}
- /* reset */
- if (mac->type >= e1000_82580)
- mac->ops.reset_hw = igb_reset_hw_82580;
- else
- mac->ops.reset_hw = igb_reset_hw_82575;
- if (mac->type >= e1000_i210) {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- } else {
- mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
- mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
- }
+out:
+ return ret_val;
+}
- /* Set if part includes ASF firmware */
- mac->asf_firmware_present = true;
- /* Set if manageability features are enabled. */
- mac->arc_subsystem_valid =
- (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
- ? true : false;
- /* enable EEE on i350 parts and later parts */
- if (mac->type >= e1000_i350)
- dev_spec->eee_disable = false;
- else
- dev_spec->eee_disable = true;
- /* physical interface link setup */
- mac->ops.setup_physical_interface =
- (hw->phy.media_type == e1000_media_type_copper)
- ? igb_setup_copper_link_82575
- : igb_setup_serdes_link_82575;
+/**
+ * igb_init_nvm_params_82575 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ u32 eecd = rd32(E1000_EECD);
+ u16 size;
- /* NVM initialization */
- eecd = rd32(E1000_EECD);
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
-
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
- /*
- * Check for invalid size
+ /* Just in case size is out of range, cap it to the largest
+ * EEPROM size supported
*/
- if ((hw->mac.type == e1000_82576) && (size > 15)) {
- pr_notice("The NVM size is not valid, defaulting to 32K\n");
+ if (size > 15)
size = 15;
- }
nvm->word_size = 1 << size;
if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
+
switch (nvm->override) {
case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
+ nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
+ nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
- nvm->page_size = eecd
- & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd
- & E1000_EECD_ADDR_BITS ? 16 : 8;
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
break;
}
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
- } else
+ } else {
nvm->type = e1000_nvm_flash_hw;
+ }
/* NVM Function Pointers */
switch (hw->mac.type) {
@@ -344,118 +329,176 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
break;
}
- /* if part supports SR-IOV then initialize mailbox parameters */
+ return 0;
+}
+
+/**
+ * igb_init_mac_params_82575 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+ /* Set mta register count */
+ mac->mta_reg_count = 128;
+ /* Set rar entry count */
switch (mac->type) {
case e1000_82576:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+ break;
+ case e1000_82580:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+ break;
case e1000_i350:
- igb_init_mbx_params_pf(hw);
+ mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
+ mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
break;
}
+ /* reset */
+ if (mac->type >= e1000_82580)
+ mac->ops.reset_hw = igb_reset_hw_82580;
+ else
+ mac->ops.reset_hw = igb_reset_hw_82575;
- /* setup PHY parameters */
- if (phy->media_type != e1000_media_type_copper) {
- phy->type = e1000_phy_none;
- return 0;
- }
-
- phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
- phy->reset_delay_us = 100;
-
- ctrl_ext = rd32(E1000_CTRL_EXT);
+ if (mac->type >= e1000_i210) {
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
- /* PHY function pointers */
- if (igb_sgmii_active_82575(hw)) {
- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
- ctrl_ext |= E1000_CTRL_I2C_ENA;
} else {
- phy->ops.reset = igb_phy_hw_reset;
- ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+ mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
}
- wr32(E1000_CTRL_EXT, ctrl_ext);
- igb_reset_mdicnfg_82580(hw);
-
- if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
- } else if ((hw->mac.type == e1000_82580)
- || (hw->mac.type == e1000_i350)) {
- phy->ops.read_reg = igb_read_phy_reg_82580;
- phy->ops.write_reg = igb_write_phy_reg_82580;
- } else if (hw->phy.type >= e1000_phy_i210) {
- phy->ops.read_reg = igb_read_phy_reg_gs40g;
- phy->ops.write_reg = igb_write_phy_reg_gs40g;
- } else {
- phy->ops.read_reg = igb_read_phy_reg_igp;
- phy->ops.write_reg = igb_write_phy_reg_igp;
- }
+ /* Set if part includes ASF firmware */
+ mac->asf_firmware_present = true;
+ /* Set if manageability features are enabled. */
+ mac->arc_subsystem_valid =
+ (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+ ? true : false;
+ /* enable EEE on i350 parts and later parts */
+ if (mac->type >= e1000_i350)
+ dev_spec->eee_disable = false;
+ else
+ dev_spec->eee_disable = true;
+ /* physical interface link setup */
+ mac->ops.setup_physical_interface =
+ (hw->phy.media_type == e1000_media_type_copper)
+ ? igb_setup_copper_link_82575
+ : igb_setup_serdes_link_82575;
- /* set lan id */
- hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
- E1000_STATUS_FUNC_SHIFT;
+ return 0;
+}
- /* Set phy->phy_addr and phy->id. */
- ret_val = igb_get_phy_id_82575(hw);
- if (ret_val)
- return ret_val;
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ s32 ret_val;
+ u32 ctrl_ext = 0;
- /* Verify phy id and set remaining function pointers */
- switch (phy->id) {
- case I347AT4_E_PHY_ID:
- case M88E1112_E_PHY_ID:
- case M88E1111_I_PHY_ID:
- phy->type = e1000_phy_m88;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
+ switch (hw->device_id) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ mac->type = e1000_82575;
+ break;
+ case E1000_DEV_ID_82576:
+ case E1000_DEV_ID_82576_NS:
+ case E1000_DEV_ID_82576_NS_SERDES:
+ case E1000_DEV_ID_82576_FIBER:
+ case E1000_DEV_ID_82576_SERDES:
+ case E1000_DEV_ID_82576_QUAD_COPPER:
+ case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+ case E1000_DEV_ID_82576_SERDES_QUAD:
+ mac->type = e1000_82576;
+ break;
+ case E1000_DEV_ID_82580_COPPER:
+ case E1000_DEV_ID_82580_FIBER:
+ case E1000_DEV_ID_82580_QUAD_FIBER:
+ case E1000_DEV_ID_82580_SERDES:
+ case E1000_DEV_ID_82580_SGMII:
+ case E1000_DEV_ID_82580_COPPER_DUAL:
+ case E1000_DEV_ID_DH89XXCC_SGMII:
+ case E1000_DEV_ID_DH89XXCC_SERDES:
+ case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+ case E1000_DEV_ID_DH89XXCC_SFP:
+ mac->type = e1000_82580;
+ break;
+ case E1000_DEV_ID_I350_COPPER:
+ case E1000_DEV_ID_I350_FIBER:
+ case E1000_DEV_ID_I350_SERDES:
+ case E1000_DEV_ID_I350_SGMII:
+ mac->type = e1000_i350;
+ break;
+ case E1000_DEV_ID_I210_COPPER:
+ case E1000_DEV_ID_I210_COPPER_OEM1:
+ case E1000_DEV_ID_I210_COPPER_IT:
+ case E1000_DEV_ID_I210_FIBER:
+ case E1000_DEV_ID_I210_SERDES:
+ case E1000_DEV_ID_I210_SGMII:
+ mac->type = e1000_i210;
+ break;
+ case E1000_DEV_ID_I211_COPPER:
+ mac->type = e1000_i211;
+ break;
+ default:
+ return -E1000_ERR_MAC_INIT;
+ break;
+ }
- if (phy->id == I347AT4_E_PHY_ID ||
- phy->id == M88E1112_E_PHY_ID)
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- else
- phy->ops.get_cable_length = igb_get_cable_length_m88;
+ /* Set media type */
+ /*
+ * The 82575 uses bits 22:23 for link mode. The mode can be changed
+ * based on the EEPROM. We cannot rely upon device ID. There
+ * is no distinguishable difference between fiber and internal
+ * SerDes mode on the 82575. There can be an external PHY attached
+ * on the SGMII interface. For this, we'll set sgmii_active to true.
+ */
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = false;
- if (phy->id == I210_I_PHY_ID) {
- phy->ops.get_cable_length =
- igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state =
- igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state =
- igb_set_d3_lplu_state_82580;
- }
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ dev_spec->sgmii_active = true;
break;
- case IGP03E1000_E_PHY_ID:
- phy->type = e1000_phy_igp_3;
- phy->ops.get_phy_info = igb_get_phy_info_igp;
- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
break;
- case I82580_I_PHY_ID:
- case I350_I_PHY_ID:
- phy->type = e1000_phy_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
- phy->ops.get_cable_length = igb_get_cable_length_82580;
- phy->ops.get_phy_info = igb_get_phy_info_82580;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+ default:
break;
- case I210_I_PHY_ID:
- phy->type = e1000_phy_i210;
- phy->ops.get_phy_info = igb_get_phy_info_m88;
- phy->ops.check_polarity = igb_check_polarity_m88;
- phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+ }
+
+ /* mac initialization and operations */
+ ret_val = igb_init_mac_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* NVM initialization */
+ ret_val = igb_init_nvm_params_82575(hw);
+ if (ret_val)
+ goto out;
+
+ /* if part supports SR-IOV then initialize mailbox parameters */
+ switch (mac->type) {
+ case e1000_82576:
+ case e1000_i350:
+ igb_init_mbx_params_pf(hw);
break;
default:
- return -E1000_ERR_PHY;
+ break;
}
- return 0;
+ /* setup PHY parameters */
+ ret_val = igb_init_phy_params_82575(hw);
+
+out:
+ return ret_val;
}
/**
@@ -2302,18 +2345,157 @@ out:
return ret_val;
}
+static const u8 e1000_emc_temp_data[4] = {
+ E1000_EMC_INTERNAL_DATA,
+ E1000_EMC_DIODE1_DATA,
+ E1000_EMC_DIODE2_DATA,
+ E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+ E1000_EMC_INTERNAL_THERM_LIMIT,
+ E1000_EMC_DIODE1_THERM_LIMIT,
+ E1000_EMC_DIODE2_THERM_LIMIT,
+ E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+/* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ */
+s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+ if (num_sensors > E1000_MAX_SENSORS)
+ num_sensors = E1000_MAX_SENSORS;
+
+ for (i = 1; i < num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+
+ if (sensor_location != 0)
+ hw->phy.ops.read_i2c_byte(hw,
+ e1000_emc_temp_data[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ &data->sensor[i].temp);
+ }
+ return status;
+}
+
+/* igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map
+ * and saves off the threshold and location values into mac.thermal_sensor_data
+ */
+s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+ s32 status = E1000_SUCCESS;
+ u16 ets_offset;
+ u16 ets_cfg;
+ u16 ets_sensor;
+ u8 low_thresh_delta;
+ u8 num_sensors;
+ u8 sensor_index;
+ u8 sensor_location;
+ u8 therm_limit;
+ u8 i;
+ struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+ if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+ return E1000_NOT_IMPLEMENTED;
+
+ memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+ data->sensor[0].location = 0x1;
+ data->sensor[0].caution_thresh =
+ (rd32(E1000_THHIGHTC) & 0xFF);
+ data->sensor[0].max_op_thresh =
+ (rd32(E1000_THLOWTC) & 0xFF);
+
+ /* Return the internal sensor only if ETS is unsupported */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+ return status;
+
+ hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+ if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+ != NVM_ETS_TYPE_EMC)
+ return E1000_NOT_IMPLEMENTED;
+
+ low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+ NVM_ETS_LTHRES_DELTA_SHIFT);
+ num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+ for (i = 1; i <= num_sensors; i++) {
+ hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+ sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+ NVM_ETS_DATA_INDEX_SHIFT);
+ sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+ NVM_ETS_DATA_LOC_SHIFT);
+ therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+ hw->phy.ops.write_i2c_byte(hw,
+ e1000_emc_therm_limit[sensor_index],
+ E1000_I2C_THERMAL_SENSOR_ADDR,
+ therm_limit);
+
+ if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
+ data->sensor[i].location = sensor_location;
+ data->sensor[i].caution_thresh = therm_limit;
+ data->sensor[i].max_op_thresh = therm_limit -
+ low_thresh_delta;
+ }
+ }
+ return status;
+}
+
static struct e1000_mac_operations e1000_mac_ops_82575 = {
.init_hw = igb_init_hw_82575,
.check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575,
.get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+#ifdef CONFIG_IGB_HWMON
+ .get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
+ .init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
+#endif
};
static struct e1000_phy_operations e1000_phy_ops_82575 = {
.acquire = igb_acquire_phy_82575,
.get_cfg_done = igb_get_cfg_done_82575,
.release = igb_release_phy_82575,
+ .write_i2c_byte = igb_write_i2c_byte,
+ .read_i2c_byte = igb_read_i2c_byte,
};
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 44b76b3b681..73ab41f0e03 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -32,6 +32,10 @@ extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_DEF1_DEF2 << 8) | \
@@ -260,5 +264,16 @@ void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_set_eee_i350(struct e1000_hw *);
-
+s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
+s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+
+#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
+#define E1000_EMC_INTERNAL_DATA 0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20
+#define E1000_EMC_DIODE1_DATA 0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT 0x19
+#define E1000_EMC_DIODE2_DATA 0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A
+#define E1000_EMC_DIODE3_DATA 0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT 0x30
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 45dce06eff2..7e13337d3b9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -470,6 +470,7 @@
#define E1000_ERR_NO_SPACE 17
#define E1000_ERR_NVM_PBA_SECTION 18
#define E1000_ERR_INVM_VALUE_NOT_FOUND 19
+#define E1000_ERR_I2C 20
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
@@ -674,6 +675,18 @@
#define NVM_COMB_VER_SHFT 8
#define NVM_VER_INVALID 0xFFFF
#define NVM_ETRACK_SHIFT 16
+#define NVM_ETS_CFG 0x003E
+#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
+#define NVM_ETS_LTHRES_DELTA_SHIFT 6
+#define NVM_ETS_TYPE_MASK 0x0038
+#define NVM_ETS_TYPE_SHIFT 3
+#define NVM_ETS_TYPE_EMC 0x000
+#define NVM_ETS_NUM_SENSORS_MASK 0x0007
+#define NVM_ETS_DATA_LOC_MASK 0x3C00
+#define NVM_ETS_DATA_LOC_SHIFT 10
+#define NVM_ETS_DATA_INDEX_MASK 0x0300
+#define NVM_ETS_DATA_INDEX_SHIFT 8
+#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
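
The NVM_ETS_* masks added above are what igb_get_thermal_sensor_data_generic() and igb_init_thermal_sensor_thresh_generic() use to decode the External Thermal Sensor configuration word read from the NVM. A standalone sketch with a made-up configuration word:

#include <stdio.h>
#include <stdint.h>

#define NVM_ETS_LTHRES_DELTA_MASK	0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT	6
#define NVM_ETS_TYPE_MASK		0x0038
#define NVM_ETS_TYPE_SHIFT		3
#define NVM_ETS_TYPE_EMC		0x000
#define NVM_ETS_NUM_SENSORS_MASK	0x0007

int main(void)
{
	uint16_t ets_cfg = 0x0143;	/* hypothetical NVM ETS configuration word */
	unsigned type = (ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT;
	unsigned num_sensors = ets_cfg & NVM_ETS_NUM_SENSORS_MASK;
	unsigned low_delta = (ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
			     NVM_ETS_LTHRES_DELTA_SHIFT;

	/* Prints: type=0 (EMC) sensors=3 low_thresh_delta=5 */
	printf("type=%u (%s) sensors=%u low_thresh_delta=%u\n",
	       type, type == NVM_ETS_TYPE_EMC ? "EMC" : "unknown",
	       num_sensors, low_delta);
	return 0;
}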
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index c2a51dcda55..0d5cf9c63d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -325,6 +325,10 @@ struct e1000_mac_operations {
s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
+#ifdef CONFIG_IGB_HWMON
+ s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+ s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+#endif
};
@@ -342,6 +346,8 @@ struct e1000_phy_operations {
s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+ s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
@@ -354,6 +360,19 @@ struct e1000_nvm_operations {
s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
+#define E1000_MAX_SENSORS 3
+
+struct e1000_thermal_diode_data {
+ u8 location;
+ u8 temp;
+ u8 caution_thresh;
+ u8 max_op_thresh;
+};
+
+struct e1000_thermal_sensor_data {
+ struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+};
+
struct e1000_info {
s32 (*get_invariants)(struct e1000_hw *);
struct e1000_mac_operations *mac_ops;
@@ -399,6 +418,7 @@ struct e1000_mac_info {
bool report_tx_early;
bool serdes_has_link;
bool tx_pkt_filtering;
+ struct e1000_thermal_sensor_data thermal_sensor_data;
};
struct e1000_phy_info {
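
A hedged sketch of how the new CONFIG_IGB_HWMON mac ops and the generic thermal helpers declared in e1000_82575.h are expected to meet (the corresponding e1000_82575.c hunk is not shown in this excerpt, so the wiring below is illustrative only):

#ifdef CONFIG_IGB_HWMON
/* Illustrative wiring of the new callbacks; the real assignment happens
 * in the 82575/i350 invariants code, which is outside this excerpt. */
static void igb_example_wire_thermal_ops(struct e1000_hw *hw)
{
	hw->mac.ops.get_thermal_sensor_data =
		igb_get_thermal_sensor_data_generic;
	hw->mac.ops.init_thermal_sensor_thresh =
		igb_init_thermal_sensor_thresh_generic;
}
#endif
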
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index fbcdbebb0b5..6a42344f24f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 1c89358a99a..e4e1a73b7c7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 101e6e4da97..a5c7200b9a7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e2b2c4b9c95..e6d6ce43326 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 5988b8958ba..38e0df35090 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index dbcfa3d5cae..c13b56d9edb 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index fbb7604db36..5b62adbe134 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 7012d458c6f..6bfc0c43aac 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2012 Intel Corporation.
+ Copyright(c) 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index fe76004aca4..2918c979b5b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index ed282f877d9..784fd1c4098 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index e5db48594e8..15343286082 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -75,6 +75,14 @@
#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */
#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */
#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */
+#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */
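
These I2CPARAMS bits are what the bit-bang callbacks added later in igb_main.c toggle; a minimal hedged sketch of putting the port into bit-bang mode before handing SDA/SCL to the algo-bit layer (this mirrors the expected hardware-layer behaviour rather than code in this patch):

/* Illustrative only: enable software (bit-bang) control of the SFP I2C
 * pins; the *_OE_N output-enable bits are active low, so setting them
 * here releases both lines until a callback drives them. */
static void igb_example_enable_i2c_bitbang(struct e1000_hw *hw)
{
	u32 i2cparams = rd32(E1000_I2CPARAMS);

	i2cparams |= E1000_I2CBB_EN;
	i2cparams |= E1000_I2C_DATA_OE_N | E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cparams);
	wrfl();
}
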
/* IEEE 1588 TIMESYNCH */
#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
@@ -124,6 +132,14 @@
/* Split and Replication RX Control - RW */
#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT 0x08100 /* Junction Temperature */
+#define E1000_THLOWTC 0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC 0x0810C /* High Threshold Control */
+#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */
+
/*
* Convenience macros
*
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 17f1686ee41..d27edbc6392 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,8 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
struct igb_adapter;
@@ -137,8 +139,6 @@ struct vf_data_storage {
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
-/* How many Tx Descriptors do we need to call netif_wake_queue ? */
-#define IGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -167,6 +167,17 @@ enum igb_tx_flags {
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
+/*
+ * The largest size we can write to the descriptor is 65535. In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR 15
+#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
@@ -219,6 +230,7 @@ struct igb_ring {
struct igb_tx_buffer *tx_buffer_info;
struct igb_rx_buffer *rx_buffer_info;
};
+ unsigned long last_rx_timestamp;
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
@@ -272,10 +284,18 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
@@ -301,6 +321,32 @@ static inline int igb_desc_unused(struct igb_ring *ring)
return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
+struct igb_i2c_client_list {
+ struct i2c_client *client;
+ struct igb_i2c_client_list *next;
+};
+
+#ifdef CONFIG_IGB_HWMON
+
+#define IGB_HWMON_TYPE_LOC 0
+#define IGB_HWMON_TYPE_TEMP 1
+#define IGB_HWMON_TYPE_CAUTION 2
+#define IGB_HWMON_TYPE_MAX 3
+
+struct hwmon_attr {
+ struct device_attribute dev_attr;
+ struct e1000_hw *hw;
+ struct e1000_thermal_diode_data *sensor;
+ char name[12];
+ };
+
+struct hwmon_buff {
+ struct device *device;
+ struct hwmon_attr *hwmon_list;
+ unsigned int n_hwmon;
+ };
+#endif
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -386,11 +432,22 @@ struct igb_adapter {
struct delayed_work ptp_overflow_work;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
+ unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ u32 tx_hwtstamp_timeouts;
+ u32 rx_hwtstamp_cleared;
char fw_version[32];
+#ifdef CONFIG_IGB_HWMON
+ struct hwmon_buff igb_hwmon_buff;
+ bool ets;
+#endif
+ struct i2c_algo_bit_data i2c_algo;
+ struct i2c_adapter i2c_adap;
+ struct igb_i2c_client_list *i2c_clients;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -449,6 +506,7 @@ extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb);
@@ -466,7 +524,10 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-
+#ifdef CONFIG_IGB_HWMON
+extern void igb_sysfs_exit(struct igb_adapter *adapter);
+extern int igb_sysfs_init(struct igb_adapter *adapter);
+#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index bfe9208c4b1..a3830a8ba4c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -92,6 +92,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
IGB_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
IGB_STAT("os2bmc_tx_by_host", stats.o2bspc),
IGB_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGB_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGB_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
};
#define IGB_NETDEV_STAT(_net_stat) { \
@@ -1889,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg)
- msleep(4000);
+ msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;
@@ -2272,12 +2274,21 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
+ case e1000_82575:
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
case e1000_82576:
case e1000_82580:
case e1000_i350:
case e1000_i210:
case e1000_i211:
info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
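
The expanded get_ts_info() report is what applications discover through ETHTOOL_GET_TS_INFO; a hedged userspace sketch of opting in to the hardware classes it now advertises for 82576 and later (socket setup omitted, flag set illustrative):

/* Userspace sketch, not part of the patch: request the timestamp
 * classes reported by the updated get_ts_info(). */
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int example_enable_so_timestamping(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}
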
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
new file mode 100644
index 00000000000..0a9b073d0b0
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -0,0 +1,242 @@
+/*******************************************************************************
+
+ Intel(R) Gigabit Ethernet Linux driver
+ Copyright(c) 2007-2013 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_IGB_HWMON
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ return sprintf(buf, "loc%u\n",
+ igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value;
+
+ /* reset the temp field */
+ igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+ value = igb_attr->sensor->temp;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->caution_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+ dev_attr);
+ unsigned int value = igb_attr->sensor->max_op_thresh;
+
+ /* display millidegree */
+ value *= 1000;
+
+ return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct, which holds the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+ unsigned int offset, int type)
+{
+ int rc;
+ unsigned int n_attr;
+ struct hwmon_attr *igb_attr;
+
+ n_attr = adapter->igb_hwmon_buff.n_hwmon;
+ igb_attr = &adapter->igb_hwmon_buff.hwmon_list[n_attr];
+
+ switch (type) {
+ case IGB_HWMON_TYPE_LOC:
+ igb_attr->dev_attr.show = igb_hwmon_show_location;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_label", offset);
+ break;
+ case IGB_HWMON_TYPE_TEMP:
+ igb_attr->dev_attr.show = igb_hwmon_show_temp;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_input", offset);
+ break;
+ case IGB_HWMON_TYPE_CAUTION:
+ igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_max", offset);
+ break;
+ case IGB_HWMON_TYPE_MAX:
+ igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+ snprintf(igb_attr->name, sizeof(igb_attr->name),
+ "temp%u_crit", offset);
+ break;
+ default:
+ rc = -EPERM;
+ return rc;
+ }
+
+ /* These are always the same regardless of type */
+ igb_attr->sensor =
+ &adapter->hw.mac.thermal_sensor_data.sensor[offset];
+ igb_attr->hw = &adapter->hw;
+ igb_attr->dev_attr.store = NULL;
+ igb_attr->dev_attr.attr.mode = S_IRUGO;
+ igb_attr->dev_attr.attr.name = igb_attr->name;
+ sysfs_attr_init(&igb_attr->dev_attr.attr);
+ rc = device_create_file(&adapter->pdev->dev,
+ &igb_attr->dev_attr);
+ if (rc == 0)
+ ++adapter->igb_hwmon_buff.n_hwmon;
+
+ return rc;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+ int i;
+
+ if (adapter == NULL)
+ return;
+
+ for (i = 0; i < adapter->igb_hwmon_buff.n_hwmon; i++) {
+ device_remove_file(&adapter->pdev->dev,
+ &adapter->igb_hwmon_buff.hwmon_list[i].dev_attr);
+ }
+
+ kfree(adapter->igb_hwmon_buff.hwmon_list);
+
+ if (adapter->igb_hwmon_buff.device)
+ hwmon_device_unregister(adapter->igb_hwmon_buff.device);
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+ igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+ struct hwmon_buff *igb_hwmon = &adapter->igb_hwmon_buff;
+ unsigned int i;
+ int n_attrs;
+ int rc = 0;
+
+ /* If this method isn't defined we don't support thermals */
+ if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+ goto exit;
+
+ /* Don't create thermal hwmon interface if no sensors present */
+ rc = (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw));
+ if (rc)
+ goto exit;
+
+ /* Allocate space for the maximum number of attributes:
+ * max num sensors * values (loc, temp, max, caution)
+ */
+ n_attrs = E1000_MAX_SENSORS * 4;
+ igb_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
+ GFP_KERNEL);
+ if (!igb_hwmon->hwmon_list) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ igb_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
+ if (IS_ERR(igb_hwmon->device)) {
+ rc = PTR_ERR(igb_hwmon->device);
+ goto err;
+ }
+
+ for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+ /* Only create hwmon sysfs entries for sensors that have
+ * meaningful data.
+ */
+ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+ continue;
+
+ /* Bail if any hwmon attr struct fails to initialize */
+ rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+ rc |= igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+ if (rc)
+ goto err;
+ }
+
+ goto exit;
+
+err:
+ igb_sysfs_del_adapter(adapter);
+exit:
+ return rc;
+}
+#endif
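
igb_add_hwmon_attr() registers the temp%u_label/input/max/crit files directly on the PCI device; a hedged userspace sketch of reading one of them (the PCI address below is purely illustrative):

/* Userspace sketch, not from the patch: read one sensor value exposed
 * by igb_add_hwmon_attr(); values are reported in millidegrees C. */
#include <stdio.h>

int main(void)
{
	unsigned int mdeg;
	FILE *f = fopen("/sys/bus/pci/devices/0000:01:00.0/temp1_input", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &mdeg) == 1)
		printf("sensor 1: %u.%03u degrees C\n",
		       mdeg / 1000, mdeg % 1000);
	fclose(f);
	return 0;
}
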
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 31cfe2ec75d..ed79a1c53b5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2012 Intel Corporation.
+ Copyright(c) 2007-2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
+#include <linux/i2c.h>
#include "igb.h"
#define MAJ 4
@@ -68,7 +69,8 @@ char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
+static const char igb_copyright[] =
+ "Copyright (c) 2007-2013 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -193,6 +195,7 @@ static const struct dev_pm_ops igb_pm_ops = {
};
#endif
static void igb_shutdown(struct pci_dev *);
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -234,6 +237,7 @@ static struct pci_driver igb_driver = {
.driver.pm = &igb_pm_ops,
#endif
.shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
.err_handler = &igb_err_handler
};
@@ -565,6 +569,91 @@ exit:
return;
}
+/* igb_get_i2c_data - Reads the I2C SDA data bit
+ * @data: pointer to the adapter private structure
+ *
+ * Returns the I2C data bit value
+ */
+static int igb_get_i2c_data(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+}
+
+/* igb_set_i2c_data - Sets the I2C data bit
+ * @data: pointer to hardware structure
+ * @state: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ */
+static void igb_set_i2c_data(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ if (state)
+ i2cctl |= E1000_I2C_DATA_OUT;
+ else
+ i2cctl &= ~E1000_I2C_DATA_OUT;
+
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
+ i2cctl |= E1000_I2C_CLK_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+
+}
+
+/* igb_set_i2c_clk - Sets the I2C SCL clock
+ * @data: pointer to hardware structure
+ * @state: state to set clock
+ *
+ * Sets the I2C clock line to state
+ */
+static void igb_set_i2c_clk(void *data, int state)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ if (state) {
+ i2cctl |= E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_CLK_OUT;
+ i2cctl &= ~E1000_I2C_CLK_OE_N;
+ }
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+
+/* igb_get_i2c_clk - Gets the I2C SCL clock state
+ * @data: pointer to hardware structure
+ *
+ * Gets the I2C clock state
+ */
+static int igb_get_i2c_clk(void *data)
+{
+ struct igb_adapter *adapter = (struct igb_adapter *)data;
+ struct e1000_hw *hw = &adapter->hw;
+ s32 i2cctl = rd32(E1000_I2CPARAMS);
+
+ return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+}
+
+static const struct i2c_algo_bit_data igb_i2c_algo = {
+ .setsda = igb_set_i2c_data,
+ .setscl = igb_set_i2c_clk,
+ .getsda = igb_get_i2c_data,
+ .getscl = igb_get_i2c_clk,
+ .udelay = 5,
+ .timeout = 20,
+};
+
/**
* igb_get_hw_dev - return device
* used by hardware layer to print debugging information
@@ -1708,6 +1797,18 @@ void igb_reset(struct igb_adapter *adapter)
igb_force_mac_fc(hw);
igb_init_dmac(adapter, pba);
+#ifdef CONFIG_IGB_HWMON
+ /* Re-initialize the thermal sensor on i350 devices. */
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (mac->type == e1000_i350 && hw->bus.func == 0) {
+ /* If present, re-initialize the external thermal sensor
+ * interface.
+ */
+ if (adapter->ets)
+ mac->ops.init_thermal_sensor_thresh(hw);
+ }
+ }
+#endif
if (!netif_running(adapter->netdev))
igb_power_down_link(adapter);
@@ -1822,6 +1923,37 @@ void igb_set_fw_version(struct igb_adapter *adapter)
return;
}
+static const struct i2c_board_info i350_sensor_info = {
+ I2C_BOARD_INFO("i350bb", 0xf8),
+};
+
+/* igb_init_i2c - Init I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static s32 igb_init_i2c(struct igb_adapter *adapter)
+{
+ s32 status = E1000_SUCCESS;
+
+ /* The I2C interface is supported only on i350 devices */
+ if (adapter->hw.mac.type != e1000_i350)
+ return E1000_SUCCESS;
+
+ /* Initialize the i2c bus which is controlled by the registers.
+ * This bus will use the i2c_algo_bit structure that implements
+ * the protocol through toggling of the 4 bits in the register.
+ */
+ adapter->i2c_adap.owner = THIS_MODULE;
+ adapter->i2c_algo = igb_i2c_algo;
+ adapter->i2c_algo.data = adapter;
+ adapter->i2c_adap.algo_data = &adapter->i2c_algo;
+ adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
+ strlcpy(adapter->i2c_adap.name, "igb BB",
+ sizeof(adapter->i2c_adap.name));
+ status = i2c_bit_add_bus(&adapter->i2c_adap);
+ return status;
+}
+
/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -2022,9 +2154,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -2115,6 +2246,13 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* reset the hardware with the new settings */
igb_reset(adapter);
+ /* Init the I2C interface */
+ err = igb_init_i2c(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init i2c interface\n");
+ goto err_eeprom;
+ }
+
/* let the f/w know that the h/w is now under the control of the
* driver. */
igb_get_hw_control(adapter);
@@ -2135,7 +2273,27 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
+#ifdef CONFIG_IGB_HWMON
+ /* Initialize the thermal sensor on i350 devices. */
+ if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
+ u16 ets_word;
+ /*
+ * Read the NVM to determine if this i350 device supports an
+ * external thermal sensor.
+ */
+ hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
+ if (ets_word != 0x0000 && ets_word != 0xFFFF)
+ adapter->ets = true;
+ else
+ adapter->ets = false;
+ if (igb_sysfs_init(adapter))
+ dev_err(&pdev->dev,
+ "failed to allocate sysfs resources\n");
+ } else {
+ adapter->ets = false;
+ }
+#endif
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
@@ -2176,6 +2334,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_register:
igb_release_hw_control(adapter);
+ memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
if (!igb_check_reset_block(hw))
igb_reset_phy(hw);
@@ -2196,6 +2355,111 @@ err_dma:
return err;
}
+#ifdef CONFIG_PCI_IOV
+static int igb_disable_sriov(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* reclaim resources allocated to VFs */
+ if (adapter->vf_data) {
+ /* disable iov and allow time for transactions to clear */
+ if (igb_vfs_are_assigned(adapter)) {
+ dev_warn(&pdev->dev,
+ "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
+ return -EPERM;
+ } else {
+ pci_disable_sriov(pdev);
+ msleep(500);
+ }
+
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+ wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
+ wrfl();
+ msleep(100);
+ dev_info(&pdev->dev, "IOV Disabled\n");
+
+ /* Re-enable DMA Coalescing flag since IOV is turned off */
+ adapter->flags |= IGB_FLAG_DMAC;
+ }
+
+ return 0;
+}
+
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int old_vfs = pci_num_vf(pdev);
+ int err = 0;
+ int i;
+
+ if (!num_vfs)
+ goto out;
+ else if (old_vfs && old_vfs == num_vfs)
+ goto out;
+ else if (old_vfs && old_vfs != num_vfs)
+ err = igb_disable_sriov(pdev);
+
+ if (err)
+ goto out;
+
+ if (num_vfs > 7) {
+ err = -EPERM;
+ goto out;
+ }
+
+ adapter->vfs_allocated_count = num_vfs;
+
+ adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+
+ /* if allocation failed then we do not support SR-IOV */
+ if (!adapter->vf_data) {
+ adapter->vfs_allocated_count = 0;
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for VF Data Storage\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+
+ dev_info(&pdev->dev, "%d VFs allocated\n",
+ adapter->vfs_allocated_count);
+ for (i = 0; i < adapter->vfs_allocated_count; i++)
+ igb_vf_configure(adapter, i);
+
+ /* DMA Coalescing is not supported in IOV mode. */
+ adapter->flags &= ~IGB_FLAG_DMAC;
+ goto out;
+
+err_out:
+ kfree(adapter->vf_data);
+ adapter->vf_data = NULL;
+ adapter->vfs_allocated_count = 0;
+out:
+ return err;
+}
+
+#endif
+/*
+ * igb_remove_i2c - Cleanup I2C interface
+ * @adapter: pointer to adapter structure
+ *
+ */
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+
+ /* free the adapter bus structure */
+ i2c_del_adapter(&adapter->i2c_adap);
+}
+
/**
* igb_remove - Device Removal Routine
* @pdev: PCI device information struct
@@ -2212,8 +2476,11 @@ static void igb_remove(struct pci_dev *pdev)
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+ igb_sysfs_exit(adapter);
+#endif
+ igb_remove_i2c(adapter);
igb_ptp_stop(adapter);
-
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2243,23 +2510,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- /* reclaim resources allocated to VFs */
- if (adapter->vf_data) {
- /* disable iov and allow time for transactions to clear */
- if (igb_vfs_are_assigned(adapter)) {
- dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- } else {
- pci_disable_sriov(pdev);
- msleep(500);
- }
-
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
- wrfl();
- msleep(100);
- dev_info(&pdev->dev, "IOV Disabled\n");
- }
+ igb_disable_sriov(pdev);
#endif
iounmap(hw->hw_addr);
@@ -2290,103 +2541,22 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- int old_vfs = pci_num_vf(adapter->pdev);
- int i;
/* Virtualization features not supported on i210 family. */
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
return;
- if (old_vfs) {
- dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
- "max_vfs setting of %d\n", old_vfs, max_vfs);
- adapter->vfs_allocated_count = old_vfs;
- }
-
- if (!adapter->vfs_allocated_count)
- return;
-
- adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
- sizeof(struct vf_data_storage), GFP_KERNEL);
-
- /* if allocation failed then we do not support SR-IOV */
- if (!adapter->vf_data) {
- adapter->vfs_allocated_count = 0;
- dev_err(&pdev->dev, "Unable to allocate memory for VF "
- "Data Storage\n");
- goto out;
- }
-
- if (!old_vfs) {
- if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
- goto err_out;
- }
- dev_info(&pdev->dev, "%d VFs allocated\n",
- adapter->vfs_allocated_count);
- for (i = 0; i < adapter->vfs_allocated_count; i++)
- igb_vf_configure(adapter, i);
+ igb_enable_sriov(pdev, max_vfs);
+ pci_sriov_set_totalvfs(pdev, 7);
- /* DMA Coalescing is not supported in IOV mode. */
- adapter->flags &= ~IGB_FLAG_DMAC;
- goto out;
-err_out:
- kfree(adapter->vf_data);
- adapter->vf_data = NULL;
- adapter->vfs_allocated_count = 0;
-out:
- return;
#endif /* CONFIG_PCI_IOV */
}
-/**
- * igb_sw_init - Initialize general software structures (struct igb_adapter)
- * @adapter: board private structure to initialize
- *
- * igb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-static int igb_sw_init(struct igb_adapter *adapter)
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u32 max_rss_queues;
- pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
-
- /* set default ring sizes */
- adapter->tx_ring_count = IGB_DEFAULT_TXD;
- adapter->rx_ring_count = IGB_DEFAULT_RXD;
-
- /* set default ITR values */
- adapter->rx_itr_setting = IGB_DEFAULT_ITR;
- adapter->tx_itr_setting = IGB_DEFAULT_ITR;
-
- /* set default work limits */
- adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
-
- adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
-
- spin_lock_init(&adapter->stats64_lock);
-#ifdef CONFIG_PCI_IOV
- switch (hw->mac.type) {
- case e1000_82576:
- case e1000_i350:
- if (max_vfs > 7) {
- dev_warn(&pdev->dev,
- "Maximum of 7 VFs per PF, using max\n");
- adapter->vfs_allocated_count = 7;
- } else
- adapter->vfs_allocated_count = max_vfs;
- break;
- default:
- break;
- }
-#endif /* CONFIG_PCI_IOV */
-
/* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
case e1000_i211:
@@ -2445,11 +2615,64 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
+}
+
+/**
+ * igb_sw_init - Initialize general software structures (struct igb_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * igb_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IGB_DEFAULT_TXD;
+ adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+ /* set default ITR values */
+ adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+ adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+
+ /* set default work limits */
+ adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+ adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+ VLAN_HLEN;
+ adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+ spin_lock_init(&adapter->stats64_lock);
+#ifdef CONFIG_PCI_IOV
+ switch (hw->mac.type) {
+ case e1000_82576:
+ case e1000_i350:
+ if (max_vfs > 7) {
+ dev_warn(&pdev->dev,
+ "Maximum of 7 VFs per PF, using max\n");
+ adapter->vfs_allocated_count = 7;
+ } else
+ adapter->vfs_allocated_count = max_vfs;
+ if (adapter->vfs_allocated_count)
+ dev_warn(&pdev->dev,
+ "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+ break;
+ default:
+ break;
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ igb_init_queue_configuration(adapter);
/* Setup and initialize a copy of the hw vlan table array */
- adapter->shadow_vfta = kzalloc(sizeof(u32) *
- E1000_VLAN_FILTER_TBL_SIZE,
- GFP_ATOMIC);
+ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+ GFP_ATOMIC);
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {
@@ -3131,6 +3354,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+ (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+ (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+
+ /* set build_skb flag */
+ if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+ set_ring_build_skb_enabled(rx_ring);
+ else
+ clear_ring_build_skb_enabled(rx_ring);
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3150,8 +3387,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -3768,6 +4008,7 @@ static void igb_watchdog_task(struct work_struct *work)
}
igb_spoof_check(adapter);
+ igb_ptp_rx_hang(adapter);
/* Reset the timer */
if (!test_bit(__IGB_DOWN, &adapter->state))
@@ -4193,13 +4434,6 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
-/*
- * The largest size we can write to the descriptor is 65535. In order to
- * maintain a power of two alignment we have to limit ourselves to 32K.
- */
-#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
-
static void igb_tx_map(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
const u8 hdr_len)
@@ -4368,15 +4602,25 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
u8 hdr_len = 0;
- /* need: 1 descriptor per page,
+ /* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
- * + 1 desc for skb->data,
* + 1 desc for context descriptor,
- * otherwise try next time */
- if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+ * otherwise try next time
+ */
+ if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
+ unsigned short f;
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+ } else {
+ count += skb_shinfo(skb)->nr_frags;
+ }
+
+ if (igb_maybe_stop_tx(tx_ring, count + 3)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
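
The descriptor budgeting above replaces the old fixed nr_frags + 4 estimate; a hedged restatement of the same arithmetic pulled into a helper, only to make the counting explicit (helper name is illustrative, not part of the patch):

/* Illustrative restatement of the new worst-case descriptor count:
 * one descriptor per IGB_MAX_DATA_PER_TXD (32K) chunk of the head and
 * of each fragment, plus the context descriptor and tail gap. */
static u16 igb_example_desc_needed(const struct sk_buff *skb)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	unsigned short f;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

	return count + 3;	/* +1 context descriptor, +2 gap */
}
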
@@ -4387,12 +4631,15 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
+ skb_tx_timestamp(skb);
+
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IGB_TX_FLAGS_TSTAMP;
adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
@@ -4415,7 +4662,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
igb_tx_map(tx_ring, first, hdr_len);
/* Make sure there is space in the ring for the next send. */
- igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
@@ -4969,7 +5216,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
unsigned char mac_addr[ETH_ALEN];
- eth_random_addr(mac_addr);
+ eth_zero_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
return 0;
@@ -5322,9 +5569,9 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- /* generate a new mac address as we were hotplug removed/added */
+ /* clear mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- eth_random_addr(vf_mac);
+ eth_zero_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5703,7 +5950,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
@@ -5819,9 +6066,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(total_packets &&
netif_carrier_ok(tx_ring->netdev) &&
- igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+ igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -5870,6 +6118,41 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
DMA_FROM_DEVICE);
}
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+ struct page *page,
+ unsigned int truesize)
+{
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /* since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
+
/**
* igb_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
@@ -5892,6 +6175,11 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
{
struct page *page = rx_buffer->page;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
unsigned char *va = page_address(page) + rx_buffer->page_offset;
@@ -5914,38 +6202,88 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+ rx_buffer->page_offset, size, truesize);
- /* avoid re-using remote pages */
- if (unlikely(page_to_nid(page) != numa_node_id()))
- return false;
+ return igb_can_reuse_rx_page(rx_buffer, page, truesize);
+}
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+ void *page_addr;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
- return false;
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(NET_SKB_PAD +
+ NET_IP_ALIGN +
+ size);
+#endif
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+ /* If we spanned a buffer we have a huge mess so test for it */
+ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
- /*
- * since we are the only owner of the page and we need to
- * increment it, just set the value to 2 in order to avoid
- * an unnecessary locked operation
- */
- atomic_set(&page->_count, 2);
-#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+ /* Guarantee this function can be used by verifying buffer sizes */
+ BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
+ NET_IP_ALIGN +
+ IGB_TS_HDR_LEN +
+ ETH_FRAME_LEN +
+ ETH_FCS_LEN));
- if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
- return false;
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
- /* bump ref count on page before it is given to the stack */
- get_page(page);
+ page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
#endif
- return true;
+ /* build an skb around the page buffer */
+ skb = build_skb(page_addr, truesize);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
+ }
+
+ if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
}
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
@@ -5957,13 +6295,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
-
page = rx_buffer->page;
prefetchw(page);
@@ -6363,8 +6694,17 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
break;
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_rx_buffer(rx_ring, rx_desc);
+ else
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6451,6 +6791,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ if (ring_uses_build_skb(rx_ring))
+ return NET_SKB_PAD + NET_IP_ALIGN;
+ else
+ return 0;
+}
+
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6477,7 +6825,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+ bi->page_offset +
+ igb_rx_offset(rx_ring));
rx_desc++;
bi++;
@@ -6903,6 +7253,72 @@ static void igb_shutdown(struct pci_dev *pdev)
}
}
+#ifdef CONFIG_PCI_IOV
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_pci_disable_sriov(struct pci_dev *dev)
+{
+ int err = igb_disable_sriov(dev);
+
+ if (!err)
+ err = igb_sriov_reinit(dev);
+
+ return err;
+}
+
+static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
+{
+ int err = igb_enable_sriov(dev, num_vfs);
+
+ if (err)
+ goto out;
+
+ err = igb_sriov_reinit(dev);
+ if (!err)
+ return num_vfs;
+
+out:
+ return err;
+}
+
+#endif
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ if (num_vfs == 0)
+ return igb_pci_disable_sriov(dev);
+ else
+ return igb_pci_enable_sriov(dev, num_vfs);
+#endif
+ return 0;
+}
+
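
With .sriov_configure wired into igb_driver, VF counts are now driven through the standard PCI sysfs attribute rather than the max_vfs module parameter; a hedged userspace sketch (device path illustrative):

/* Userspace sketch, not from the patch: request VFs through the sysfs
 * interface that .sriov_configure now services; writing 0 disables
 * SR-IOV again. */
#include <stdio.h>

static int example_set_numvfs(const char *pci_addr, int num_vfs)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/bus/pci/devices/%s/sriov_numvfs", pci_addr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", num_vfs);
	return fclose(f);
}
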
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@@ -7308,4 +7724,133 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
}
}
+static DEFINE_SPINLOCK(i2c_clients_lock);
+
+/* igb_get_i2c_client - returns matching client
+ * in the adapter's client list.
+ * @adapter: adapter struct
+ * @dev_addr: device address of i2c needed.
+ */
+static struct i2c_client *
+igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
+{
+ ulong flags;
+ struct igb_i2c_client_list *client_list;
+ struct i2c_client *client = NULL;
+ struct i2c_board_info client_info = {
+ I2C_BOARD_INFO("igb", 0x00),
+ };
+
+ spin_lock_irqsave(&i2c_clients_lock, flags);
+ client_list = adapter->i2c_clients;
+
+ /* See if we already have an i2c_client */
+ while (client_list) {
+ if (client_list->client->addr == (dev_addr >> 1)) {
+ client = client_list->client;
+ goto exit;
+ } else {
+ client_list = client_list->next;
+ }
+ }
+
+ /* no client_list found, create a new one */
+ client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
+ if (client_list == NULL)
+ goto exit;
+
+ /* The dev_addr passed to us is left-shifted by 1 bit; the
+ * i2c_new_device call expects the right-justified 7-bit address.
+ */
+ client_info.addr = dev_addr >> 1;
+ client_info.platform_data = adapter;
+ client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
+ if (client_list->client == NULL) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to create new i2c device..\n");
+ goto err_no_client;
+ }
+
+ /* insert new client at head of list */
+ client_list->next = adapter->i2c_clients;
+ adapter->i2c_clients = client_list;
+
+ client = client_list->client;
+ goto exit;
+
+err_no_client:
+ kfree(client_list);
+exit:
+ spin_unlock_irqrestore(&i2c_clients_lock, flags);
+ return client;
+}
+
+/* igb_read_i2c_byte - Reads one byte over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ s32 status;
+ u16 swfw_mask = 0;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+ != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+
+ status = i2c_smbus_read_byte_data(this_client, byte_offset);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status < 0)
+ return E1000_ERR_I2C;
+
+ *data = status;
+ return E1000_SUCCESS;
+}
+
+/* igb_write_i2c_byte - Writes one byte over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: device address
+ * @data: value to write
+ *
+ * Performs byte write operation over I2C interface at
+ * a specified device address.
+ */
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+ struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+ s32 status;
+ u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+ if (!this_client)
+ return E1000_ERR_I2C;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ return E1000_ERR_SWFW_SYNC;
+ status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ if (status)
+ return E1000_ERR_I2C;
+
+ return E1000_SUCCESS;
+}
/* igb_main.c */
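
igb_get_i2c_client() stores clients keyed by the 7-bit address, while callers pass the 8-bit form (0xF8 becomes 0x7C after the shift); a hedged sketch of the write-side path using the EMC limit offset defined earlier (helper name illustrative, not from the patch):

/* Illustrative only: program the EMC internal thermal limit through the
 * new byte-write path; igb_get_i2c_client() converts the 8-bit 0xF8
 * address into the 7-bit 0x7C it registers with the I2C core. */
static s32 igb_example_set_internal_limit(struct e1000_hw *hw, u8 limit)
{
	return igb_write_i2c_byte(hw, E1000_EMC_INTERNAL_THERM_LIMIT,
				  E1000_I2C_THERMAL_SENSOR_ADDR, limit);
}
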
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab3429729bd..0987822359f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
+#include <linux/ptp_classify.h>
#include "igb.h"
@@ -70,6 +71,7 @@
*/
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
+#define IGB_PTP_TX_TIMEOUT (HZ * 15)
#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
@@ -396,6 +398,15 @@ void igb_ptp_tx_work(struct work_struct *work)
if (!adapter->ptp_tx_skb)
return;
+ if (time_is_before_jiffies(adapter->ptp_tx_start +
+ IGB_PTP_TX_TIMEOUT)) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ adapter->tx_hwtstamp_timeouts++;
+ dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+ return;
+ }
+
tsynctxctl = rd32(E1000_TSYNCTXCTL);
if (tsynctxctl & E1000_TSYNCTXCTL_VALID)
igb_ptp_tx_hwtstamp(adapter);
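The timeout added above frees a Tx timestamp skb that has been pending for more than IGB_PTP_TX_TIMEOUT. time_is_before_jiffies() is a wraparound-safe "deadline already passed" test; a standalone C illustration of the underlying signed-difference trick (the kernel macros operate on unsigned long jiffies, the 32-bit counters here are only for the demo):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True once "now" is strictly after "start + timeout", even if the counter
 * has wrapped between start and now. */
static bool tx_tstamp_expired(uint32_t now, uint32_t start, uint32_t timeout)
{
	return (int32_t)(now - (start + timeout)) > 0;
}

int main(void)
{
	assert(!tx_tstamp_expired(100, 100, 15));	/* still within window */
	assert(tx_tstamp_expired(200, 100, 15));	/* deadline passed */
	assert(tx_tstamp_expired(5, 0xFFFFFFF0u, 15));	/* across wraparound */
	return 0;
}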
@@ -419,6 +430,51 @@ static void igb_ptp_overflow_check(struct work_struct *work)
}
/**
+ * igb_ptp_rx_hang - detect error case where Rx timestamp registers are latched
+ * @adapter: private network adapter structure
+ *
+ * This watchdog task is scheduled to detect the error case where the hardware
+ * has dropped an Rx packet that was timestamped while the ring was full. The
+ * error is rare but leaves the device in a state unable to timestamp any
+ * future packets.
+ */
+void igb_ptp_rx_hang(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *rx_ring;
+ u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
+
+ if (hw->mac.type != e1000_82576)
+ return;
+
+ /* If we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & E1000_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
+ }
+
+ /* Determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
+
+ /* Only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5 * HZ)) {
+ rd32(E1000_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
+ adapter->rx_hwtstamp_cleared++;
+ dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+ }
+}
+
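The watchdog above takes the newest of last_rx_ptp_check and every ring's last_rx_timestamp, and only reads RXSTMPH to release the latch when nothing has consumed a timestamp for more than five seconds. A plain C sketch of that "newest event across N rings, then staleness check" pattern:

#include <stdbool.h>
#include <stddef.h>

struct demo_ring { unsigned long last_rx_timestamp; };

/* True when the latch should be cleared: neither the last check nor any ring
 * has seen a timestamp event within "window" ticks of "now". A monotonic,
 * non-wrapping counter is assumed here for simplicity; the driver uses
 * time_after()/time_is_before_jiffies() to stay safe across jiffies
 * wraparound. */
static bool rx_tstamp_stale(const struct demo_ring *rings, size_t n,
			    unsigned long last_check, unsigned long now,
			    unsigned long window)
{
	unsigned long newest = last_check;
	size_t i;

	for (i = 0; i < n; i++)
		if (rings[i].last_rx_timestamp > newest)
			newest = rings[i].last_rx_timestamp;

	return now - newest > window;
}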
+/**
* igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @adapter: Board private structure.
*
@@ -643,7 +699,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
else
wr32(E1000_ETQF(3), 0);
-#define PTP_PORT 319
/* L4 Queue Filter[3]: filter by destination port and protocol */
if (is_l4) {
u32 ftqf = (IPPROTO_UDP /* UDP */
@@ -652,12 +707,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
| E1000_FTQF_MASK); /* mask all inputs */
ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
- wr32(E1000_IMIR(3), htons(PTP_PORT));
+ wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
wr32(E1000_IMIREXT(3),
(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
if (hw->mac.type == e1000_82576) {
/* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_PORT));
+ wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
wr32(E1000_FTQF(3), ftqf);
@@ -801,6 +856,10 @@ void igb_ptp_stop(struct igb_adapter *adapter)
}
cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fdca7b67277..a1463e3d14c 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */
struct {
unsigned long time_stamp;
+ union e1000_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 277f5dfe3d9..d60cd439341 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
bool cleaned = false;
i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ buffer_info = &tx_ring->buffer_info[i];
+ eop_desc = buffer_info->next_to_watch;
+
+ do {
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ buffer_info->next_to_watch = NULL;
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = buffer_info->skb;
if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++;
if (i == tx_ring->count)
i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
+
+ eop_desc = buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
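The reworked loop treats next_to_watch as a pointer to the packet's end-of-packet descriptor: the transmit path publishes it last, the clean path checks it and the DD status bit before touching anything else, and clears it afterwards so a half-written packet can never be mistaken for a hang. A compressed standalone sketch of that consumer-side skeleton (single-threaded, so the barrier in the hunk above is omitted; a well-formed ring is assumed):

#include <stdbool.h>
#include <stddef.h>

#define DEMO_RING_SIZE 8

struct demo_desc { bool dd; };			/* descriptor-done bit */
struct demo_buf  { struct demo_desc *eop; };	/* set on a packet's first buffer */

/* Clean completed packets; stop at the first packet whose EOP descriptor is
 * either unpublished (eop == NULL) or not yet marked done. Clearing eop
 * mirrors "clear next_to_watch to prevent false hangs" above. */
static unsigned int demo_clean(struct demo_desc *desc, struct demo_buf *buf,
			       unsigned int next_to_clean)
{
	unsigned int i = next_to_clean;

	while (buf[i].eop && buf[i].eop->dd) {
		struct demo_desc *eop = buf[i].eop;

		buf[i].eop = NULL;
		while (&desc[i] != eop)			/* walk the packet */
			i = (i + 1) % DEMO_RING_SIZE;
		i = (i + 1) % DEMO_RING_SIZE;		/* step past EOP */
	}
	return i;					/* new next_to_clean */
}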
@@ -1399,12 +1412,10 @@ static void igbvf_set_multi(struct net_device *netdev)
int i;
if (!netdev_mc_empty(netdev)) {
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list) {
- dev_err(&adapter->pdev->dev,
- "failed to allocate multicast filter list\n");
+ mta_list = kmalloc_array(netdev_mc_count(netdev), ETH_ALEN,
+ GFP_ATOMIC);
+ if (!mta_list)
return;
- }
}
/* prepare a packed array of only addresses. */
@@ -1738,7 +1749,6 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -1964,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2024,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2064,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
+ struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
@@ -2080,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE);
@@ -2103,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
@@ -2112,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -2123,7 +2127,6 @@ dma_error:
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
if (count)
count--;
@@ -2142,7 +2145,8 @@ dma_error:
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ int tx_flags, int count,
+ unsigned int first, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2192,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */
wmb();
+ tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -2258,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
+ first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
@@ -2736,30 +2741,24 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address."
- " Is the PF interface up?\n");
- eth_hw_addr_random(netdev);
- memcpy(adapter->hw.mac.addr, netdev->dev_addr,
- netdev->addr_len);
+ "PF still in reset state. Is the PF interface up?\n");
} else {
err = hw->mac.ops.read_mac_addr(hw);
- if (err) {
- dev_err(&pdev->dev, "Error reading MAC address\n");
- goto err_hw_init;
- }
+ if (err)
+ dev_info(&pdev->dev, "Error reading MAC address.\n");
+ else if (is_zero_ether_addr(adapter->hw.mac.addr))
+ dev_info(&pdev->dev, "MAC address not assigned by administrator.\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ netdev->addr_len);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
- netdev->dev_addr);
- err = -EIO;
- goto err_hw_init;
+ dev_info(&pdev->dev, "Assigning random MAC address.\n");
+ eth_hw_addr_random(netdev);
+ memcpy(adapter->hw.mac.addr, netdev->dev_addr,
+ netdev->addr_len);
}
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
-
setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
(unsigned long) adapter);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index ae96c10251b..ea480837343 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -500,9 +500,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
@@ -709,11 +708,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor ring memory\n");
+ if (!txdr->buffer_info)
return -ENOMEM;
- }
/* round up to nearest 4K */
@@ -798,11 +794,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info) {
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptor ring\n");
+ if (!rxdr->buffer_info)
return -ENOMEM;
- }
/* Round up to nearest 4K */
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 687c83d1bda..be2989e6000 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2012 Intel Corporation.
+# Copyright(c) 1999 - 2013 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 8e786764c60..a8e10cff7a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,6 +35,7 @@
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
+#include <linux/jiffies.h>
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
@@ -91,21 +92,26 @@
*/
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
-#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
-#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
-#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9)
+enum ixgbe_tx_flags {
+ /* cmd_type flags */
+ IXGBE_TX_FLAGS_HW_VLAN = 0x01,
+ IXGBE_TX_FLAGS_TSO = 0x02,
+ IXGBE_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IXGBE_TX_FLAGS_CC = 0x08,
+ IXGBE_TX_FLAGS_IPV4 = 0x10,
+ IXGBE_TX_FLAGS_CSUM = 0x20,
+
+ /* software defined flags */
+ IXGBE_TX_FLAGS_SW_VLAN = 0x40,
+ IXGBE_TX_FLAGS_FCOE = 0x80,
+};
+
+/* VLAN info */
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -150,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@@ -195,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
@@ -224,6 +231,7 @@ struct ixgbe_ring {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
+ unsigned long last_rx_timestamp;
unsigned long state;
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -271,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -573,11 +576,14 @@ struct ixgbe_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
+ struct work_struct ptp_tx_work;
+ struct sk_buff *ptp_tx_skb;
+ unsigned long ptp_tx_start;
unsigned long last_overflow_check;
+ unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
- int rx_hwtstamp_filter;
u32 base_incval;
/* SR-IOV */
@@ -614,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
+ __IXGBE_READ_I2C,
};
struct ixgbe_cb {
@@ -694,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
@@ -742,15 +749,32 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb);
-extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
+extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb);
+static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+ return;
+
+ __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+
+ /*
+ * Update the last_rx_timestamp timer in order to enable watchdog check
+ * for error case of latched timestamp on a dropped packet.
+ */
+ rx_ring->last_rx_timestamp = jiffies;
+}
+
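The inline above is intended to be cheap on the Rx hot path: it returns unless the descriptor's TS status bit is set, then timestamps the skb and records jiffies in last_rx_timestamp for the Rx-hang watchdog. A hedged sketch of a call site in the Rx cleanup path; the function name and surrounding code are illustrative, not the actual ixgbe call site:

/* Hypothetical Rx-path usage: populate skb fields for a received frame.
 * Only the hwtstamp call reflects the API added above; the rest is a
 * placeholder. */
static void example_process_rx_desc(struct ixgbe_ring *rx_ring,
				    union ixgbe_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	/* no-op unless IXGBE_RXDADV_STAT_TS is set in the descriptor */
	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}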
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
+#endif
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 42537336110..d0113fc97b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,7 +41,6 @@
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
@@ -633,15 +632,15 @@ out:
* ixgbe_setup_mac_link_82598 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if auto-negotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
+ bool autoneg = false;
s32 status = 0;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -685,20 +684,18 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true if waiting is needed to complete
*
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -1006,15 +1003,16 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
}
/**
- * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
* @hw: pointer to hardware structure
- * @byte_offset: EEPROM byte offset to read
+ * @dev_addr: address to read from
+ * @byte_offset: byte offset to read from dev_addr
* @eeprom_data: value read
*
- * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ * Performs 8 byte read operation to SFP module's data over I2C interface.
**/
-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+ u8 byte_offset, u8 *eeprom_data)
{
s32 status = 0;
u16 sfp_addr = 0;
@@ -1028,7 +1026,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
* 0xC30D. These registers are used to talk to the SFP+
* module's EEPROM through the SDA/SCL (I2C) interface.
*/
- sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+ sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
hw->phy.ops.write_reg(hw,
IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
@@ -1060,7 +1058,6 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
*eeprom_data = (u8)(sfp_data >> 8);
} else {
status = IXGBE_ERR_PHY;
- goto out;
}
out:
@@ -1068,6 +1065,36 @@ out:
}
/**
+ * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, eeprom_data);
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, sff8472_data);
+}
+
+/**
* ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
* @hw: pointer to hardware structure
*
@@ -1300,6 +1327,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.write_reg = &ixgbe_write_phy_reg_generic,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598,
.check_overtemp = &ixgbe_tn_check_overtemp,
};
@@ -1311,4 +1339,3 @@ struct ixgbe_info ixgbe_82598_info = {
.eeprom_ops = &eeprom_ops_82598,
.phy_ops = &phy_ops_82598,
};
-
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1073aea5da4..203a00c2433 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -45,21 +45,17 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
@@ -234,13 +230,13 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* ixgbe_get_link_capabilities_82599 - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
- * @negotiation: true when autoneg or autotry is enabled
+ * @autoneg: true when autoneg or autotry is enabled
*
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
- bool *negotiation)
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -251,7 +247,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
goto out;
}
@@ -268,22 +264,22 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
switch (autoc & IXGBE_AUTOC_LMS_MASK) {
case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_1G_AN:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_10G_SERIAL:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR:
@@ -295,7 +291,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@@ -306,12 +302,12 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
*speed |= IXGBE_LINK_SPEED_10GB_FULL;
if (autoc & IXGBE_AUTOC_KX_SUPP)
*speed |= IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
break;
case IXGBE_AUTOC_LMS_SGMII_1G_100M:
*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
- *negotiation = false;
+ *autoneg = false;
break;
default:
@@ -323,7 +319,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
- *negotiation = true;
+ *autoneg = true;
}
out:
@@ -510,14 +506,12 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status = 0;
@@ -527,11 +521,11 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
u32 i = 0;
bool link_up = false;
- bool negotiation;
+ bool autoneg = false;
/* Mask off requested but non-supported speeds */
status = hw->mac.ops.get_link_capabilities(hw, &link_speed,
- &negotiation);
+ &autoneg);
if (status != 0)
return status;
@@ -564,7 +558,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_10GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -617,7 +610,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
status = ixgbe_setup_mac_link_82599(hw,
IXGBE_LINK_SPEED_1GB_FULL,
- autoneg,
autoneg_wait_to_complete);
if (status != 0)
return status;
@@ -646,7 +638,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
highest_link_speed,
- autoneg,
autoneg_wait_to_complete);
out:
@@ -666,13 +657,12 @@ out:
* ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Implements the Intel SmartSpeed algorithm.
**/
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
+ ixgbe_link_speed speed,
bool autoneg_wait_to_complete)
{
s32 status = 0;
@@ -703,7 +693,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* First, try to get link with full advertisement */
hw->phy.smart_speed_active = false;
for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != 0)
goto out;
@@ -738,7 +728,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* Turn SmartSpeed on to disable KR support */
hw->phy.smart_speed_active = true;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
if (status != 0)
goto out;
@@ -764,7 +754,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
/* We didn't get link. Turn SmartSpeed back off. */
hw->phy.smart_speed_active = false;
- status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+ status = ixgbe_setup_mac_link_82599(hw, speed,
autoneg_wait_to_complete);
out:
@@ -778,14 +768,13 @@ out:
* ixgbe_setup_mac_link_82599 - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
*
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -799,6 +788,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
bool got_lock = false;
+ bool autoneg = false;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -911,20 +901,18 @@ out:
* ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true if waiting is needed to complete
*
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
- status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ status = hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -2253,6 +2241,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 5e68afdd502..99e472ebaa7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f7a0970a251..bc3948ead6e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 9bc17c0cb97..1f2c805684d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 1f4108ee154..1634de8b627 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 87592b458c9..ac780770863 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index ba835708fca..3164f5453b8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 4eac80d0185..05e23b80b5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index 4dec47faeb0..a4ef07631d1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index f1e002d5fa8..f3d68f9696b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include <linux/dcbnl.h>
#include "ixgbe_dcb_82598.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
#define BIT_DCB_MODE 0x01
@@ -301,7 +302,6 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
-#ifdef IXGBE_FCOE
static void ixgbe_dcbnl_devreset(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -320,7 +320,6 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev)
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
-#endif
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
@@ -450,7 +449,6 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 0;
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
switch (tcid) {
@@ -461,14 +459,14 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
*num = adapter->dcb_cfg.num_tcs.pfc_tcs;
break;
default:
- rval = -EINVAL;
+ return -EINVAL;
break;
}
} else {
- rval = -EINVAL;
+ return -EINVAL;
}
- return rval;
+ return 0;
}
static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
@@ -541,6 +539,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i, err = 0;
__u8 max_tc = 0;
+ __u8 map_chg = 0;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
@@ -550,15 +549,22 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
GFP_KERNEL);
if (!adapter->ixgbe_ieee_ets)
return -ENOMEM;
- }
- memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+ /* initialize UP2TC mappings to invalid value */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ adapter->ixgbe_ieee_ets->prio_tc[i] =
+ IEEE_8021QAZ_MAX_TCS;
+ }
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
+ if (ets->prio_tc[i] != adapter->ixgbe_ieee_ets->prio_tc[i])
+ map_chg = 1;
}
+ memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
if (max_tc)
max_tc++;
@@ -567,6 +573,8 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (max_tc != netdev_get_num_tc(dev))
err = ixgbe_setup_tc(dev, max_tc);
+ else if (map_chg)
+ ixgbe_dcbnl_devreset(dev);
if (err)
goto err_out;
@@ -643,9 +651,11 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
return err;
err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
#ifdef IXGBE_FCOE
- if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
@@ -656,6 +666,23 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
ixgbe_dcbnl_devreset(dev);
}
#endif
+
+ /* VF devices should use default UP when available */
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == 0) {
+ int vf;
+
+ adapter->default_up = app->priority;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+
+ if (!vfinfo->pf_qos)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ app->priority, vf);
+ }
+ }
+
return 0;
}
@@ -683,6 +710,24 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
ixgbe_dcbnl_devreset(dev);
}
#endif
+ /* IF default priority is being removed clear VF default UP */
+ if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == 0 && adapter->default_up == app->priority) {
+ int vf;
+ long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app);
+ int qos = app_mask ? find_first_bit(&app_mask, 8) : 0;
+
+ adapter->default_up = qos;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+
+ if (!vfinfo->pf_qos)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ qos, vf);
+ }
+ }
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 3504686d3af..c5933f6dcee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 32685842434..f4d2e9e3c6d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include "ixgbe.h"
+#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16
@@ -156,7 +157,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
ixgbe_link_speed supported_link;
u32 link_speed = 0;
- bool autoneg;
+ bool autoneg = false;
bool link_up;
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
@@ -333,10 +334,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
return err;
/* this sets the link speed and restarts auto-neg */
hw->mac.autotry_restart = true;
- err = hw->mac.ops.setup_link(hw, advertised, true, true);
+ err = hw->mac.ops.setup_link(hw, advertised, true);
if (err) {
e_info(probe, "setup link failed with code %d\n", err);
- hw->mac.ops.setup_link(hw, old, true, true);
+ hw->mac.ops.setup_link(hw, old, true);
}
} else {
/* in this case we currently only support 10Gb/FULL */
@@ -1040,6 +1041,9 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
p = (char *) adapter +
ixgbe_gstrings_stats[i].stat_offset;
break;
+ default:
+ data[i] = 0;
+ continue;
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
@@ -1096,8 +1100,10 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
- memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ for (i = 0; i < IXGBE_TEST_LEN; i++) {
+ memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
@@ -1837,19 +1843,11 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
- /* Offline tests */
-
- e_info(hw, "offline testing starting\n");
-
- /* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
- if (ixgbe_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
-
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
int i;
for (i = 0; i < adapter->num_vfs; i++) {
@@ -1870,12 +1868,24 @@ static void ixgbe_diag_test(struct net_device *netdev,
}
}
+ /* Offline tests */
+ e_info(hw, "offline testing starting\n");
+
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
- else
- ixgbe_reset(adapter);
+ /* bringing adapter down disables SFP+ optics */
+ if (hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result
+ */
+ if (ixgbe_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbe_reset(adapter);
e_info(hw, "register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1908,16 +1918,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
skip_loopback:
ixgbe_reset(adapter);
+ /* clear testing bit and return adapter to previous state */
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
} else {
e_info(hw, "online testing starting\n");
+
+ /* if adapter is down, SFP+ optics will be disabled */
+ if (!if_running && hw->mac.ops.enable_tx_laser)
+ hw->mac.ops.enable_tx_laser(hw);
+
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- /* Online tests aren't run; pass by default */
+ /* Offline tests aren't run; pass by default */
data[0] = 0;
data[1] = 0;
data[2] = 0;
@@ -1925,6 +1941,10 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
+
+ /* if adapter was down, ensure SFP+ optics are disabled again */
+ if (!if_running && hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2093,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- u16 tx_itr_param, rx_itr_param;
+ u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
- return -EINVAL;
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+ /* reject Tx specific changes in case of mixed RxTx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EINVAL;
+ tx_itr_prev = adapter->rx_itr_setting;
+ } else {
+ tx_itr_prev = adapter->tx_itr_setting;
+ }
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2125,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* mixed Rx/Tx */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+ /* detect ITR changes that require update of TXDCTL.WTHRESH */
+ if ((adapter->tx_itr_setting > 1) &&
+ (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ if ((tx_itr_prev == 1) ||
+ (tx_itr_prev > IXGBE_100K_ITR))
+ need_reset = true;
+ } else {
+ if ((tx_itr_prev > 1) &&
+ (tx_itr_prev < IXGBE_100K_ITR))
+ need_reset = true;
+ }
+#endif
/* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
+ need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
@@ -2695,6 +2736,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
@@ -2704,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0;
}
+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+ unsigned int max_combined;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ /* SR-IOV currently only allows one queue on the PF */
+ max_combined = 1;
+ } else if (tcs > 1) {
+ /* For DCB report channels per traffic class */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ max_combined = 4;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ max_combined = 8;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ max_combined = 16;
+ }
+ } else if (adapter->atr_sample_rate) {
+ /* support up to 64 queues with ATR */
+ max_combined = IXGBE_MAX_FDIR_INDICES;
+ } else {
+ /* support up to 16 queues with RSS */
+ max_combined = IXGBE_MAX_RSS_INDICES;
+ }
+
+ return max_combined;
+}
+
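The decision tree above caps combined channels at 1 without MSI-X or with SR-IOV, at a per-TC queue count under DCB (4 on 82598, 8 for more than four TCs, otherwise 16), and otherwise at the Flow Director or RSS limit. A standalone C rendering of the same branches for a few sample configurations (the 63/16 constants are the ones visible in this patch; the function name is illustrative):

#include <stdio.h>

#define MAX_FDIR_INDICES 63	/* per the ixgbe.h change above */
#define MAX_RSS_INDICES  16

static unsigned int max_channels(int msix, int sriov, int tcs,
				 int mac_82598, int atr)
{
	if (!msix || sriov)
		return 1;		/* one q_vector / one PF queue */
	if (tcs > 1) {
		if (mac_82598)
			return 4;	/* 8 TCs x 4 queues */
		return tcs > 4 ? 8 : 16;
	}
	return atr ? MAX_FDIR_INDICES : MAX_RSS_INDICES;
}

int main(void)
{
	printf("RSS only:    %u\n", max_channels(1, 0, 1, 0, 0));	/* 16 */
	printf("ATR enabled: %u\n", max_channels(1, 0, 1, 0, 1));	/* 63 */
	printf("DCB, 8 TCs:  %u\n", max_channels(1, 0, 8, 0, 0));	/* 8  */
	printf("SR-IOV:      %u\n", max_channels(1, 1, 1, 0, 0));	/* 1  */
	return 0;
}

ethtool --set-channels (-L) is the usual way to request a value within this limit; ixgbe_set_channels() below then validates and applies it.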
+static void ixgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = ixgbe_max_channels(adapter);
+
+ /* report info for other vector */
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+ /* nothing else to report if RSS is disabled */
+ if (ch->combined_count == 1)
+ return;
+
+ /* we do not support ATR queueing if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return;
+
+ /* same thing goes for being DCB enabled */
+ if (netdev_get_num_tc(dev) > 1)
+ return;
+
+ /* if ATR is disabled we can exit */
+ if (!adapter->atr_sample_rate)
+ return;
+
+ /* report flow director queues as maximum channels */
+ ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > ixgbe_max_channels(adapter))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ adapter->ring_feature[RING_F_FDIR].limit = count;
+
+ /* cap RSS limit at 16 */
+ if (count > IXGBE_MAX_RSS_INDICES)
+ count = IXGBE_MAX_RSS_INDICES;
+ adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+ /* cap FCoE limit at 8 */
+ if (count > IXGBE_FCRETA_SIZE)
+ count = IXGBE_FCRETA_SIZE;
+ adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+ /* use setup TC to update any traffic class queue mapping */
+ return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+static int ixgbe_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+ u8 sff8472_rev, addr_mode;
+ int ret_val = 0;
+ bool page_swap = false;
+
+ /* avoid concurrent i2c reads */
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+
+ /* used by the service task */
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ /* addressing mode is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have a SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have a SFP which supports a revision of SFF-8472. */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ return ret_val;
+}
+
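get_module_info above maps two module EEPROM bytes onto the ethtool module-info ABI: without SFF-8472 compliance, or when the module needs the unsupported address-change mode to reach page 0xA2, only the 256-byte SFF-8079 view is exposed; otherwise the 512-byte SFF-8472 view is. A small userspace sketch of that mapping using the ETH_MODULE_* constants from linux/ethtool.h; treating a zero compliance byte as "unsupported" follows the SFF-8472 spec and is assumed to be what the driver's IXGBE_SFF_SFF_8472_UNSUP comparison encodes:

#include <linux/ethtool.h>	/* ETH_MODULE_SFF_* constants */
#include <stdbool.h>

struct modinfo { unsigned int type; unsigned int eeprom_len; };

/* Mirror the driver's decision: fall back to the plain SFF-8079 view when
 * the module is not SFF-8472 compliant or requires an address change to
 * reach page 0xA2. */
static struct modinfo classify_module(unsigned char sff8472_comp,
				      bool addr_change_required)
{
	struct modinfo mi;

	if (sff8472_comp == 0x00 || addr_change_required) {
		mi.type = ETH_MODULE_SFF_8079;
		mi.eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		mi.type = ETH_MODULE_SFF_8472;
		mi.eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}
	return mi;
}

ethtool -m <iface> exercises both get_module_info and get_module_eeprom as wired into the ethtool ops table below.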
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u8 databyte = 0xFF;
+ int i = 0;
+ int ret_val = 0;
+
+ /* ixgbe_get_module_info is called before this function in all
+ * cases, so we do not need any checks we already do above,
+ * and can trust ee->len to be a known value.
+ */
+
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Read the first block, SFF-8079 */
+ for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+
+ /* If the second block is requested, check if SFF-8472 is supported. */
+ if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+ if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
+ return -EOPNOTSUPP;
+
+ /* Read the second block, SFF-8472 */
+ for (i = ETH_MODULE_SFF_8079_LEN;
+ i < ETH_MODULE_SFF_8472_LEN; i++) {
+ status = hw->phy.ops.read_i2c_sff8472(hw,
+ i - ETH_MODULE_SFF_8079_LEN, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ return ret_val;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2732,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
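For context, the two new callbacks registered above are what the ethtool core invokes for "ethtool -m <iface>". A minimal userspace sketch of that path, assuming an interface named "eth0" (illustrative only, not part of the patch):

/* Hypothetical userspace sketch: query the SFP+ module info that
 * ixgbe_get_module_info() now reports.  "eth0" is an assumed name.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_modinfo modinfo = { .cmd = ETHTOOL_GMODULEINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&modinfo;

	/* dispatched by the ethtool core to the driver's .get_module_info */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("module type %u, eeprom length %u\n",
		       modinfo.type, modinfo.eeprom_len);

	close(fd);
	return 0;
}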
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 252850d9a3e..f58db453a97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -544,15 +544,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb_shinfo(skb)->gso_size);
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- first->tx_flags |= IXGBE_TX_FLAGS_FSO;
+ first->tx_flags |= IXGBE_TX_FLAGS_TSO;
}
/* set flag indicating FCOE to ixgbe_tx_map call */
- first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
+ first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;
- /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
+ /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_transport_offset(skb) +
@@ -717,10 +716,8 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
/* Extra buffer to be shared by all DDPs for HW work around */
buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (!buffer) {
- e_err(drv, "failed to allocate extra DDP buffer\n");
+ if (!buffer)
return -ENOMEM;
- }
dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index bf724da9937..3a02759b5e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 8c74f739011..ef5f7a678ce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) {
- /* reserve no more than number of CPUs */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
/* alloc queues for FCoE separately */
fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
- f->indices = min_t(u16, num_online_cpus(), f->limit);
- rss_i = max_t(u16, rss_i, f->indices);
+ rss_i = f->indices = f->limit;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = -1;
+ int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
+ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index b3e3294cfe5..68478d6dfa2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -66,7 +66,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2012 Intel Corporation.";
+ "Copyright (c) 1999-2013 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ e_warn(drv, "initiating reset due to tx timeout\n");
ixgbe_service_event_schedule(adapter);
}
}
@@ -837,7 +838,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
break;
/* prevent any other reads prior to eop_desc */
- rmb();
+ read_barrier_depends();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
@@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
- if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -1442,7 +1440,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -2181,10 +2179,10 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
return;
if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
- u32 autoneg;
+ u32 speed;
bool link_up = false;
- hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (link_up)
return;
@@ -2788,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/*
* set WTHRESH to encourage burst writeback, it should not be set
- * higher than 1 when ITR is 0 as it could cause false TX hangs
+ * higher than 1 when:
+ * - ITR is 0 as it could cause false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
*
* In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
+#if IS_ENABLED(CONFIG_BQL)
+ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
@@ -2815,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0;
}
+ /* initialize XPS */
+ if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev,
+ &q_vector->affinity_mask,
+ ring->queue_index);
+ }
+
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
@@ -3997,25 +4011,25 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
**/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
- u32 autoneg;
- bool negotiation, link_up = false;
+ u32 speed;
+ bool autoneg, link_up = false;
u32 ret = IXGBE_ERR_LINK_SETUP;
if (hw->mac.ops.check_link)
- ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
+ ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (ret)
goto link_cfg_out;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
- &negotiation);
+ speed = hw->phy.autoneg_advertised;
+ if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ ret = hw->mac.ops.get_link_capabilities(hw, &speed,
+ &autoneg);
if (ret)
goto link_cfg_out;
if (hw->mac.ops.setup_link)
- ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
+ ret = hw->mac.ops.setup_link(hw, speed, link_up);
link_cfg_out:
return ret;
}
@@ -4467,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned int rss;
+ unsigned int rss, fdir;
u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -4482,38 +4496,57 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
- /* Set capability flags */
+ /* Set common capability flags and settings */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].limit = rss;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
+ adapter->atr_sample_rate = 20;
+ fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_FDIR].limit = fdir;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef CONFIG_IXGBE_DCA
+ adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ /* Default traffic class to use for FCoE */
+ adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif /* CONFIG_IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+
+ /* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
+ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
+ adapter->ring_feature[RING_F_FDIR].limit = 0;
+ adapter->atr_sample_rate = 0;
+ adapter->fdir_pballoc = 0;
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ break;
+ case ixgbe_mac_82599EB:
+ if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X540:
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
if (fwsm & IXGBE_FWSM_TS_ENABLED)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- case ixgbe_mac_82599EB:
- adapter->max_q_vectors = MAX_Q_VECTORS_82599;
- adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* Flow Director hash filters enabled */
- adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].limit =
- IXGBE_MAX_FDIR_INDICES;
- adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
-#ifdef IXGBE_FCOE
- adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-#ifdef CONFIG_IXGBE_DCB
- /* Default traffic class to use for FCoE */
- adapter->fcoe.up = IXGBE_FCOE_DEFTC;
-#endif
-#endif /* IXGBE_FCOE */
break;
default:
break;
@@ -4872,7 +4905,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
- (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5535,6 +5568,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
break;
}
+ adapter->last_rx_ptp_check = jiffies;
+
if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
ixgbe_ptp_start_cyclecounter(adapter);
@@ -5615,6 +5650,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
*/
+ e_warn(drv, "initiating reset to clear Tx work after link loss\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
}
}
@@ -5679,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+ /* concurrent i2c reads are not supported */
+ if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+ return;
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -5739,8 +5779,8 @@ sfp_out:
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg;
- bool negotiation;
+ u32 speed;
+ bool autoneg = false;
if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
return;
@@ -5751,11 +5791,11 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ speed = hw->phy.autoneg_advertised;
+ if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
if (hw->mac.ops.setup_link)
- hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+ hw->mac.ops.setup_link(hw, speed, true);
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
@@ -5879,7 +5919,6 @@ static void ixgbe_service_task(struct work_struct *work)
struct ixgbe_adapter *adapter = container_of(work,
struct ixgbe_adapter,
service_task);
-
ixgbe_reset_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
ixgbe_sfp_link_config_subtask(adapter);
@@ -5887,7 +5926,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
- ixgbe_ptp_overflow_check(adapter);
+
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+ ixgbe_ptp_overflow_check(adapter);
+ ixgbe_ptp_rx_hang(adapter);
+ }
ixgbe_service_event_complete(adapter);
}
@@ -5900,6 +5943,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -5942,10 +5988,9 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
- /* mss_l4len_id: use 1 as index for TSO */
+ /* mss_l4len_id: use 0 as index for TSO */
mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb);
@@ -5967,12 +6012,9 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) {
- if (unlikely(skb->no_fcs))
- first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS;
- if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
- return;
- }
+ if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
+ !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+ return;
} else {
u8 l4_hdr = 0;
switch (first->protocol) {
@@ -6030,30 +6072,32 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
type_tucmd, mss_l4len_idx);
}
-static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
+#define IXGBE_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
+ IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-
- if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
+ IXGBE_ADVTXD_DCMD_VLE);
/* set segmentation enable bits for TSO/FSO */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FSO))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
+ IXGBE_ADVTXD_DCMD_TSE);
+
+ /* set timestamp bit if present */
+ cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
+ IXGBE_ADVTXD_MAC_TSTAMP);
/* insert frame checksum */
- if (!(tx_flags & IXGBE_TX_FLAGS_NO_IFCS))
- cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS);
+ cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
return cmd_type;
}
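The IXGBE_SET_FLAG() helper introduced above replaces the per-flag branches with a branch-free scaling of the masked bit. A standalone sketch (repeating the macro so it compiles on its own; the flag/result values below are made up for illustration) that checks the mapping at compile time:

/* Hypothetical illustration, not part of the patch: the macro scales a
 * bit set in _input from position _flag to position _result, multiplying
 * when the result bit is higher and dividing when it is lower, so callers
 * can OR the results together unconditionally.
 */
typedef unsigned int u32;

#define IXGBE_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

/* flag bit 0x02 set  -> result bit 0x20 set (scaled up by 16) */
_Static_assert(IXGBE_SET_FLAG(0x02u, 0x02u, 0x20u) == 0x20u, "scale up");
/* flag bit clear     -> contributes nothing */
_Static_assert(IXGBE_SET_FLAG(0x00u, 0x02u, 0x20u) == 0x00u, "clear");
/* flag bit 0x40 set  -> result bit 0x04 set (scaled down by 16) */
_Static_assert(IXGBE_SET_FLAG(0x40u, 0x40u, 0x04u) == 0x04u, "scale down");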
@@ -6061,36 +6105,27 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
u32 tx_flags, unsigned int paylen)
{
- __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
/* enable L4 checksum for TSO and TX checksum offload */
- if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CSUM,
+ IXGBE_ADVTXD_POPTS_TXSM);
 /* enable IPv4 checksum for TSO */
- if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
-
- /* use index 1 context for TSO/FSO/FCOE */
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TSO)
-#endif
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_IPV4,
+ IXGBE_ADVTXD_POPTS_IXSM);
/*
* Check Context must be set if Tx switch is enabled, which it
 * always is for the case where virtual functions are running
*/
-#ifdef IXGBE_FCOE
- if (tx_flags & (IXGBE_TX_FLAGS_TXSW | IXGBE_TX_FLAGS_FCOE))
-#else
- if (tx_flags & IXGBE_TX_FLAGS_TXSW)
-#endif
- olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
+ olinfo_status |= IXGBE_SET_FLAG(tx_flags,
+ IXGBE_TX_FLAGS_CC,
+ IXGBE_ADVTXD_CC);
- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
@@ -6100,22 +6135,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
const u8 hdr_len)
{
- dma_addr_t dma;
struct sk_buff *skb = first->skb;
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
- __le32 cmd_type;
+ u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IXGBE_TX_DESC(tx_ring, i);
- ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
- cmd_type = ixgbe_tx_cmd_type(tx_flags);
+ ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
#ifdef IXGBE_FCOE
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6129,19 +6164,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
#endif
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
+ tx_buffer = first;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -6149,18 +6187,18 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IXGBE_MAX_DATA_PER_TXD;
size -= IXGBE_MAX_DATA_PER_TXD;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -6168,6 +6206,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
tx_desc = IXGBE_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
#ifdef IXGBE_FCOE
size = min_t(unsigned int, data_len, skb_frag_size(frag));
@@ -6178,22 +6217,13 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
- tx_desc->read.olinfo_status = 0;
-
- frag++;
}
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IXGBE_TXD_CMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
@@ -6354,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
-#ifdef IXGBE_FCOE
- __be16 protocol = vlan_get_protocol(skb);
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_ring_feature *f;
+ int txq;
- if (((protocol == htons(ETH_P_FCOE)) ||
- (protocol == htons(ETH_P_FIP))) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- struct ixgbe_ring_feature *f;
+ /*
+ * only execute the code below if protocol is FCoE
+ * or FIP and we have FCoE enabled on the adapter
+ */
+ switch (vlan_get_protocol(skb)) {
+ case __constant_htons(ETH_P_FCOE):
+ case __constant_htons(ETH_P_FIP):
+ adapter = netdev_priv(dev);
- f = &adapter->ring_feature[RING_F_FCOE];
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ break;
+ default:
+ return __netdev_pick_tx(dev, skb);
+ }
- while (txq >= f->indices)
- txq -= f->indices;
- txq += adapter->ring_feature[RING_F_FCOE].offset;
+ f = &adapter->ring_feature[RING_F_FCOE];
- return txq;
- }
-#endif
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
- return txq;
- }
+ while (txq >= f->indices)
+ txq -= f->indices;
- return skb_tx_hash(dev, skb);
+ return txq + f->offset;
}
+#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
@@ -6446,6 +6478,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
+
+ /* schedule check for Tx timestamp */
+ adapter->ptp_tx_skb = skb_get(skb);
+ adapter->ptp_tx_start = jiffies;
+ schedule_work(&adapter->ptp_tx_work);
}
#ifdef CONFIG_PCI_IOV
@@ -6454,7 +6491,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
* Tx switch had been disabled.
*/
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- tx_flags |= IXGBE_TX_FLAGS_TXSW;
+ tx_flags |= IXGBE_TX_FLAGS_CC;
#endif
/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
@@ -6785,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
}
+#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -6810,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_DCB
if (tc) {
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -6832,15 +6871,28 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false;
}
- ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+ ixgbe_init_interrupt_scheme(adapter);
+
if (netif_running(dev))
- ixgbe_open(dev);
+ return ixgbe_open(dev);
return 0;
}
-#endif /* CONFIG_IXGBE_DCB */
+#ifdef CONFIG_PCI_IOV
+void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ rtnl_lock();
+ ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
+ rtnl_unlock();
+}
+
+#endif
void ixgbe_do_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -6986,7 +7038,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
+static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr)
{
@@ -7063,7 +7115,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
}
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
+ struct net_device *dev,
+ u32 filter_mask)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
u16 mode;
@@ -7083,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
+#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7195,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
- unsigned int indices = num_possible_cpus();
- unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7246,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
+ if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
- if (ii->mac == ixgbe_mac_82598EB)
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_RSS_INDICES);
- else
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_FDIR_INDICES);
+ /* 8 TC w/ 4 queues per TC */
+ indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+ indices = IXGBE_MAX_RSS_INDICES;
#endif
+ }
- if (ii->mac == ixgbe_mac_82598EB)
- indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
- else
- indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
- indices += min_t(unsigned int, num_possible_cpus(),
- IXGBE_MAX_FCOE_INDICES);
-#endif
- indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7367,7 +7411,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#ifdef CONFIG_PCI_IOV
- ixgbe_enable_sriov(adapter, ii);
+ /* SR-IOV not supported on the 82598 */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ goto skip_sriov;
+ /* Mailbox */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ ixgbe_enable_sriov(adapter);
+ pci_sriov_set_totalvfs(pdev, 63);
+skip_sriov:
#endif
netdev->features = NETIF_F_SG |
@@ -7411,13 +7463,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ unsigned int fcoe_l;
+
if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+ adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC;
@@ -7445,9 +7501,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
- if (!is_valid_ether_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
@@ -7624,8 +7679,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- ixgbe_disable_sriov(adapter);
-
+#ifdef CONFIG_PCI_IOV
+ /*
+ * Only disable SR-IOV on unload if the user specified the now
+ * deprecated max_vfs module parameter.
+ */
+ if (max_vfs)
+ ixgbe_disable_sriov(adapter);
+#endif
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
@@ -7730,6 +7791,8 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (vfdev) {
e_dev_err("Issuing VFLR to VF %d\n", vf);
pci_write_config_dword(vfdev, 0xA8, 0x00008000);
+ /* Free device reference count */
+ pci_dev_put(vfdev);
}
pci_cleanup_aer_uncorrect_error_status(pdev);
@@ -7839,6 +7902,7 @@ static struct pci_driver ixgbe_driver = {
.resume = ixgbe_resume,
#endif
.shutdown = ixgbe_shutdown,
+ .sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 1f3e32b576a..d4a64e66539 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 42dd65e6ac9..e44ff47659b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 71659edf81a..060d2ad2ac9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -494,11 +494,9 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete)
{
@@ -854,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
- &identifier);
+ &identifier);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@@ -872,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* ID Module
@@ -986,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
vendor_oui =
@@ -1206,6 +1190,22 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
}
/**
+ * ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ sff8472_data);
+}
+
+/**
* ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
* @hw: pointer to hardware structure
* @byte_offset: EEPROM byte offset to write
@@ -1293,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
- ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index cc18165b4c0..886a3431cf5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
#include "ixgbe_type.h"
#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
#define IXGBE_SFF_IDENTIFIER 0x0
@@ -41,6 +42,8 @@
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -51,6 +54,7 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_ADDRESSING_MODE 0x4
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -88,6 +92,9 @@
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+/* SFP+ SFF-8472 Compliance code */
+#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
@@ -98,7 +105,6 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
- bool autoneg,
bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -126,6 +132,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
+s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index bb9256a1b0a..331987d6815 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -96,15 +96,12 @@
#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL
#define IXGBE_OVERFLOW_PERIOD (HZ * 30)
+#define IXGBE_PTP_TX_TIMEOUT (HZ * 15)
#ifndef NSECS_PER_SEC
#define NSECS_PER_SEC 1000000000ULL
#endif
-static struct sock_filter ptp_filter[] = {
- PTP_FILTER
-};
-
/**
* ixgbe_ptp_setup_sdp
* @hw: the hardware private structure
@@ -405,149 +402,145 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
}
}
-
/**
- * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
- * @work: structure containing information about this work task
+ * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
+ * @adapter: private adapter struct
*
- * this work function is scheduled to continue reading the timecounter
+ * this watchdog task periodically reads the timecounter
* in order to prevent missing when the system time registers wrap
- * around. This needs to be run approximately twice a minute when no
- * PTP activity is occurring.
+ * around. This needs to be run approximately twice a minute.
*/
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
- unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
+ bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
+ IXGBE_OVERFLOW_PERIOD);
struct timespec ts;
- if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
- (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
+ if (timeout) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
}
}
/**
- * ixgbe_ptp_match - determine if this skb matches a ptp packet
- * @skb: pointer to the skb
- * @hwtstamp: pointer to the hwtstamp_config to check
- *
- * Determine whether the skb should have been timestamped, assuming the
- * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
- * should have a timestamp waiting in the registers, and 0 otherwise.
+ * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched
+ * @adapter: private network adapter structure
*
- * V1 packets have to check the version type to determine whether they are
- * correct. However, we can't directly access the data because it might be
- * fragmented in the SKB, in paged memory. In order to work around this, we
- * use skb_copy_bits which will properly copy the data whether it is in the
- * paged memory fragments or not. We have to copy the IP header as well as the
- * message type.
+ * this watchdog task is scheduled to detect the error case where hardware
+ * has dropped a timestamped Rx packet because the Rx ring was full. The
+ * error is rare, but it leaves the device unable to timestamp
+ * any future packets.
*/
-static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
{
- struct iphdr iph;
- u8 msgtype;
- unsigned int type, offset;
-
- if (rx_filter == HWTSTAMP_FILTER_NONE)
- return 0;
-
- type = sk_run_filter(skb, ptp_filter);
-
- if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
- return type & PTP_CLASS_V2;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_ring *rx_ring;
+ u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ unsigned long rx_event;
+ int n;
- /* For the remaining cases actually check message type */
- switch (type) {
- case PTP_CLASS_V1_IPV4:
- skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
- offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
- break;
- case PTP_CLASS_V1_IPV6:
- offset = OFF_PTP6 + OFF_PTP_CONTROL;
- break;
- default:
- /* other cases invalid or handled above */
- return 0;
+ /* if we don't have a valid timestamp in the registers, just update the
+ * timeout counter and exit
+ */
+ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) {
+ adapter->last_rx_ptp_check = jiffies;
+ return;
}
- /* Make sure our buffer is long enough */
- if (skb->len < offset)
- return 0;
+ /* determine the most recent watchdog or rx_timestamp event */
+ rx_event = adapter->last_rx_ptp_check;
+ for (n = 0; n < adapter->num_rx_queues; n++) {
+ rx_ring = adapter->rx_ring[n];
+ if (time_after(rx_ring->last_rx_timestamp, rx_event))
+ rx_event = rx_ring->last_rx_timestamp;
+ }
- skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+ /* only need to read the high RXSTMP register to clear the lock */
+ if (time_is_before_jiffies(rx_event + 5*HZ)) {
+ IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ adapter->last_rx_ptp_check = jiffies;
- switch (rx_filter) {
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
- break;
- default:
- return 0;
+ e_warn(drv, "clearing RX Timestamp hang");
}
}
/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
- * @q_vector: structure containing interrupt and ring information
- * @skb: particular skb to send timestamp with
+ * @adapter: the private adapter struct
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
- struct sk_buff *skb)
+static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter;
- struct ixgbe_hw *hw;
+ struct ixgbe_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval = 0, ns;
- u32 tsynctxctl;
unsigned long flags;
- /* we cannot process timestamps on a ring without a q_vector */
- if (!q_vector || !q_vector->adapter)
- return;
-
- adapter = q_vector->adapter;
- hw = &adapter->hw;
-
- tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;
- /*
- * if TX timestamp is not valid, exit after clearing the
- * timestamp registers
- */
- if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
- return;
-
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
+ skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
+
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+}
+
+/**
+ * ixgbe_ptp_tx_hwtstamp_work
+ * @work: pointer to the work struct
+ *
+ * This work item polls the TSYNCTXCTL valid bit to determine when a Tx hardware
+ * timestamp has been taken for the current skb. It is necessary because the
+ * descriptor's "done" bit does not correlate with the timestamp event.
+ */
+static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter,
+ ptp_tx_work);
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
+ IXGBE_PTP_TX_TIMEOUT);
+ u32 tsynctxctl;
+
+ /* we have to have a valid skb */
+ if (!adapter->ptp_tx_skb)
+ return;
+
+ if (timeout) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ e_warn(drv, "clearing Tx Timestamp hang");
+ return;
+ }
+
+ tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID)
+ ixgbe_ptp_tx_hwtstamp(adapter);
+ else
+ /* reschedule to keep checking if it's not available yet */
+ schedule_work(&adapter->ptp_tx_work);
}
/**
- * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
- * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
* value, then store that result into the shhwtstamps structure which
* is passed up the network stack
*/
-void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
@@ -563,37 +556,17 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
adapter = q_vector->adapter;
hw = &adapter->hw;
- if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
- return;
-
+ /*
+ * Read the tsyncrxctl register afterwards in order to prevent taking an
+ * I/O hit on every packet.
+ */
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
-
- /* Check if we have a valid timestamp and make sure the skb should
- * have been timestamped */
if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
- /*
- * Always read the registers, in order to clear a possible fault
- * because of stagnant RX timestamp values for a packet that never
- * reached the queue.
- */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
- /*
- * If the timestamp bit is set in the packet's descriptor, we know the
- * timestamp belongs to this packet. No other packet can be
- * timestamped until the registers for timestamping have been read.
- * Therefor only one packet with this bit can be in the queue at a
- * time, and the rx timestamp values that were in the registers belong
- * to this packet.
- *
- * If nothing went wrong, then it should have a skb_shared_tx that we
- * can turn into a skb_shared_hwtstamps.
- */
- if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
- return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
ns = timecounter_cyc2time(&adapter->tc, regval);
@@ -698,9 +671,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
- /* Store filter value for later use */
- adapter->rx_hwtstamp_filter = config.rx_filter;
-
/* define ethertype filter for timestamping L2 packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
@@ -902,11 +872,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
- /* initialize the ptp filter */
- if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
- e_dev_warn("ptp_filter_init failed\n");
-
spin_lock_init(&adapter->tmreg_lock);
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
@@ -938,6 +905,12 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
ixgbe_ptp_setup_sdp(adapter);
+ cancel_work_sync(&adapter->ptp_tx_work);
+ if (adapter->ptp_tx_skb) {
+ dev_kfree_skb_any(adapter->ptp_tx_skb);
+ adapter->ptp_tx_skb = NULL;
+ }
+
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
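The new ptp_tx_work path above is only exercised once an application has requested hardware Tx timestamps. A minimal userspace sketch, assuming the interface is named "eth0" and using only the standard SIOCSHWTSTAMP / SO_TIMESTAMPING interfaces (this setup is illustrative, not taken from the patch):

/* Hypothetical sketch: enable hardware Tx timestamps so that
 * ixgbe_xmit_frame_ring() sets SKBTX_HW_TSTAMP and ptp_tx_work
 * polls TSYNCTXCTL for the latched timestamp.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tx_tstamp(int sock, const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* routed to ixgbe_ptp_hwtstamp_ioctl() via SIOCSHWTSTAMP */
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
		return -1;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}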
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 85cddac673e..d44b4d21268 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -44,50 +44,11 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
+static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
- int pre_existing_vfs = 0;
-
- pre_existing_vfs = pci_num_vf(adapter->pdev);
- if (!pre_existing_vfs && !adapter->num_vfs)
- return;
-
- /* If there are pre-existing VFs then we have to force
- * use of that many because they were not deleted the last
- * time someone removed the PF driver. That would have
- * been because they were allocated to guest VMs and can't
- * be removed. Go ahead and just re-enable the old amount.
- * If the user wants to change the number of VFs they can
- * use ethtool while making sure no VFs are allocated to
- * guest VMs... i.e. the right way.
- */
- if (pre_existing_vfs) {
- adapter->num_vfs = pre_existing_vfs;
- dev_warn(&adapter->pdev->dev, "Virtual Functions already "
- "enabled for this device - Please reload all "
- "VF drivers to avoid spoofed packet errors\n");
- } else {
- int err;
- /*
- * The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
-
- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- adapter->num_vfs = 0;
- return;
- }
- }
adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
@@ -128,12 +89,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
kcalloc(adapter->num_vfs,
sizeof(struct vf_data_storage), GFP_KERNEL);
if (adapter->vfinfo) {
- /* Now that we're sure SR-IOV is enabled
- * and memory allocated set up the mailbox parameters
- */
- ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
-
 /* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(adapter->num_vfs < 16)) {
@@ -157,10 +112,62 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+/* Note this function is called when the user wants to enable SR-IOV
+ * VFs using the now deprecated module parameter
+ */
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
+{
+ int pre_existing_vfs = 0;
+
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
+ if (!pre_existing_vfs && !adapter->num_vfs)
return;
+
+ if (!pre_existing_vfs)
+ dev_warn(&adapter->pdev->dev,
+ "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
+
+ /* If there are pre-existing VFs then we have to force
+ * use of that many - overriding any module parameter value.
+ * This may result from the user unloading the PF driver
+ * while VFs were assigned to guest VMs or because the VFs
+ * have been created via the new PCI SR-IOV sysfs interface.
+ */
+ if (pre_existing_vfs) {
+ adapter->num_vfs = pre_existing_vfs;
+ dev_warn(&adapter->pdev->dev,
+ "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
+ } else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function. If the user requests more than
+ * 63 VFs it is an error - reset to the default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ adapter->num_vfs = 0;
+ return;
+ }
}
- /* Oh oh */
+ if (!__ixgbe_enable_sriov(adapter))
+ return;
+
+ /* If we have gotten to this point then there is no memory available
+ * to manage the VF devices - print message and bail.
+ */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
ixgbe_disable_sriov(adapter);
@@ -200,11 +207,12 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
}
#endif /* #ifdef CONFIG_PCI_IOV */
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 gpie;
u32 vmdctl;
+ int rss;
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
@@ -219,7 +227,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
/* if SR-IOV is already disabled then there is nothing to do */
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return;
+ return 0;
#ifdef CONFIG_PCI_IOV
/*
@@ -229,7 +237,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
*/
if (ixgbe_vfs_are_assigned(adapter)) {
e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
- return;
+ return -EPERM;
}
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
@@ -252,10 +260,94 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
adapter->ring_feature[RING_F_VMDQ].offset = 0;
+ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+
/* take a breather then clean up driver data */
msleep(100);
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ return 0;
+}
+
+static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
+ int err = 0;
+ int i;
+ int pre_existing_vfs = pci_num_vf(dev);
+
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ err = ixgbe_disable_sriov(adapter);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (err)
+ goto err_out;
+
+ /* While the SR-IOV capability structure reports total VFs to be
+ * 64 we limit the actual number that can be allocated to 63 so
+ * that some transmit/receive resources can be reserved to the
+ * PF. The PCI bus driver already checks for other values out of
+ * range.
+ */
+ if (num_vfs > 63) {
+ err = -EPERM;
+ goto err_out;
+ }
+
+ adapter->num_vfs = num_vfs;
+
+ err = __ixgbe_enable_sriov(adapter);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(dev, (i | 0x10000000));
+
+ err = pci_enable_sriov(dev, num_vfs);
+ if (err) {
+ e_dev_warn("Failed to enable PCI sriov: %d\n", err);
+ goto err_out;
+ }
+ ixgbe_sriov_reinit(adapter);
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
+{
+ struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+ u32 current_flags = adapter->flags;
+
+ err = ixgbe_disable_sriov(adapter);
+
+ /* Only reinit if no error and state changed */
+ if (!err && current_flags != adapter->flags) {
+ /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+#ifdef CONFIG_PCI_IOV
+ ixgbe_sriov_reinit(adapter);
+#endif
+ }
+
+ return err;
+}
+
+int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ if (num_vfs == 0)
+ return ixgbe_pci_sriov_disable(dev);
+ else
+ return ixgbe_pci_sriov_enable(dev, num_vfs);
}
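ixgbe_pci_sriov_configure() is the callback the PCI core invokes when userspace writes the device's sriov_numvfs sysfs attribute (writing 0 disables the VFs). The hook-up into struct pci_driver happens in ixgbe_main.c, outside this excerpt; a hedged sketch of what that wiring typically looks like, with the neighbouring fields assumed from the existing driver:

#include <linux/pci.h>

/* sketch only - .sriov_configure is the real struct pci_driver field,
 * the other entries are assumed for illustration
 */
static struct pci_driver ixgbe_driver_sketch = {
	.name		 = "ixgbe",
	.id_table	 = ixgbe_pci_tbl,
	.probe		 = ixgbe_probe,
	.remove		 = ixgbe_remove,
	.sriov_configure = ixgbe_pci_sriov_configure,	/* callback added above */
};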
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
@@ -447,15 +539,6 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
- u16 vid, u16 qos, u32 vf)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
-}
-
static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 1be1d30e4e7..4713f9fc7f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -41,12 +41,20 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
-void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii);
+void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
#endif
+int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+ u16 vid, u16 qos, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
+}
#endif /* _IXGBE_SRIOV_H_ */
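The VMVIR value built by the now-inlined ixgbe_set_vmvir() packs a VF's default VLAN tag: the VLAN ID in the low 12 bits, the 802.1p priority shifted by VLAN_PRIO_SHIFT (13), and the "always use default VLAN" flag. A standalone worked example of that encoding, with the constants copied here for illustration (IXGBE_VMVIR_VLANA_DEFAULT is 0x40000000 in ixgbe_type.h):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT		13		/* include/linux/if_vlan.h */
#define VMVIR_VLANA_DEFAULT	0x40000000u	/* IXGBE_VMVIR_VLANA_DEFAULT */

int main(void)
{
	uint32_t vid = 100, qos = 3;
	uint32_t vmvir = vid | (qos << VLAN_PRIO_SHIFT) | VMVIR_VLANA_DEFAULT;

	printf("VMVIR = 0x%08x\n", vmvir);	/* 0x40006064 */
	return 0;
}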
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 16ddf14e8ba..d118def16f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 9cd8a13711d..6652e96c352 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -2822,7 +2822,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
- s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
@@ -2869,12 +2869,12 @@ struct ixgbe_phy_operations {
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
- s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
- bool);
+ s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+ s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
s32 (*check_overtemp)(struct ixgbe_hw *);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index c73b9299339..66c5e946284 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2012 Intel Corporation.
+ Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -72,14 +72,13 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
* ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
* @hw: pointer to hardware structure
* @speed: new link speed
- * @autoneg: true if autonegotiation enabled
* @autoneg_wait_to_complete: true when waiting for completion is needed
**/
static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
- ixgbe_link_speed speed, bool autoneg,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
- return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+ return hw->phy.ops.setup_link_speed(hw, speed,
autoneg_wait_to_complete);
}
@@ -879,6 +878,7 @@ static struct ixgbe_phy_operations phy_ops_X540 = {
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_byte = &ixgbe_read_i2c_byte_generic,
.write_i2c_byte = &ixgbe_write_i2c_byte_generic,
+ .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic,
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
.check_overtemp = &ixgbe_tn_check_overtemp,
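The new read_i2c_sff8472 PHY op gives the driver a way to read the SFF-8472 diagnostic page of an SFP+ module (the data behind ethtool -m). A hedged usage sketch; only the op's signature (hw, byte offset, output byte) comes from the table above, the wrapper name and offset are illustrative:

/* hypothetical wrapper: fetch one byte of the module's diagnostic (A2h) page */
static s32 example_read_sff8472_byte(struct ixgbe_hw *hw, u8 offset, u8 *val)
{
	/* an ethtool -m style dump would simply walk offsets 0..255 */
	return hw->phy.ops.read_i2c_sff8472(hw, offset, val);
}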
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8f2070439b5..c9d0c12d6f0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -99,6 +99,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = -1;
+ hw->mac.get_link_status = 1;
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 257357ae66c..c3db6cd69b6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -750,12 +750,37 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
struct ixgbevf_adapter *adapter = data;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg;
+ bool got_ack = false;
hw->mac.get_link_status = 1;
+ if (!hw->mbx.ops.check_for_ack(hw))
+ got_ack = true;
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer, jiffies);
+ if (!hw->mbx.ops.check_for_msg(hw)) {
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 1));
+ adapter->link_up = false;
+ }
+
+ if (msg & IXGBE_VT_MSGTYPE_NACK)
+ dev_info(&pdev->dev,
+ "Last Request of type %2.2x to PF Nacked\n",
+ msg & 0xFF);
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
+ }
+
+ /* checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it
+ */
+ if (got_ack)
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -2095,6 +2120,9 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ if (!adapter->link_up)
+ return;
+
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
adapter->stats.vfgprc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
@@ -2217,9 +2245,23 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
if (link_up) {
if (!netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- 10 : 1);
+ char *link_speed_string;
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ link_speed_string = "10 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ link_speed_string = "1 Gbps";
+ break;
+ case IXGBE_LINK_SPEED_100_FULL:
+ link_speed_string = "100 Mbps";
+ break;
+ default:
+ link_speed_string = "unknown speed";
+ break;
+ }
+ dev_info(&adapter->pdev->dev,
+ "NIC Link is Up, %s\n", link_speed_string);
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
}
@@ -2227,7 +2269,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
adapter->link_up = false;
adapter->link_speed = 0;
if (netif_carrier_ok(netdev)) {
- hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
}
@@ -3328,8 +3370,6 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
/* The HW MAC address was set and/or determined in sw_init */
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
-
if (!is_valid_ether_addr(netdev->dev_addr)) {
pr_err("invalid MAC address\n");
err = -EIO;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index bc58f1dc22f..5409fe876a4 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -695,9 +695,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, lp->dev->name);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index c124e67a1a1..6a2127489af 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -302,9 +302,9 @@ ltq_etop_hw_init(struct net_device *dev)
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "Lantiq ETOP");
- strcpy(info->bus_info, "internal");
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
+ strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int
@@ -393,8 +393,8 @@ ltq_etop_mdio_probe(struct net_device *dev)
return -ENODEV;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
- 0, priv->pldata->mii_mode);
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &ltq_etop_mdio_link, priv->pldata->mii_mode);
if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -655,7 +655,7 @@ ltq_etop_init(struct net_device *dev)
/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
if (random_mac)
- dev->addr_assign_type |= NET_ADDR_RANDOM;
+ dev->addr_assign_type = NET_ADDR_RANDOM;
ltq_etop_set_multicast_list(dev);
err = ltq_etop_mdio_init(dev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 84c13263c51..29140502b71 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1879,12 +1879,10 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
memset(rxq->rx_desc_area, 0, size);
rxq->rx_desc_area_size = size;
- rxq->rx_skb = kmalloc(rxq->rx_ring_size * sizeof(*rxq->rx_skb),
- GFP_KERNEL);
- if (rxq->rx_skb == NULL) {
- netdev_err(mp->dev, "can't allocate rx skb ring\n");
+ rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+ GFP_KERNEL);
+ if (rxq->rx_skb == NULL)
goto out_free;
- }
rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
@@ -2789,7 +2787,7 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy_reset(mp);
- phy_attach(mp->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_GMII);
+ phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 74f1c157a48..77b7c80262f 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -164,7 +164,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!bus->irq) {
- dev_err(&pdev->dev, "Cannot allocate PHY IRQ array\n");
mdiobus_free(bus);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 10d678d3dd0..037ed866c22 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -627,7 +627,6 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
netif_addr_lock_bh(dev);
update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
@@ -1391,7 +1390,7 @@ static void phy_init(struct pxa168_eth_private *pep, int speed, int duplex)
struct phy_device *phy = pep->phy;
ethernet_phy_reset(pep);
- phy_attach(pep->dev, dev_name(&phy->dev), 0, PHY_INTERFACE_MODE_MII);
+ phy_attach(pep->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_MII);
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
@@ -1444,10 +1443,10 @@ static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRIVER_NAME, 32);
- strncpy(info->version, DRIVER_VERSION, 32);
- strncpy(info->fw_version, "N/A", 32);
- strncpy(info->bus_info, "N/A", 32);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 5544a1fe2f9..171f4b3dda0 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3855,7 +3855,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return dev;
}
@@ -3917,10 +3916,9 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* space for skge@pci:0000:04:00.0 */
hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
- if (!hw) {
- dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ if (!hw)
goto err_out_free_regions;
- }
+
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
hw->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 3269eb38cc5..fc07ca35721 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4801,7 +4801,6 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
return dev;
}
@@ -4970,10 +4969,8 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
- if (!hw) {
- dev_err(&pdev->dev, "cannot allocate hardware struct\n");
+ if (!hw)
goto err_out_free_regions;
- }
hw->pdev = pdev;
sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 03447dad07e..00f25b5f297 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -35,6 +35,8 @@
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mlx4/driver.h>
+#include <linux/in.h>
+#include <net/ip.h>
#include "mlx4_en.h"
#include "en_port.h"
@@ -494,7 +496,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
mlx4_en_free_resources(priv);
@@ -589,7 +591,7 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
priv->prof->rss_rings = rss_rings;
@@ -664,27 +666,90 @@ static int mlx4_en_validate_flow(struct net_device *dev,
if ((cmd->fs.flow_type & FLOW_EXT)) {
if (cmd->fs.m_ext.vlan_etype ||
- !(cmd->fs.m_ext.vlan_tci == 0 ||
- cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+ 0 ||
+ (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
+ cpu_to_be16(VLAN_VID_MASK)))
return -EINVAL;
+
+ if (cmd->fs.m_ext.vlan_tci) {
+ if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+
+ }
}
return 0;
}
+static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h,
+ struct mlx4_spec_list *spec_l2,
+ unsigned char *mac)
+{
+ int err = 0;
+ __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) &&
+ (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ return err;
+}
+
+static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h,
+ struct mlx4_spec_list *spec_l2,
+ __be32 ipv4_dst)
+{
+#ifdef CONFIG_INET
+ unsigned char mac[ETH_ALEN];
+
+ if (!ipv4_is_multicast(ipv4_dst)) {
+ if (cmd->fs.flow_type & FLOW_MAC_EXT)
+ memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
+ else
+ memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
+ } else {
+ ip_eth_mc_map(ipv4_dst, mac);
+ }
+
+ return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
+#else
+ return -EINVAL;
+#endif
+}
+
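For multicast destinations the helper above derives the L2 address with ip_eth_mc_map(), which produces the standard 01:00:5e prefix followed by the low 23 bits of the group address. A standalone illustration of that mapping (plain userspace C, not the kernel helper itself):

#include <stdio.h>
#include <stdint.h>

/* same mapping as the kernel's ip_eth_mc_map(): 01:00:5e + low 23 bits */
static void ipv4_mc_to_mac(uint32_t group /* host order */, unsigned char mac[6])
{
	mac[0] = 0x01;
	mac[1] = 0x00;
	mac[2] = 0x5e;
	mac[3] = (group >> 16) & 0x7f;
	mac[4] = (group >> 8) & 0xff;
	mac[5] = group & 0xff;
}

int main(void)
{
	unsigned char mac[6];

	ipv4_mc_to_mac(0xe0010203, mac);	/* 224.1.2.3 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints: 01:00:5e:01:02:03 */
	return 0;
}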
static int add_ip_rule(struct mlx4_en_priv *priv,
- struct ethtool_rxnfc *cmd,
- struct list_head *list_h)
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
{
- struct mlx4_spec_list *spec_l3;
+ int err;
+ struct mlx4_spec_list *spec_l2 = NULL;
+ struct mlx4_spec_list *spec_l3 = NULL;
struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
- spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
- if (!spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- return -ENOMEM;
+ spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ if (!spec_l2 || !spec_l3) {
+ err = -ENOMEM;
+ goto free_spec;
}
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
+ cmd->fs.h_u.
+ usr_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
if (l3_mask->ip4src)
@@ -695,34 +760,52 @@ static int add_ip_rule(struct mlx4_en_priv *priv,
list_add_tail(&spec_l3->list, list_h);
return 0;
+
+free_spec:
+ kfree(spec_l2);
+ kfree(spec_l3);
+ return err;
}
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
struct ethtool_rxnfc *cmd,
struct list_head *list_h, int proto)
{
- struct mlx4_spec_list *spec_l3;
- struct mlx4_spec_list *spec_l4;
+ int err;
+ struct mlx4_spec_list *spec_l2 = NULL;
+ struct mlx4_spec_list *spec_l3 = NULL;
+ struct mlx4_spec_list *spec_l4 = NULL;
struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
- spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
- spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
- if (!spec_l4 || !spec_l3) {
- en_err(priv, "Fail to alloc ethtool rule.\n");
- kfree(spec_l3);
- kfree(spec_l4);
- return -ENOMEM;
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
+ if (!spec_l2 || !spec_l3 || !spec_l4) {
+ err = -ENOMEM;
+ goto free_spec;
}
spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
if (proto == TCP_V4_FLOW) {
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+ spec_l2,
+ cmd->fs.h_u.
+ tcp_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
} else {
+ err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
+ spec_l2,
+ cmd->fs.h_u.
+ udp_ip4_spec.ip4dst);
+ if (err)
+ goto free_spec;
spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
@@ -744,6 +827,12 @@ static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
list_add_tail(&spec_l4->list, list_h);
return 0;
+
+free_spec:
+ kfree(spec_l2);
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return err;
}
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
@@ -751,43 +840,23 @@ static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
struct list_head *rule_list_h)
{
int err;
- __be64 be_mac;
struct ethhdr *eth_spec;
- struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_spec_list *spec_l2;
- __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+ struct mlx4_en_priv *priv = netdev_priv(dev);
err = mlx4_en_validate_flow(dev, cmd);
if (err)
return err;
- spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
- if (!spec_l2)
- return -ENOMEM;
-
- if (cmd->fs.flow_type & FLOW_MAC_EXT) {
- memcpy(&be_mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
- } else {
- u64 mac = priv->mac & MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- }
-
- spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
- if ((cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) != ETHER_FLOW)
- memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
-
- if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
- spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
- spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
- }
-
- list_add_tail(&spec_l2->list, rule_list_h);
-
switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
+ spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
eth_spec = &cmd->fs.h_u.ether_spec;
- memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
+ &eth_spec->h_dest[0]);
spec_l2->eth.ether_type = eth_spec->h_proto;
if (eth_spec->h_proto)
spec_l2->eth.ether_type_enable = 1;
@@ -861,6 +930,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
loc_rule->id = 0;
memset(&loc_rule->flow_spec, 0,
sizeof(struct ethtool_rx_flow_spec));
+ list_del(&loc_rule->list);
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
@@ -871,6 +941,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
loc_rule->id = reg_id;
memcpy(&loc_rule->flow_spec, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
+ list_add_tail(&loc_rule->list, &priv->ethtool_list);
out_free_list:
list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
@@ -904,6 +975,7 @@ static int mlx4_en_flow_detach(struct net_device *dev,
}
rule->id = 0;
memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+ list_del(&rule->list);
out:
return err;
@@ -952,7 +1024,8 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
cmd->cmd == ETHTOOL_GRXCLSRULE ||
cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
- mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ (mdev->dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
return -EINVAL;
switch (cmd->cmd) {
@@ -988,7 +1061,8 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ if (mdev->dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
return -EINVAL;
switch (cmd->cmd) {
@@ -1037,7 +1111,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
port_up = 1;
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
}
mlx4_en_free_resources(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 3a2b8c65642..e3c3d122979 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
return i;
}
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED|
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);
+
+ /* Filter out the port's own looped-back packets when the device
+ * is multi-function (SR-IOV) and neither the loopback feature nor
+ * the selftest needs to receive them
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) &&
+ !(features & NETIF_F_LOOPBACK) && !priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;
+
+ /* Set dmac in Tx WQE if we are in SRIOV mode or if loopback selftest
+ * is requested
+ */
+ if (mlx4_is_mfunc(priv->mdev->dev) || priv->validate_loopback)
+ priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
+}
+
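A compact way to read the flag logic above: RX_FILTER_NEEDED is set only when the device is multi-function and neither the loopback feature nor the selftest wants the looped-back traffic, while ENABLE_HW_LOOPBACK (dmac placed in the Tx WQE) is set whenever loopback can occur at all. A standalone sketch of that decision, where plain booleans stand in for mlx4_is_mfunc(), NETIF_F_LOOPBACK and priv->validate_loopback:

#include <stdio.h>
#include <stdbool.h>

struct lb_state { bool rx_filter_needed, enable_hw_loopback; };

static struct lb_state loopback_state(bool mfunc, bool feat_loopback, bool selftest)
{
	struct lb_state s = {
		/* drop own loopback traffic unless loopback/selftest wants it */
		.rx_filter_needed   = mfunc && !feat_loopback && !selftest,
		/* put dmac in the Tx WQE whenever loopback may happen */
		.enable_hw_loopback = mfunc || selftest,
	};
	return s;
}

int main(void)
{
	struct lb_state s = loopback_state(true, false, false);

	printf("filter=%d hw_loopback=%d\n",
	       s.rx_filter_needed, s.enable_hw_loopback);	/* filter=1 hw_loopback=1 */
	return 0;
}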
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{
struct mlx4_en_profile *params = &mdev->profile;
@@ -191,10 +213,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_en_version);
- mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
if (!mdev) {
- dev_err(&dev->pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_free_res;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 75a3f467bb5..5088dc5c3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
.priority = MLX4_DOMAIN_RFS,
};
int rc;
- __be64 mac;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp.list, &rule.list);
- mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
-
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
- memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0;
@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
return 0;
}
+static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+{
+ int i;
+ for (i = ETH_ALEN - 1; i >= 0; --i) {
+ dst_mac[i] = src_mac & 0xff;
+ src_mac >>= 8;
+ }
+ memset(&dst_mac[ETH_ALEN], 0, 2);
+}
+
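mlx4_en_u64_to_mac() above (and mlx4_en_mac_to_u64() further down) simply treat the six MAC bytes as the low 48 bits of a u64, most significant byte first. A standalone round-trip example of that convention:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void u64_to_mac(unsigned char dst[ETH_ALEN], uint64_t src)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst[i] = src & 0xff;
		src >>= 8;
	}
}

static uint64_t mac_to_u64(const unsigned char *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

int main(void)
{
	unsigned char mac[ETH_ALEN];

	u64_to_mac(mac, 0x0002c9123456ULL);
	printf("%02x:%02x:%02x:%02x:%02x:%02x -> 0x%012llx\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
	       (unsigned long long)mac_to_u64(mac));
	/* prints: 00:02:c9:12:34:56 -> 0x0002c9123456 */
	return 0;
}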
+static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
+ unsigned char *mac, int *qpn, u64 *reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = priv->port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (err)
+ en_warn(priv, "Failed Attaching Unicast\n");
+
+ return err;
+}
+
+static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
+ unsigned char *mac, int qpn, u64 reg_id)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = qpn;
+ memcpy(&gid[10], mac, ETH_ALEN);
+ gid[5] = priv->port;
+
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ en_err(priv, "Invalid steering mode.\n");
+ }
+}
+
+static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ struct mlx4_mac_entry *entry;
+ int index = 0;
+ int err = 0;
+ u64 reg_id;
+ int *qpn = &priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
+ priv->dev->dev_addr);
+ index = mlx4_register_mac(dev, priv->port, mac);
+ if (index < 0) {
+ err = index;
+ en_err(priv, "Failed adding MAC: %pM\n",
+ priv->dev->dev_addr);
+ return err;
+ }
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+ int base_qpn = mlx4_get_base_qpn(dev, priv->port);
+ *qpn = base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
+ if (err) {
+ en_err(priv, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
+ if (err)
+ goto steer_err;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+ entry->reg_id = reg_id;
+
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);
+
+ return 0;
+
+alloc_err:
+ mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, priv->port, mac);
+ return err;
+}
+
+static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int qpn = priv->base_qpn;
+ u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+
+ en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+ priv->dev->dev_addr);
+ mlx4_unregister_mac(dev, priv->port, mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ priv->dev->dev_addr)) {
+ en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
+ priv->port, priv->dev->dev_addr, qpn);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_qp_release_range(dev, qpn, 1);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ break;
+ }
+ }
+ }
+}
+
+static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
+ unsigned char *new_mac, unsigned char *prev_mac)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_dev *dev = mdev->dev;
+ int err = 0;
+ u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+
+ bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ qpn, entry->reg_id);
+ mlx4_unregister_mac(dev, priv->port,
+ prev_mac_u64);
+ hlist_del_rcu(&entry->hlist);
+ synchronize_rcu();
+ memcpy(entry->mac, new_mac, ETH_ALEN);
+ entry->reg_id = 0;
+ mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
+ hlist_add_head_rcu(&entry->hlist,
+ &priv->mac_hash[mac_hash]);
+ mlx4_register_mac(dev, priv->port, new_mac_u64);
+ err = mlx4_en_uc_steer_add(priv, new_mac,
+ &qpn,
+ &entry->reg_id);
+ return err;
+ }
+ }
+ return -EINVAL;
+ }
+
+ return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
+}
+
u64 mlx4_en_mac_to_u64(u8 *addr)
{
u64 mac = 0;
@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
- priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
queue_work(mdev->workqueue, &priv->mac_task);
return 0;
}
@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
/* Remove old MAC and insert the new one */
- err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac);
+ err = mlx4_en_replace_mac(priv, priv->base_qpn,
+ priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
} else
- en_dbg(HW, priv, "Port is down while "
- "registering mac, exiting...\n");
+ en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
mutex_unlock(&mdev->state_lock);
}
@@ -482,7 +708,6 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
if (!tmp) {
- en_err(priv, "failed to allocate multicast list\n");
mlx4_en_clear_list(dev);
return;
}
@@ -526,181 +751,153 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
}
}
if (!found) {
- new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ new_mc = kmemdup(src_tmp,
+ sizeof(struct mlx4_en_mc_list),
GFP_KERNEL);
- if (!new_mc) {
- en_err(priv, "Failed to allocate current multicast list\n");
+ if (!new_mc)
return;
- }
- memcpy(new_mc, src_tmp,
- sizeof(struct mlx4_en_mc_list));
+
new_mc->action = MCLIST_ADD;
list_add_tail(&new_mc->list, dst);
}
}
}
-static void mlx4_en_set_multicast(struct net_device *dev)
+static void mlx4_en_set_rx_mode(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up)
return;
- queue_work(priv->mdev->workqueue, &priv->mcast_task);
+ queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
-static void mlx4_en_do_set_multicast(struct work_struct *work)
+static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
{
- struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
- mcast_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_mc_list *mclist, *tmp;
- u64 mcast_addr = 0;
- u8 mc_list[16] = {0};
int err = 0;
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up) {
- en_dbg(HW, priv, "Card is not up, "
- "ignoring multicast change.\n");
- goto out;
- }
- if (!priv->port_up) {
- en_dbg(HW, priv, "Port is down, "
- "ignoring multicast change.\n");
- goto out;
- }
-
- if (!netif_carrier_ok(dev)) {
- if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
- if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
- netif_carrier_on(dev);
- en_dbg(LINK, priv, "Link Up\n");
- }
- }
- }
-
- /*
- * Promsicuous mode: disable all filters
- */
-
- if (dev->flags & IFF_PROMISC) {
- if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
- if (netif_msg_rx_status(priv))
- en_warn(priv, "Entering promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_PROMISC;
-
- /* Enable promiscouos mode */
- switch (mdev->dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_add(mdev->dev,
- priv->port,
- priv->base_qpn,
- MLX4_FS_PROMISC_UPLINK);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- break;
-
- case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling unicast promiscuous mode\n");
-
- /* Add the default qp number as multicast
- * promisc
- */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev,
- priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed enabling multicast promiscuous mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
- break;
-
- case MLX4_STEERING_MODE_A0:
- err = mlx4_SET_PORT_qpn_calc(mdev->dev,
- priv->port,
- priv->base_qpn,
- 1);
- if (err)
- en_err(priv, "Failed enabling promiscuous mode\n");
- break;
- }
-
- /* Disable port multicast filter (unconditionally) */
- err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
- 0, MLX4_MCAST_DISABLE);
- if (err)
- en_err(priv, "Failed disabling "
- "multicast filter\n");
-
- /* Disable port VLAN filter */
- err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
- en_err(priv, "Failed disabling VLAN filter\n");
- }
- goto out;
- }
-
- /*
- * Not in promiscuous mode
- */
-
- if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv))
- en_warn(priv, "Leaving promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+ en_warn(priv, "Entering promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
- /* Disable promiscouos mode */
+ /* Enable promiscuous mode */
switch (mdev->dev->caps.steering_mode) {
case MLX4_STEERING_MODE_DEVICE_MANAGED:
- err = mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
break;
case MLX4_STEERING_MODE_B0:
- err = mlx4_unicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling unicast promiscuous mode\n");
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev,
- priv->base_qpn,
- priv->port);
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
break;
case MLX4_STEERING_MODE_A0:
err = mlx4_SET_PORT_qpn_calc(mdev->dev,
priv->port,
- priv->base_qpn, 0);
+ priv->base_qpn,
+ 1);
if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ en_err(priv, "Failed enabling promiscuous mode\n");
break;
}
- /* Enable port VLAN filter */
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ en_err(priv, "Failed disabling multicast filter\n");
+
+ /* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
- en_err(priv, "Failed enabling VLAN filter\n");
+ en_err(priv, "Failed disabling VLAN filter\n");
}
+}
+
+static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
+ struct mlx4_en_dev *mdev)
+{
+ int err = 0;
+
+ if (netif_msg_rx_status(priv))
+ en_warn(priv, "Leaving promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
+ }
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+ if (err)
+ en_err(priv, "Failed enabling VLAN filter\n");
+}
+
+static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_mc_list *mclist, *tmp;
+ u64 mcast_addr = 0;
+ u8 mc_list[16] = {0};
+ int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) {
@@ -767,9 +964,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Update multicast list - we cache all addresses so they won't
* change while HW is updated holding the command semaphore */
- netif_tx_lock_bh(dev);
+ netif_addr_lock_bh(dev);
mlx4_en_cache_mclist(dev);
- netif_tx_unlock_bh(dev);
+ netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
@@ -814,6 +1011,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
}
}
}
+}
+
+static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
+ struct net_device *dev,
+ struct mlx4_en_dev *mdev)
+{
+ struct netdev_hw_addr *ha;
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n, *tmp;
+ bool found;
+ u64 mac;
+ int err = 0;
+ struct hlist_head *bucket;
+ unsigned int i;
+ int removed = 0;
+ u32 prev_flags;
+
+ /* Note that we do not need to protect our mac_hash traversal with rcu,
+ * since all modification code is protected by mdev->state_lock
+ */
+
+ /* find what to remove */
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+ bucket = &priv->mac_hash[i];
+ hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
+ found = false;
+ netdev_for_each_uc_addr(ha, dev) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ /* MAC address of the port is not in uc list */
+ if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+ found = true;
+
+ if (!found) {
+ mac = mlx4_en_mac_to_u64(entry->mac);
+ mlx4_en_uc_steer_release(priv, entry->mac,
+ priv->base_qpn,
+ entry->reg_id);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+
+ hlist_del_rcu(&entry->hlist);
+ kfree_rcu(entry, rcu);
+ en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
+ entry->mac, priv->port);
+ ++removed;
+ }
+ }
+ }
+
+ /* if we didn't remove anything, there is no use in trying to add
+ * again once we are in a forced promisc mode state
+ */
+ if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
+ return;
+
+ prev_flags = priv->flags;
+ priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
+
+ /* find what to add */
+ netdev_for_each_uc_addr(ha, dev) {
+ found = false;
+ bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
+ hlist_for_each_entry(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
+ ha->addr, priv->port);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ mac = mlx4_en_mac_to_u64(ha->addr);
+ memcpy(entry->mac, ha->addr, ETH_ALEN);
+ err = mlx4_register_mac(mdev->dev, priv->port, mac);
+ if (err < 0) {
+ en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ }
+ err = mlx4_en_uc_steer_add(priv, ha->addr,
+ &priv->base_qpn,
+ &entry->reg_id);
+ if (err) {
+ en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
+ ha->addr, priv->port, err);
+ mlx4_unregister_mac(mdev->dev, priv->port, mac);
+ kfree(entry);
+ priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
+ break;
+ } else {
+ unsigned int mac_hash;
+ en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
+ ha->addr, priv->port);
+ mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ hlist_add_head_rcu(&entry->hlist, bucket);
+ }
+ }
+ }
+
+ if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Forcing promiscuous mode on port:%d\n",
+ priv->port);
+ } else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
+ en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
+ priv->port);
+ }
+}
+
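The unicast filter above keys its hash table on a single MAC byte: the byte selected by MLX4_EN_MAC_HASH_IDX indexes one of MLX4_EN_MAC_HASH_SIZE (256) buckets, and entries that collide are disambiguated on the bucket's hlist. A standalone sketch of the bucket selection; the index value 5 (the last MAC byte) is assumed here for illustration:

#include <stdio.h>

#define MAC_HASH_SIZE 256	/* MLX4_EN_MAC_HASH_SIZE = 1 << BITS_PER_BYTE */
#define MAC_HASH_IDX  5		/* assumed value of MLX4_EN_MAC_HASH_IDX */

static unsigned int mac_hash_bucket(const unsigned char mac[6])
{
	/* mirrors: bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]]; */
	return mac[MAC_HASH_IDX] % MAC_HASH_SIZE;
}

int main(void)
{
	unsigned char a[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
	unsigned char b[6] = { 0x00, 0x02, 0xc9, 0xab, 0xcd, 0x56 };

	/* both MACs land in bucket 0x56 and are told apart on the list */
	printf("bucket(a)=0x%02x bucket(b)=0x%02x\n",
	       mac_hash_bucket(a), mac_hash_bucket(b));
	return 0;
}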
+static void mlx4_en_do_set_rx_mode(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ rx_mode_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
+ goto out;
+ }
+
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
+ if (dev->priv_flags & IFF_UNICAST_FLT)
+ mlx4_en_do_uc_filter(priv, dev, mdev);
+
+ /* Promiscuous mode: disable all filters */
+ if ((dev->flags & IFF_PROMISC) ||
+ (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
+ mlx4_en_set_promisc_mode(priv, mdev);
+ goto out;
+ }
+
+ /* Not in promiscuous mode */
+ if (priv->flags & MLX4_EN_FLAG_PROMISC)
+ mlx4_en_clear_promisc_mode(priv, mdev);
+
+ mlx4_en_do_multicast(priv, dev, mdev);
out:
mutex_unlock(&mdev->state_lock);
}
@@ -876,9 +1237,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
- en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
- "rx_frames:%d rx_usecs:%d\n",
- priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+ en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -959,8 +1319,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq);
if (err)
- en_err(priv, "Failed modifying moderation "
- "for cq:%d\n", ring);
+ en_err(priv, "Failed modifying moderation for cq:%d\n",
+ ring);
}
priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes;
@@ -977,12 +1337,12 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
struct mlx4_en_dev *mdev = priv->mdev;
int err;
- err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
- if (err)
- en_dbg(HW, priv, "Could not update stats\n");
-
mutex_lock(&mdev->state_lock);
if (mdev->device_up) {
+ err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+ if (err)
+ en_dbg(HW, priv, "Could not update stats\n");
+
if (priv->port_up)
mlx4_en_auto_moderation(priv);
@@ -1039,6 +1399,9 @@ int mlx4_en_start_port(struct net_device *dev)
INIT_LIST_HEAD(&priv->mc_list);
INIT_LIST_HEAD(&priv->curr_list);
+ INIT_LIST_HEAD(&priv->ethtool_list);
+ memset(&priv->ethtool_rules[0], 0,
+ sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
@@ -1074,8 +1437,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
- err = mlx4_get_eth_qp(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn);
+ err = mlx4_en_get_qp(priv);
if (err) {
en_err(priv, "Failed getting eth qp\n");
goto cq_err;
@@ -1138,8 +1500,8 @@ int mlx4_en_start_port(struct net_device *dev)
priv->prof->rx_pause,
priv->prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto tx_err;
}
/* Set default qp number */
@@ -1167,23 +1529,16 @@ int mlx4_en_start_port(struct net_device *dev)
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
- if (mdev->dev->caps.steering_mode ==
- MLX4_STEERING_MODE_DEVICE_MANAGED) {
- mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_UPLINK);
- mlx4_flow_steer_promisc_remove(mdev->dev,
- priv->port,
- MLX4_FS_PROMISC_ALL_MULTI);
- }
/* Schedule multicast task to populate multicast list */
- queue_work(mdev->workqueue, &priv->mcast_task);
+ queue_work(mdev->workqueue, &priv->rx_mode_task);
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
priv->port_up = true;
netif_tx_start_all_queues(dev);
+ netif_device_attach(dev);
+
return 0;
tx_err:
@@ -1195,7 +1550,7 @@ tx_err:
rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mlx4_en_put_qp(priv);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -1206,11 +1561,12 @@ cq_err:
}
-void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev, int detach)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_mc_list *mclist, *tmp;
+ struct ethtool_flow_id *flow, *tmp_flow;
int i;
u8 mc_list[16] = {0};
@@ -1221,12 +1577,42 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
+ if (detach)
+ netif_device_detach(dev);
netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
+ netif_tx_disable(dev);
+
/* Set port as not active */
priv->port_up = false;
+ /* Promiscuous mode */
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
+ MLX4_EN_FLAG_MC_PROMISC);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+ priv->port);
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ }
+
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
mc_list[5] = priv->port; /* needed for B0 steering support */
@@ -1263,8 +1649,20 @@ void mlx4_en_stop_port(struct net_device *dev)
mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */
- mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
+ mlx4_en_put_qp(priv);
+ if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+ mdev->mac_removed[priv->port] = 1;
+
+ /* Remove flow steering rules for the port*/
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ ASSERT_RTNL();
+ list_for_each_entry_safe(flow, tmp_flow,
+ &priv->ethtool_list, list) {
+ mlx4_flow_detach(mdev->dev, flow->id);
+ list_del(&flow->list);
+ }
+ }
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -1284,15 +1682,12 @@ static void mlx4_en_restart(struct work_struct *work)
watchdog_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
- int i;
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
- mlx4_en_stop_port(dev);
- for (i = 0; i < priv->tx_ring_num; i++)
- netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
+ mlx4_en_stop_port(dev, 1);
if (mlx4_en_start_port(dev))
en_err(priv, "Failed restarting port %d\n", priv->port);
}
@@ -1362,7 +1757,7 @@ static int mlx4_en_close(struct net_device *dev)
mutex_lock(&mdev->state_lock);
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 0);
netif_carrier_off(dev);
mutex_unlock(&mdev->state_lock);
@@ -1437,9 +1832,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
if (!priv->dev->rx_cpu_rmap)
goto err;
-
- INIT_LIST_HEAD(&priv->filters);
- spin_lock_init(&priv->filters_lock);
#endif
return 0;
@@ -1503,7 +1895,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
* the port */
en_dbg(DRV, priv, "Change MTU called with card down!?\n");
} else {
- mlx4_en_stop_port(dev);
+ mlx4_en_stop_port(dev, 1);
err = mlx4_en_start_port(dev);
if (err) {
en_err(priv, "Failed restarting port:%d\n",
@@ -1527,17 +1919,92 @@ static int mlx4_en_set_features(struct net_device *netdev,
priv->ctrl_flags &=
cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ mlx4_en_update_loopback_state(netdev, features);
+
return 0;
}
+static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr, u16 flags)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses; allow only
+ * permanent addresses if ndm_state is given
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Add FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+static int mlx4_en_fdb_del(struct ndmsg *ndm,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+ int err;
+
+ if (!mlx4_is_mfunc(mdev))
+ return -EOPNOTSUPP;
+
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ en_info(priv, "Del FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int mlx4_en_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev, int idx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_dev *mdev = priv->mdev->dev;
+
+ if (mlx4_is_mfunc(mdev))
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats,
- .ndo_set_rx_mode = mlx4_en_set_multicast,
+ .ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu,
@@ -1552,6 +2019,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
+ .ndo_fdb_add = mlx4_en_fdb_add,
+ .ndo_fdb_del = mlx4_en_fdb_del,
+ .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1608,7 +2078,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock);
- INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
@@ -1618,22 +2088,35 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif
+ for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
+ INIT_HLIST_HEAD(&priv->mac_hash[i]);
+
/* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
- priv->mac = mdev->dev->caps.def_mac[priv->port];
- if (ILLEGAL_MAC(priv->mac)) {
- en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
- priv->port, priv->mac);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
+ mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
+ priv->port, dev->dev_addr);
err = -EINVAL;
goto out;
}
+ memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv);
if (err)
goto out;
+#ifdef CONFIG_RFS_ACCEL
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
@@ -1653,13 +2136,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
- /* Set defualt MAC */
- dev->addr_len = ETH_ALEN;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
- dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
- }
-
/*
* Set driver features
*/
@@ -1679,6 +2155,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_STEERING_MODE_DEVICE_MANAGED)
dev->hw_features |= NETIF_F_NTUPLE;
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -1692,6 +2171,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* Configure port */
mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
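
The hunk above replaces the open-coded byte expansion of the burned-in MAC (the loop removed further down in this file's diff) with a helper, mlx4_en_u64_to_mac(), whose body is not part of this excerpt. A minimal userspace sketch of what such a conversion does, assuming the same most-significant-byte-first layout as the removed loop (function name and input value are illustrative):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Expand the 48-bit burned-in MAC held in the low bytes of a u64 into a
 * byte array, most significant byte first, exactly as the removed
 * open-coded loop in mlx4_en_init_netdev() did.
 */
static void u64_to_mac(uint8_t addr[ETH_ALEN], uint64_t mac)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[ETH_ALEN - 1 - i] = (uint8_t)(mac >> (8 * i));
}

int main(void)
{
	uint8_t addr[ETH_ALEN];

	u64_to_mac(addr, 0x0002c9112233ULL);	/* example value only */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}

Run as written, the example prints 00:02:c9:11:22:33 for the illustrative input.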
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index fed26d867f4..ce38654bbdd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
- struct ethhdr *ethh;
- dma_addr_t dma;
- u64 s_mac;
int factor = priv->cqe_factor;
if (!priv->port_up)
@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
- /* Get pointer to first fragment since we haven't skb yet and
- * cast it to ethhdr struct */
- dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
- DMA_FROM_DEVICE);
- ethh = (struct ethhdr *)(page_address(frags[0].page) +
- frags[0].offset);
- s_mac = mlx4_en_mac_to_u64(ethh->h_source);
-
- /* If source MAC is equal to our own MAC and not performing
- * the selftest or flb disabled - drop the packet */
- if (s_mac == priv->mac &&
- !((dev->features & NETIF_F_LOOPBACK) ||
- priv->validate_loopback))
- goto next;
+ /* Packets looped back by the hardware must be dropped when
+ * SRIOV is not enabled and we are neither running the selftest
+ * nor have forced loopback enabled
+ */
+ if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
+ struct ethhdr *ethh;
+ dma_addr_t dma;
+ /* Get a pointer to the first fragment (we don't have an
+ * skb yet) and cast it to an ethhdr struct
+ */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
+
+ if (is_multicast_ether_addr(ethh->h_dest)) {
+ struct mlx4_mac_entry *entry;
+ struct hlist_node *n;
+ struct hlist_head *bucket;
+ unsigned int mac_hash;
+
+ /* Drop the frame if the HW looped it back (source MAC is one of ours) */
+ mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
+ bucket = &priv->mac_hash[mac_hash];
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
+ if (ether_addr_equal_64bits(entry->mac,
+ ethh->h_source)) {
+ rcu_read_unlock();
+ goto next;
+ }
+ }
+ rcu_read_unlock();
+ }
+ }
/*
* Packet is OK - process it.
@@ -835,11 +852,9 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof *context , GFP_KERNEL);
- if (!context) {
- en_err(priv, "Failed to allocate qp context\n");
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
return -ENOMEM;
- }
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
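
The rewritten RX path above filters hardware-looped frames by hashing the sixth byte of the source MAC (MLX4_EN_MAC_HASH_IDX) into a 256-bucket table and comparing full addresses under rcu_read_lock(). The following is only a small userspace model of the bucket selection and compare; plain singly-linked lists stand in for the kernel's RCU hlists and all names are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define MAC_HASH_SIZE	256	/* 1 << BITS_PER_BYTE */
#define MAC_HASH_IDX	5	/* bucket chosen by the 6th byte of the MAC */

struct mac_entry {
	uint8_t mac[ETH_ALEN];
	struct mac_entry *next;
};

static struct mac_entry *mac_hash[MAC_HASH_SIZE];	/* one list head per bucket */

/* Return true if @src is one of our own registered addresses, i.e. the
 * frame was looped back by the hardware and should be dropped.  The
 * driver walks the bucket with hlist_for_each_entry_rcu() under
 * rcu_read_lock(); this model shows only bucket selection and compare.
 */
static bool mac_is_ours(const uint8_t *src)
{
	const struct mac_entry *e;

	for (e = mac_hash[src[MAC_HASH_IDX]]; e; e = e->next)
		if (!memcmp(e->mac, src, ETH_ALEN))
			return true;
	return false;
}

int main(void)
{
	static struct mac_entry own = { .mac = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x33 } };
	const uint8_t looped[ETH_ALEN] = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x33 };
	const uint8_t other[ETH_ALEN]  = { 0x00, 0x02, 0xc9, 0x11, 0x22, 0x34 };

	mac_hash[own.mac[MAC_HASH_IDX]] = &own;	/* register one address */

	printf("looped: %d, other: %d\n", mac_is_ours(looped), mac_is_ours(other));
	return 0;
}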
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index bf2e5d3f177..3488c6d9e6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0;
priv->validate_loopback = 1;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+
/* xmit */
if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n");
@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit:
priv->validate_loopback = 0;
+ mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6771b69f40d..49308cc65ee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -294,6 +294,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
cnt++;
}
+ netdev_tx_reset_queue(ring->tx_queue);
+
if (cnt)
en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
@@ -515,10 +517,6 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
wmb();
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
- tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
- (!!vlan_tx_tag_present(skb));
- tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -592,7 +590,21 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(ring->tx_queue);
priv->port_stats.queue_stopped++;
- return NETDEV_TX_BUSY;
+ /* If the queue was emptied between the fullness check and
+ * netif_tx_stop_queue(), it must be woken here or it would
+ * remain stopped forever.
+ * The memory barrier ensures ring->cons is re-read only after
+ * the queue has been stopped.
+ */
+ wmb();
+
+ if (unlikely(((int)(ring->prod - ring->cons)) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ netif_tx_wake_queue(ring->tx_queue);
+ priv->port_stats.wake_queue++;
+ } else {
+ return NETDEV_TX_BUSY;
+ }
}
/* Track current inflight packets for performance analysis */
@@ -630,7 +642,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++;
}
- if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
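
The extended stop/wake path in mlx4_en_xmit() above closes a race: a completion can empty the ring between the fullness check and netif_tx_stop_queue(), so after stopping the queue the producer issues a barrier and re-checks occupancy before giving up. Below is a compact userspace model of that producer-side pattern, using C11 atomics in place of the driver's wmb(); thresholds and names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	256
#define STOP_THRESHOLD	(RING_SIZE - 16)	/* leave headroom, as the driver does */

static atomic_uint prod, cons;
static atomic_bool queue_stopped;

/* Decide the ring is full, stop the queue, then re-check after a barrier
 * so a concurrent completion that freed descriptors in the meantime cannot
 * leave the queue stopped forever.
 */
static bool try_enqueue(void)
{
	if (atomic_load(&prod) - atomic_load(&cons) >= STOP_THRESHOLD) {
		atomic_store(&queue_stopped, true);

		/* barrier: publish the stop before re-reading cons */
		atomic_thread_fence(memory_order_seq_cst);

		if (atomic_load(&prod) - atomic_load(&cons) < STOP_THRESHOLD)
			atomic_store(&queue_stopped, false);	/* wake */
		else
			return false;	/* NETDEV_TX_BUSY equivalent */
	}
	atomic_fetch_add(&prod, 1);
	return true;
}

int main(void)
{
	printf("enqueue ok: %d\n", try_enqueue());
	return 0;
}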
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8b3d0512a46..38b62c78d5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -127,7 +127,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support"
+ [3] = "Device manage flow steering support",
+ [4] = "Automatic mac reassignment support"
};
int i;
@@ -478,6 +479,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET 0x94
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
+#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
dev_cap->flags2 = 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -637,6 +639,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
MLX4_GET(dev_cap->reserved_lkey, outbox,
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
+ if (field & 1<<6)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1287,14 +1292,14 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* Enable Ethernet flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_ETH_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
/* Enable IPoIB flow steering
* with udp unicast and tcp unicast
*/
- MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
INIT_HCA_FS_IB_BITS_OFFSET);
MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
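
The QUERY_DEV_CAP change above reads one extra byte from the firmware mailbox (offset 0x9d) and records bit 6 as the automatic MAC reassignment capability. A trivial userspace illustration of that decode; the constant name is made up for the example:

#include <stdint.h>
#include <stdio.h>

#define CAP_FW_REASSIGN_MAC_BIT	(1u << 6)	/* bit tested in mlx4_QUERY_DEV_CAP() */

/* If bit 6 of the byte fetched at offset 0x9d is set, the driver records
 * MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN in dev_cap->flags2.
 */
static int fw_supports_mac_reassign(uint8_t field)
{
	return !!(field & CAP_FW_REASSIGN_MAC_BIT);
}

int main(void)
{
	printf("0x40 -> %d, 0x00 -> %d\n",
	       fw_supports_mac_reassign(0x40), fw_supports_mac_reassign(0x00));
	return 0;
}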
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index dbf2f69cc59..3af33ff669c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -171,7 +171,6 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
- u8 fs_hash_enable_bits;
u8 steering_mode; /* for QUERY_HCA */
u64 dev_cap_enabled;
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5163af31499..b9dde139dac 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1415,22 +1415,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
if (mlx4_is_master(dev))
mlx4_parav_master_pf_caps(dev);
- priv->fs_hash_mode = MLX4_FS_L2_HASH;
-
- switch (priv->fs_hash_mode) {
- case MLX4_FS_L2_HASH:
- init_hca.fs_hash_enable_bits = 0;
- break;
-
- case MLX4_FS_L2_L3_L4_HASH:
- /* Enable flow steering with
- * udp unicast and tcp unicast
- */
- init_hca.fs_hash_enable_bits =
- MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
- break;
- }
-
profile = default_profile;
if (dev->caps.steering_mode ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -1849,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
- INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn =
- dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
- (port - 1) * (1 << log_num_mac);
+ info->base_qpn = mlx4_get_base_qpn(dev, port);
}
sprintf(info->dev_name, "mlx4_port%d", port);
@@ -2070,10 +2051,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
/* Allow large DMA segments, up to the firmware limit of 1 GB */
dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
- priv = kzalloc(sizeof *priv, GFP_KERNEL);
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(&pdev->dev, "Device struct alloc failed, "
- "aborting.\n");
err = -ENOMEM;
goto err_release_regions;
}
@@ -2162,7 +2141,8 @@ slave_start:
dev->num_slaves = MLX4_MAX_NUM_SLAVES;
else {
dev->num_slaves = 0;
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init slave mfunc"
" interface, aborting.\n");
goto err_cmd;
@@ -2186,7 +2166,8 @@ slave_start:
/* In master functions, the communication channel must be initialized
* after obtaining its address from fw */
if (mlx4_is_master(dev)) {
- if (mlx4_multi_func_init(dev)) {
+ err = mlx4_multi_func_init(dev);
+ if (err) {
mlx4_err(dev, "Failed to init master mfunc"
"interface, aborting.\n");
goto err_close;
@@ -2203,6 +2184,7 @@ slave_start:
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
+ err = -ENOSYS;
mlx4_err(dev, "INTx is not supported in multi-function mode."
" aborting.\n");
goto err_free_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 1ee4db3c640..52685524708 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -664,7 +664,7 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
dw |= ctrl->priority << 16;
hw->ctrl = cpu_to_be32(dw);
- hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->port = ctrl->port;
hw->qpn = cpu_to_be32(ctrl->qpn);
}
@@ -1157,7 +1157,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
.priority = MLX4_DOMAIN_NIC,
};
- rule.allow_loopback = ~block_mcast_loopback;
+ rule.allow_loopback = !block_mcast_loopback;
rule.port = port;
rule.qpn = qp->qpn;
INIT_LIST_HEAD(&rule.list);
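
The one-character mcg.c fix above is worth spelling out: ~block_mcast_loopback is a bitwise complement, which is non-zero for both 0 and 1, so loopback was effectively always allowed; ! gives the intended boolean inversion. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int block_mcast_loopback = 1;

	/* Bitwise NOT of 1 is 0xfffffffe - still truthy.
	 * Logical NOT gives the intended 0/1 inversion.
	 */
	printf("~1 = 0x%x (truthy)\n", ~block_mcast_loopback);
	printf("!1 = %u\n", !block_mcast_loopback);
	return 0;
}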
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 116c5c29d2d..ed4a6959e82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -60,11 +60,6 @@
#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
#define MLX4_FS_NUM_MCG (1 << 17)
-enum {
- MLX4_FS_L2_HASH = 0,
- MLX4_FS_L2_L3_L4_HASH,
-};
-
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -658,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast;
};
-struct mlx4_mac_entry {
- u64 mac;
- u64 reg_id;
-};
-
struct mlx4_port_info {
struct mlx4_dev *dev;
int port;
@@ -672,7 +662,6 @@ struct mlx4_port_info {
char dev_mtu_name[16];
struct device_attribute port_mtu_attr;
struct mlx4_mac_table mac_table;
- struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table;
int base_qpn;
};
@@ -696,9 +685,12 @@ struct mlx4_steer {
struct mlx4_net_trans_rule_hw_ctrl {
__be32 ctrl;
- __be32 vf_vep_port;
+ u8 rsvd1;
+ u8 funcid;
+ u8 vep;
+ u8 port;
__be32 qpn;
- __be32 reserved;
+ __be32 rsvd2;
};
struct mlx4_net_trans_rule_hw_ib {
@@ -918,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
-int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8d54412ada6..c313d7e943a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -198,7 +198,6 @@ enum cq_type {
*/
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y))
-#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info {
@@ -427,10 +426,26 @@ struct mlx4_en_frag_info {
#endif
struct ethtool_flow_id {
+ struct list_head list;
struct ethtool_rx_flow_spec flow_spec;
u64 id;
};
+enum {
+ MLX4_EN_FLAG_PROMISC = (1 << 0),
+ MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
+ /* whether we need to enable hardware loopback by putting dmac
+ * in Tx WQE
+ */
+ MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
+ /* whether we need to drop packets that the hardware looped back */
+ MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
+ MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
+};
+
+#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
+#define MLX4_EN_MAC_HASH_IDX 5
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -441,6 +456,8 @@ struct mlx4_en_priv {
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
+ /* To allow rules removal while port is going down */
+ struct list_head ethtool_list;
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -469,7 +486,7 @@ struct mlx4_en_priv {
int registered;
int allocated;
int stride;
- u64 mac;
+ unsigned char prev_mac[ETH_ALEN + 2];
int mac_index;
unsigned max_mtu;
int base_qpn;
@@ -478,8 +495,6 @@ struct mlx4_en_priv {
struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags;
u32 flags;
-#define MLX4_EN_FLAG_PROMISC 0x1
-#define MLX4_EN_FLAG_MC_PROMISC 0x2
u8 num_tx_rings_p_up;
u32 tx_ring_num;
u32 rx_ring_num;
@@ -493,7 +508,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp;
- struct work_struct mcast_task;
+ struct work_struct rx_mode_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
@@ -510,6 +525,7 @@ struct mlx4_en_priv {
bool wol;
struct device *ddev;
int base_tx_qpn;
+ struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
@@ -529,14 +545,24 @@ enum mlx4_en_wol {
MLX4_EN_WOL_ENABLED = (1ULL << 62),
};
+struct mlx4_mac_entry {
+ struct hlist_node hlist;
+ unsigned char mac[ETH_ALEN + 2];
+ u64 reg_id;
+ struct rcu_head rcu;
+};
+
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
+void mlx4_en_update_loopback_state(struct net_device *dev,
+ netdev_features_t features);
+
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof);
int mlx4_en_start_port(struct net_device *dev);
-void mlx4_en_stop_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev, int detach);
void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
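
struct mlx4_mac_entry now embeds an hlist_node and an rcu_head, and the RX path walks the hash under rcu_read_lock(), which implies removal must defer the free past a grace period. The driver's actual removal code is not part of this excerpt; what follows is only the generic kernel pattern such a layout suggests, with illustrative struct and function names:

#include <linux/if_ether.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative copy of the new entry layout. */
struct mac_entry_example {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	u64 reg_id;
	struct rcu_head rcu;
};

/* Unlink under the writer's lock, then free only after all current
 * rcu_read_lock() readers (here, the RX completion path) are done.
 */
static void mac_entry_example_remove(struct mac_entry_example *entry)
{
	hlist_del_rcu(&entry->hlist);
	kfree_rcu(entry, rcu);
}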
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 4c51b05efa2..719ead15e49 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u64 *reg_id)
-{
- __be64 be_mac;
- int err;
-
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
-
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
-
- qp.qpn = *qpn;
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- struct mlx4_spec_list spec_eth = { {NULL} };
- __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
-
- struct mlx4_net_trans_rule rule = {
- .queue_mode = MLX4_NET_TRANS_Q_FIFO,
- .exclusive = 0,
- .allow_loopback = 1,
- .promisc_mode = MLX4_FS_PROMISC_NONE,
- .priority = MLX4_DOMAIN_NIC,
- };
-
- rule.port = port;
- rule.qpn = *qpn;
- INIT_LIST_HEAD(&rule.list);
-
- spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
- memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
- memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
- list_add_tail(&spec_eth.list, &rule.list);
-
- err = mlx4_flow_attach(dev, &rule, reg_id);
- break;
- }
- default:
- return -EINVAL;
- }
- if (err)
- mlx4_warn(dev, "Failed Attaching Unicast\n");
-
- return err;
-}
-
-static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u64 reg_id)
-{
- switch (dev->caps.steering_mode) {
- case MLX4_STEERING_MODE_B0: {
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
-
- qp.qpn = qpn;
- mac &= MLX4_MAC_MASK;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
-
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
- break;
- }
- case MLX4_STEERING_MODE_DEVICE_MANAGED: {
- mlx4_flow_detach(dev, reg_id);
- break;
- }
- default:
- mlx4_err(dev, "Invalid steering mode.\n");
- }
-}
-
static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index)
{
@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
return -EINVAL;
}
-int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
- int index = 0;
- int err = 0;
- u64 reg_id;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
- (unsigned long long) mac);
- index = mlx4_register_mac(dev, port, mac);
- if (index < 0) {
- err = index;
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
- (unsigned long long) mac);
- return err;
- }
-
- if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- *qpn = info->base_qpn + index;
- return 0;
- }
-
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- goto qp_err;
- }
-
- err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
- if (err)
- goto steer_err;
-
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- err = -ENOMEM;
- goto alloc_err;
- }
- entry->mac = mac;
- entry->reg_id = reg_id;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err)
- goto insert_err;
- return 0;
-
-insert_err:
- kfree(entry);
-
-alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
-
-steer_err:
- mlx4_qp_release_range(dev, *qpn, 1);
-
-qp_err:
- mlx4_unregister_mac(dev, port, mac);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
-
-void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
-{
- struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_entry *entry;
-
- mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
- (unsigned long long) mac);
- mlx4_unregister_mac(dev, port, mac);
-
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
- " qpn %d\n", port,
- (unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_qp_release_range(dev, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- kfree(entry);
- }
- }
-}
-EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
-
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries)
{
@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
+int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
+{
+ return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ (port - 1) * (1 << dev->caps.log_num_macs);
+}
+EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- struct mlx4_mac_entry *entry;
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (!entry)
- return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac,
- qpn, entry->reg_id);
- mlx4_unregister_mac(dev, port, entry->mac);
- entry->mac = new_mac;
- entry->reg_id = 0;
- mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac,
- &qpn, &entry->reg_id);
- return err;
- }
-
/* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
@@ -439,7 +262,7 @@ out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 561ed2a22a1..5997adc943d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3018,7 +3018,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
- port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
+ port = ctrl->port;
eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
/* Clear a space in the inbox for eth header */
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b71eb39ab44..fbcb9e74d7f 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1080,7 +1080,6 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, mac, netdev->addr_len);
ks8842_write_mac_addr(adapter, mac);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 286816a4e78..33bcb63d56a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -69,7 +69,6 @@ union ks8851_tx_hdr {
* @mii: The MII state information for the mii calls.
* @rxctrl: RX settings for @rxctrl_work.
* @tx_work: Work queue for tx packets
- * @irq_work: Work queue for servicing interrupts
* @rxctrl_work: Work queue for updating RX mode and multicast lists
* @txq: Queue of packets for transmission.
* @spi_msg1: pre-setup SPI transfer with one message, @spi_xfer1.
@@ -121,7 +120,6 @@ struct ks8851_net {
struct ks8851_rxctrl rxctrl;
struct work_struct tx_work;
- struct work_struct irq_work;
struct work_struct rxctrl_work;
struct sk_buff_head txq;
@@ -444,23 +442,6 @@ static void ks8851_init_mac(struct ks8851_net *ks)
}
/**
- * ks8851_irq - device interrupt handler
- * @irq: Interrupt number passed from the IRQ handler.
- * @pw: The private word passed to register_irq(), our struct ks8851_net.
- *
- * Disable the interrupt from happening again until we've processed the
- * current status by scheduling ks8851_irq_work().
- */
-static irqreturn_t ks8851_irq(int irq, void *pw)
-{
- struct ks8851_net *ks = pw;
-
- disable_irq_nosync(irq);
- schedule_work(&ks->irq_work);
- return IRQ_HANDLED;
-}
-
-/**
* ks8851_rdfifo - read data from the receive fifo
* @ks: The device state.
* @buff: The buffer address
@@ -595,19 +576,20 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
}
/**
- * ks8851_irq_work - work queue handler for dealing with interrupt requests
- * @work: The work structure that was scheduled by schedule_work()
+ * ks8851_irq - IRQ handler for dealing with interrupt requests
+ * @irq: IRQ number
+ * @_ks: cookie
*
- * This is the handler invoked when the ks8851_irq() is called to find out
- * what happened, as we cannot allow ourselves to sleep whilst waiting for
- * anything other process has the chip's lock.
+ * This handler is invoked when the IRQ line is asserted, to find out what happened.
+ * As we cannot allow ourselves to sleep in HARDIRQ context, this handler runs
+ * in thread context.
*
* Read the interrupt status, work out what needs to be done and then clear
* any of the interrupts that are not needed.
*/
-static void ks8851_irq_work(struct work_struct *work)
+static irqreturn_t ks8851_irq(int irq, void *_ks)
{
- struct ks8851_net *ks = container_of(work, struct ks8851_net, irq_work);
+ struct ks8851_net *ks = _ks;
unsigned status;
unsigned handled = 0;
@@ -688,7 +670,7 @@ static void ks8851_irq_work(struct work_struct *work)
if (status & IRQ_TXI)
netif_wake_queue(ks->netdev);
- enable_irq(ks->netdev->irq);
+ return IRQ_HANDLED;
}
/**
@@ -896,7 +878,6 @@ static int ks8851_net_stop(struct net_device *dev)
mutex_unlock(&ks->lock);
/* stop any outstanding work */
- flush_work(&ks->irq_work);
flush_work(&ks->tx_work);
flush_work(&ks->rxctrl_work);
@@ -1052,7 +1033,6 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
return ks8851_write_mac_addr(dev);
}
@@ -1438,7 +1418,6 @@ static int ks8851_probe(struct spi_device *spi)
spin_lock_init(&ks->statelock);
INIT_WORK(&ks->tx_work, ks8851_tx_work);
- INIT_WORK(&ks->irq_work, ks8851_irq_work);
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
/* initialise pre-made spi transfer messages */
@@ -1505,8 +1484,9 @@ static int ks8851_probe(struct spi_device *spi)
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
- ret = request_irq(spi->irq, ks8851_irq, IRQF_TRIGGER_LOW,
- ndev->name, ks);
+ ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ ndev->name, ks);
if (ret < 0) {
dev_err(&spi->dev, "failed to get irq\n");
goto err_irq;
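
The ks8851 conversion above drops the disable_irq_nosync()-plus-workqueue indirection in favour of a threaded handler: no primary handler, IRQF_ONESHOT keeps the level-triggered line masked until the thread returns, and the thread may sleep while talking to the chip over SPI. A minimal sketch of that registration pattern; names are illustrative, not taken from the driver:

#include <linux/interrupt.h>

/* Threaded handler: may sleep, e.g. to read and clear the interrupt
 * status over a slow bus such as SPI.
 */
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* ... service the device, clear its interrupt sources ... */
	return IRQ_HANDLED;
}

/* No primary handler (NULL); IRQF_ONESHOT keeps the line masked until
 * example_thread_fn() returns, which a level-triggered IRQ requires.
 */
static int example_request(int irq, void *dev_id, const char *name)
{
	return request_threaded_irq(irq, NULL, example_thread_fn,
				    IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				    name, dev_id);
}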
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ef8f9f92e54..a343066f7b4 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1237,7 +1237,6 @@ static int ks_set_mac_address(struct net_device *netdev, void *paddr)
struct sockaddr *addr = paddr;
u8 *da;
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
da = (u8 *)netdev->dev_addr;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index a99456c3dd8..5d98a9f7bfc 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -527,7 +527,6 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
return enc28j60_set_hw_macaddr(dev);
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index f8408d6e961..4f9937e026e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -664,10 +664,9 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
/* copy header of running firmware from SRAM to host memory to
* validate firmware */
hdr = kmalloc(bytes, GFP_KERNEL);
- if (hdr == NULL) {
- dev_err(dev, "could not malloc firmware hdr\n");
+ if (hdr == NULL)
return -ENOMEM;
- }
+
memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
status = myri10ge_validate_firmware(mgp, hdr);
kfree(hdr);
diff --git a/drivers/net/ethernet/natsemi/ibmlana.c b/drivers/net/ethernet/natsemi/ibmlana.c
deleted file mode 100644
index 923e640d604..00000000000
--- a/drivers/net/ethernet/natsemi/ibmlana.c
+++ /dev/null
@@ -1,1075 +0,0 @@
-/*
-net-3-driver for the IBM LAN Adapter/A
-
-This is an extension to the Linux operating system, and is covered by the
-same GNU General Public License that covers that work.
-
-Copyright 1999 by Alfred Arnold (alfred@ccac.rwth-aachen.de,
- alfred.arnold@lancom.de)
-
-This driver is based both on the SK_MCA driver, which is itself based on the
-SK_G16 and 3C523 driver.
-
-paper sources:
- 'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
- Hans-Peter Messmer for the basic Microchannel stuff
-
- 'Linux Geraetetreiber' by Allesandro Rubini, Kalle Dalheimer
- for help on Ethernet driver programming
-
- 'DP83934CVUL-20/25 MHz SONIC-T Ethernet Controller Datasheet' by National
- Semiconductor for info on the MAC chip
-
- 'LAN Technical Reference Ethernet Adapter Interface Version 1 Release 1.0
- Document Number SC30-3661-00' by IBM for info on the adapter itself
-
- Also see http://www.national.com/analog
-
-special acknowledgements to:
- - Bob Eager for helping me out with documentation from IBM
- - Jim Shorney for his endless patience with me while I was using
- him as a beta tester to trace down the address filter bug ;-)
-
- Missing things:
-
- -> set debug level via ioctl instead of compile-time switches
- -> I didn't follow the development of the 2.1.x kernels, so my
- assumptions about which things changed with which kernel version
- are probably nonsense
-
-History:
- Nov 6th, 1999
- startup from SK_MCA driver
- Dec 6th, 1999
- finally got docs about the card. A big thank you to Bob Eager!
- Dec 12th, 1999
- first packet received
- Dec 13th, 1999
- recv queue done, tcpdump works
- Dec 15th, 1999
- transmission part works
- Dec 28th, 1999
- added usage of the isa_functions for Linux 2.3 . Things should
- still work with 2.0.x....
- Jan 28th, 2000
- in Linux 2.2.13, the version.h file mysteriously didn't get
- included. Added a workaround for this. Furthermore, it now
- not only compiles as a modules ;-)
- Jan 30th, 2000
- newer kernels automatically probe more than one board, so the
- 'startslot' as a variable is also needed here
- Apr 12th, 2000
- the interrupt mask register is not set 'hard' instead of individually
- setting registers, since this seems to set bits that shouldn't be
- set
- May 21st, 2000
- reset interrupt status immediately after CAM load
- add a recovery delay after releasing the chip's reset line
- May 24th, 2000
- finally found the bug in the address filter setup - damned signed
- chars!
- June 1st, 2000
- corrected version codes, added support for the latest 2.3 changes
- Oct 28th, 2002
- cleaned up for the 2.5 tree <alan@lxorguk.ukuu.org.uk>
-
- *************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/time.h>
-#include <linux/mca.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-
-#define _IBM_LANA_DRIVER_
-#include "ibmlana.h"
-
-#undef DEBUG
-
-#define DRV_NAME "ibmlana"
-
-/* ------------------------------------------------------------------------
- * global static data - not more since we can handle multiple boards and
- * have to pack all state info into the device struct!
- * ------------------------------------------------------------------------ */
-
-static char *MediaNames[Media_Count] = {
- "10BaseT", "10Base5", "Unknown", "10Base2"
-};
-
-/* ------------------------------------------------------------------------
- * private subfunctions
- * ------------------------------------------------------------------------ */
-
-#ifdef DEBUG
- /* dump all registers */
-
-static void dumpregs(struct net_device *dev)
-{
- int z;
-
- for (z = 0; z < 160; z += 2) {
- if (!(z & 15))
- printk("REGS: %04x:", z);
- printk(" %04x", inw(dev->base_addr + z));
- if ((z & 15) == 14)
- printk("\n");
- }
-}
-
-/* dump parts of shared memory - only needed during debugging */
-
-static void dumpmem(struct net_device *dev, u32 start, u32 len)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int z;
-
- printk("Address %04x:\n", start);
- for (z = 0; z < len; z++) {
- if ((z & 15) == 0)
- printk("%04x:", z);
- printk(" %02x", readb(priv->base + start + z));
- if ((z & 15) == 15)
- printk("\n");
- }
- if ((z & 15) != 0)
- printk("\n");
-}
-
-/* print exact time - ditto */
-
-static void PrTime(void)
-{
- struct timeval tv;
-
- do_gettimeofday(&tv);
- printk("%9d:%06d: ", (int) tv.tv_sec, (int) tv.tv_usec);
-}
-#endif /* DEBUG */
-
-/* deduce resources out of POS registers */
-
-static void getaddrs(struct mca_device *mdev, int *base, int *memlen,
- int *iobase, int *irq, ibmlana_medium *medium)
-{
- u_char pos0, pos1;
-
- pos0 = mca_device_read_stored_pos(mdev, 2);
- pos1 = mca_device_read_stored_pos(mdev, 3);
-
- *base = 0xc0000 + ((pos1 & 0xf0) << 9);
- *memlen = (pos1 & 0x01) ? 0x8000 : 0x4000;
- *iobase = (pos0 & 0xe0) << 7;
- switch (pos0 & 0x06) {
- case 0:
- *irq = 5;
- break;
- case 2:
- *irq = 15;
- break;
- case 4:
- *irq = 10;
- break;
- case 6:
- *irq = 11;
- break;
- }
- *medium = (pos0 & 0x18) >> 3;
-}
-
-/* wait on register value with mask and timeout */
-
-static int wait_timeout(struct net_device *dev, int regoffs, u16 mask,
- u16 value, int timeout)
-{
- unsigned long fin = jiffies + timeout;
-
- while (time_before(jiffies,fin))
- if ((inw(dev->base_addr + regoffs) & mask) == value)
- return 1;
-
- return 0;
-}
-
-
-/* reset the whole board */
-
-static void ResetBoard(struct net_device *dev)
-{
- unsigned char bcmval;
-
- /* read original board control value */
-
- bcmval = inb(dev->base_addr + BCMREG);
-
- /* set reset bit for a while */
-
- bcmval |= BCMREG_RESET;
- outb(bcmval, dev->base_addr + BCMREG);
- udelay(10);
- bcmval &= ~BCMREG_RESET;
- outb(bcmval, dev->base_addr + BCMREG);
-
- /* switch over to RAM again */
-
- bcmval |= BCMREG_RAMEN | BCMREG_RAMWIN;
- outb(bcmval, dev->base_addr + BCMREG);
-}
-
-/* calculate RAM layout & set up descriptors in RAM */
-
-static void InitDscrs(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- u32 addr, baddr, raddr;
- int z;
- tda_t tda;
- rda_t rda;
- rra_t rra;
-
- /* initialize RAM */
-
- memset_io(priv->base, 0xaa,
- dev->mem_start - dev->mem_start); /* XXX: typo? */
-
- /* setup n TX descriptors - independent of RAM size */
-
- priv->tdastart = addr = 0;
- priv->txbufstart = baddr = sizeof(tda_t) * TXBUFCNT;
- for (z = 0; z < TXBUFCNT; z++) {
- tda.status = 0;
- tda.config = 0;
- tda.length = 0;
- tda.fragcount = 1;
- tda.startlo = baddr;
- tda.starthi = 0;
- tda.fraglength = 0;
- if (z == TXBUFCNT - 1)
- tda.link = priv->tdastart;
- else
- tda.link = addr + sizeof(tda_t);
- tda.link |= 1;
- memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
- addr += sizeof(tda_t);
- baddr += PKTSIZE;
- }
-
- /* calculate how many receive buffers fit into remaining memory */
-
- priv->rxbufcnt = (dev->mem_end - dev->mem_start - baddr) / (sizeof(rra_t) + sizeof(rda_t) + PKTSIZE);
-
- /* calculate receive addresses */
-
- priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
- priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
- priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
-
- for (z = 0; z < priv->rxbufcnt; z++) {
- rra.startlo = baddr;
- rra.starthi = 0;
- rra.cntlo = PKTSIZE >> 1;
- rra.cnthi = 0;
- memcpy_toio(priv->base + raddr, &rra, sizeof(rra_t));
-
- rda.status = 0;
- rda.length = 0;
- rda.startlo = 0;
- rda.starthi = 0;
- rda.seqno = 0;
- if (z < priv->rxbufcnt - 1)
- rda.link = addr + sizeof(rda_t);
- else
- rda.link = 1;
- rda.inuse = 1;
- memcpy_toio(priv->base + addr, &rda, sizeof(rda_t));
-
- baddr += PKTSIZE;
- raddr += sizeof(rra_t);
- addr += sizeof(rda_t);
- }
-
- /* initialize current pointers */
-
- priv->nextrxdescr = 0;
- priv->lastrxdescr = priv->rxbufcnt - 1;
- priv->nexttxdescr = 0;
- priv->currtxdescr = 0;
- priv->txusedcnt = 0;
- memset(priv->txused, 0, sizeof(priv->txused));
-}
-
-/* set up Rx + Tx descriptors in SONIC */
-
-static int InitSONIC(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* set up start & end of resource area */
-
- outw(0, SONIC_URRA);
- outw(priv->rrastart, dev->base_addr + SONIC_RSA);
- outw(priv->rrastart + (priv->rxbufcnt * sizeof(rra_t)), dev->base_addr + SONIC_REA);
- outw(priv->rrastart, dev->base_addr + SONIC_RRP);
- outw(priv->rrastart, dev->base_addr + SONIC_RWP);
-
- /* set EOBC so that only one packet goes into one buffer */
-
- outw((PKTSIZE - 4) >> 1, dev->base_addr + SONIC_EOBC);
-
- /* let SONIC read the first RRA descriptor */
-
- outw(CMDREG_RRRA, dev->base_addr + SONIC_CMDREG);
- if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_RRRA, 0, 2)) {
- printk(KERN_ERR "%s: SONIC did not respond on RRRA command - giving up.", dev->name);
- return 0;
- }
-
- /* point SONIC to the first RDA */
-
- outw(0, dev->base_addr + SONIC_URDA);
- outw(priv->rdastart, dev->base_addr + SONIC_CRDA);
-
- /* set upper half of TDA address */
-
- outw(0, dev->base_addr + SONIC_UTDA);
-
- return 1;
-}
-
-/* stop SONIC so we can reinitialize it */
-
-static void StopSONIC(struct net_device *dev)
-{
- /* disable interrupts */
-
- outb(inb(dev->base_addr + BCMREG) & (~BCMREG_IEN), dev->base_addr + BCMREG);
- outb(0, dev->base_addr + SONIC_IMREG);
-
- /* reset the SONIC */
-
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- udelay(10);
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
-}
-
-/* initialize card and SONIC for proper operation */
-
-static void putcam(camentry_t * cams, int *camcnt, char *addr)
-{
- camentry_t *pcam = cams + (*camcnt);
- u8 *uaddr = (u8 *) addr;
-
- pcam->index = *camcnt;
- pcam->addr0 = (((u16) uaddr[1]) << 8) | uaddr[0];
- pcam->addr1 = (((u16) uaddr[3]) << 8) | uaddr[2];
- pcam->addr2 = (((u16) uaddr[5]) << 8) | uaddr[4];
- (*camcnt)++;
-}
-
-static void InitBoard(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int camcnt;
- camentry_t cams[16];
- u32 cammask;
- struct netdev_hw_addr *ha;
- u16 rcrval;
-
- /* reset the SONIC */
-
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- udelay(10);
-
- /* clear all spurious interrupts */
-
- outw(inw(dev->base_addr + SONIC_ISREG), dev->base_addr + SONIC_ISREG);
-
- /* set up the SONIC's bus interface - constant for this adapter -
- must be done while the SONIC is in reset */
-
- outw(DCREG_USR1 | DCREG_USR0 | DCREG_WC1 | DCREG_DW32, dev->base_addr + SONIC_DCREG);
- outw(0, dev->base_addr + SONIC_DCREG2);
-
- /* remove reset form the SONIC */
-
- outw(0, dev->base_addr + SONIC_CMDREG);
- udelay(10);
-
- /* data sheet requires URRA to be programmed before setting up the CAM contents */
-
- outw(0, dev->base_addr + SONIC_URRA);
-
- /* program the CAM entry 0 to the device address */
-
- camcnt = 0;
- putcam(cams, &camcnt, dev->dev_addr);
-
- /* start putting the multicast addresses into the CAM list. Stop if
- it is full. */
-
- netdev_for_each_mc_addr(ha, dev) {
- putcam(cams, &camcnt, ha->addr);
- if (camcnt == 16)
- break;
- }
-
- /* calculate CAM mask */
-
- cammask = (1 << camcnt) - 1;
-
- /* feed CDA into SONIC, initialize RCR value (always get broadcasts) */
-
- memcpy_toio(priv->base, cams, sizeof(camentry_t) * camcnt);
- memcpy_toio(priv->base + (sizeof(camentry_t) * camcnt), &cammask, sizeof(cammask));
-
-#ifdef DEBUG
- printk("CAM setup:\n");
- dumpmem(dev, 0, sizeof(camentry_t) * camcnt + sizeof(cammask));
-#endif
-
- outw(0, dev->base_addr + SONIC_CAMPTR);
- outw(camcnt, dev->base_addr + SONIC_CAMCNT);
- outw(CMDREG_LCAM, dev->base_addr + SONIC_CMDREG);
- if (!wait_timeout(dev, SONIC_CMDREG, CMDREG_LCAM, 0, 2)) {
- printk(KERN_ERR "%s:SONIC did not respond on LCAM command - giving up.", dev->name);
- return;
- } else {
- /* clear interrupt condition */
-
- outw(ISREG_LCD, dev->base_addr + SONIC_ISREG);
-
-#ifdef DEBUG
- printk("Loading CAM done, address pointers %04x:%04x\n",
- inw(dev->base_addr + SONIC_URRA),
- inw(dev->base_addr + SONIC_CAMPTR));
- {
- int z;
-
- printk("\n-->CAM: PTR %04x CNT %04x\n",
- inw(dev->base_addr + SONIC_CAMPTR),
- inw(dev->base_addr + SONIC_CAMCNT));
- outw(CMDREG_RST, dev->base_addr + SONIC_CMDREG);
- for (z = 0; z < camcnt; z++) {
- outw(z, dev->base_addr + SONIC_CAMEPTR);
- printk("Entry %d: %04x %04x %04x\n", z,
- inw(dev->base_addr + SONIC_CAMADDR0),
- inw(dev->base_addr + SONIC_CAMADDR1),
- inw(dev->base_addr + SONIC_CAMADDR2));
- }
- outw(0, dev->base_addr + SONIC_CMDREG);
- }
-#endif
- }
-
- rcrval = RCREG_BRD | RCREG_LB_NONE;
-
- /* if still multicast addresses left or ALLMULTI is set, set the multicast
- enable bit */
-
- if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
- rcrval |= RCREG_AMC;
-
- /* promiscuous mode ? */
-
- if (dev->flags & IFF_PROMISC)
- rcrval |= RCREG_PRO;
-
- /* program receive mode */
-
- outw(rcrval, dev->base_addr + SONIC_RCREG);
-#ifdef DEBUG
- printk("\nRCRVAL: %04x\n", rcrval);
-#endif
-
- /* set up descriptors in shared memory + feed them into SONIC registers */
-
- InitDscrs(dev);
- if (!InitSONIC(dev))
- return;
-
- /* reset all pending interrupts */
-
- outw(0xffff, dev->base_addr + SONIC_ISREG);
-
- /* enable transmitter + receiver interrupts */
-
- outw(CMDREG_RXEN, dev->base_addr + SONIC_CMDREG);
- outw(IMREG_PRXEN | IMREG_RBEEN | IMREG_PTXEN | IMREG_TXEREN, dev->base_addr + SONIC_IMREG);
-
- /* turn on card interrupts */
-
- outb(inb(dev->base_addr + BCMREG) | BCMREG_IEN, dev->base_addr + BCMREG);
-
-#ifdef DEBUG
- printk("Register dump after initialization:\n");
- dumpregs(dev);
-#endif
-}
-
-/* start transmission of a descriptor */
-
-static void StartTx(struct net_device *dev, int descr)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int addr;
-
- addr = priv->tdastart + (descr * sizeof(tda_t));
-
- /* put descriptor address into SONIC */
-
- outw(addr, dev->base_addr + SONIC_CTDA);
-
- /* trigger transmitter */
-
- priv->currtxdescr = descr;
- outw(CMDREG_TXP, dev->base_addr + SONIC_CMDREG);
-}
-
-/* ------------------------------------------------------------------------
- * interrupt handler(s)
- * ------------------------------------------------------------------------ */
-
-/* receive buffer area exhausted */
-
-static void irqrbe_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* point the SONIC back to the RRA start */
-
- outw(priv->rrastart, dev->base_addr + SONIC_RRP);
- outw(priv->rrastart, dev->base_addr + SONIC_RWP);
-}
-
-/* receive interrupt */
-
-static void irqrx_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- rda_t rda;
- u32 rdaaddr, lrdaaddr;
-
- /* loop until ... */
-
- while (1) {
- /* read descriptor that was next to be filled by SONIC */
-
- rdaaddr = priv->rdastart + (priv->nextrxdescr * sizeof(rda_t));
- lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
- memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
-
- /* iron out upper word halves of fields we use - SONIC will duplicate
- bits 0..15 to 16..31 */
-
- rda.status &= 0xffff;
- rda.length &= 0xffff;
- rda.startlo &= 0xffff;
-
- /* stop if the SONIC still owns it, i.e. there is no data for us */
-
- if (rda.inuse)
- break;
-
- /* good packet? */
-
- else if (rda.status & RCREG_PRX) {
- struct sk_buff *skb;
-
- /* fetch buffer */
-
- skb = netdev_alloc_skb(dev, rda.length + 2);
- if (skb == NULL)
- dev->stats.rx_dropped++;
- else {
- /* copy out data */
-
- memcpy_fromio(skb_put(skb, rda.length),
- priv->base +
- rda.startlo, rda.length);
-
- /* set up skb fields */
-
- skb->protocol = eth_type_trans(skb, dev);
- skb_checksum_none_assert(skb);
-
- /* bookkeeping */
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += rda.length;
-
- /* pass to the upper layers */
- netif_rx(skb);
- }
- }
-
- /* otherwise check error status bits and increase statistics */
-
- else {
- dev->stats.rx_errors++;
- if (rda.status & RCREG_FAER)
- dev->stats.rx_frame_errors++;
- if (rda.status & RCREG_CRCR)
- dev->stats.rx_crc_errors++;
- }
-
- /* descriptor processed, will become new last descriptor in queue */
-
- rda.link = 1;
- rda.inuse = 1;
- memcpy_toio(priv->base + rdaaddr, &rda,
- sizeof(rda_t));
-
- /* set up link and EOL = 0 in currently last descriptor. Only write
- the link field since the SONIC may currently already access the
- other fields. */
-
- memcpy_toio(priv->base + lrdaaddr + 20, &rdaaddr, 4);
-
- /* advance indices */
-
- priv->lastrxdescr = priv->nextrxdescr;
- if ((++priv->nextrxdescr) >= priv->rxbufcnt)
- priv->nextrxdescr = 0;
- }
-}
-
-/* transmit interrupt */
-
-static void irqtx_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- tda_t tda;
-
- /* fetch descriptor (we forgot the size ;-) */
- memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
-
- /* update statistics */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tda.length;
-
- /* update our pointers */
- priv->txused[priv->currtxdescr] = 0;
- priv->txusedcnt--;
-
- /* if there are more descriptors present in RAM, start them */
- if (priv->txusedcnt > 0)
- StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
-
- /* tell the upper layer we can go on transmitting */
- netif_wake_queue(dev);
-}
-
-static void irqtxerr_handler(struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- tda_t tda;
-
- /* fetch descriptor to check status */
- memcpy_fromio(&tda, priv->base + priv->tdastart + (priv->currtxdescr * sizeof(tda_t)), sizeof(tda_t));
-
- /* update statistics */
- dev->stats.tx_errors++;
- if (tda.status & (TCREG_NCRS | TCREG_CRSL))
- dev->stats.tx_carrier_errors++;
- if (tda.status & TCREG_EXC)
- dev->stats.tx_aborted_errors++;
- if (tda.status & TCREG_OWC)
- dev->stats.tx_window_errors++;
- if (tda.status & TCREG_FU)
- dev->stats.tx_fifo_errors++;
-
- /* update our pointers */
- priv->txused[priv->currtxdescr] = 0;
- priv->txusedcnt--;
-
- /* if there are more descriptors present in RAM, start them */
- if (priv->txusedcnt > 0)
- StartTx(dev, (priv->currtxdescr + 1) % TXBUFCNT);
-
- /* tell the upper layer we can go on transmitting */
- netif_wake_queue(dev);
-}
-
-/* general interrupt entry */
-
-static irqreturn_t irq_handler(int dummy, void *device)
-{
- struct net_device *dev = device;
- u16 ival;
-
- /* in case we're not meant... */
- if (!(inb(dev->base_addr + BCMREG) & BCMREG_IPEND))
- return IRQ_NONE;
-
- /* loop through the interrupt bits until everything is clear */
- while (1) {
- ival = inw(dev->base_addr + SONIC_ISREG);
-
- if (ival & ISREG_RBE) {
- irqrbe_handler(dev);
- outw(ISREG_RBE, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_PKTRX) {
- irqrx_handler(dev);
- outw(ISREG_PKTRX, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_TXDN) {
- irqtx_handler(dev);
- outw(ISREG_TXDN, dev->base_addr + SONIC_ISREG);
- }
- if (ival & ISREG_TXER) {
- irqtxerr_handler(dev);
- outw(ISREG_TXER, dev->base_addr + SONIC_ISREG);
- }
- break;
- }
- return IRQ_HANDLED;
-}
-
-/* ------------------------------------------------------------------------
- * driver methods
- * ------------------------------------------------------------------------ */
-
-/* MCA info */
-
-#if 0 /* info available elsewhere, but this is kept for reference */
-static int ibmlana_getinfo(char *buf, int slot, void *d)
-{
- int len = 0, i;
- struct net_device *dev = (struct net_device *) d;
- ibmlana_priv *priv;
-
- /* can't say anything about an uninitialized device... */
-
- if (dev == NULL)
- return len;
- priv = netdev_priv(dev);
-
- /* print info */
-
- len += sprintf(buf + len, "IRQ: %d\n", priv->realirq);
- len += sprintf(buf + len, "I/O: %#lx\n", dev->base_addr);
- len += sprintf(buf + len, "Memory: %#lx-%#lx\n", dev->mem_start, dev->mem_end - 1);
- len += sprintf(buf + len, "Transceiver: %s\n", MediaNames[priv->medium]);
- len += sprintf(buf + len, "Device: %s\n", dev->name);
- len += sprintf(buf + len, "MAC address:");
- for (i = 0; i < 6; i++)
- len += sprintf(buf + len, " %02x", dev->dev_addr[i]);
- buf[len++] = '\n';
- buf[len] = 0;
-
- return len;
-}
-#endif
-
-/* open driver. Means also initialization and start of LANCE */
-
-static int ibmlana_open(struct net_device *dev)
-{
- int result;
- ibmlana_priv *priv = netdev_priv(dev);
-
- /* register resources - only necessary for IRQ */
-
- result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
- dev->name, dev);
- if (result != 0) {
- printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
- return result;
- }
- dev->irq = priv->realirq;
-
- /* set up the card and SONIC */
- InitBoard(dev);
-
- /* initialize operational flags */
- netif_start_queue(dev);
- return 0;
-}
-
-/* close driver. Shut down board and free allocated resources */
-
-static int ibmlana_close(struct net_device *dev)
-{
- /* turn off board */
-
- /* release resources */
- if (dev->irq != 0)
- free_irq(dev->irq, dev);
- dev->irq = 0;
- return 0;
-}
-
-/* transmit a block. */
-
-static netdev_tx_t ibmlana_tx(struct sk_buff *skb, struct net_device *dev)
-{
- ibmlana_priv *priv = netdev_priv(dev);
- int tmplen, addr;
- unsigned long flags;
- tda_t tda;
- int baddr;
-
- /* find out if there are free slots for a frame to transmit. If not,
- the upper layer is in deep desperation and we simply ignore the frame. */
-
- if (priv->txusedcnt >= TXBUFCNT) {
- dev->stats.tx_dropped++;
- goto tx_done;
- }
-
- /* copy the frame data into the next free transmit buffer - fillup missing */
- tmplen = skb->len;
- if (tmplen < 60)
- tmplen = 60;
- baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
- memcpy_toio(priv->base + baddr, skb->data, skb->len);
-
- /* copy filler into RAM - in case we're filling up...
- we're filling a bit more than necessary, but that doesn't harm
- since the buffer is far larger...
- Sorry Linus for the filler string but I couldn't resist ;-) */
-
- if (tmplen > skb->len) {
- char *fill = "NetBSD is a nice OS too! ";
- unsigned int destoffs = skb->len, l = strlen(fill);
-
- while (destoffs < tmplen) {
- memcpy_toio(priv->base + baddr + destoffs, fill, l);
- destoffs += l;
- }
- }
-
- /* set up the new frame descriptor */
- addr = priv->tdastart + (priv->nexttxdescr * sizeof(tda_t));
- memcpy_fromio(&tda, priv->base + addr, sizeof(tda_t));
- tda.length = tda.fraglength = tmplen;
- memcpy_toio(priv->base + addr, &tda, sizeof(tda_t));
-
- /* if there were no active descriptors, trigger the SONIC */
- spin_lock_irqsave(&priv->lock, flags);
-
- priv->txusedcnt++;
- priv->txused[priv->nexttxdescr] = 1;
-
- /* are all transmission slots used up ? */
- if (priv->txusedcnt >= TXBUFCNT)
- netif_stop_queue(dev);
-
- if (priv->txusedcnt == 1)
- StartTx(dev, priv->nexttxdescr);
- priv->nexttxdescr = (priv->nexttxdescr + 1) % TXBUFCNT;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-tx_done:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* switch receiver mode. */
-
-static void ibmlana_set_multicast_list(struct net_device *dev)
-{
- /* first stop the SONIC... */
- StopSONIC(dev);
- /* ...then reinit it with the new flags */
- InitBoard(dev);
-}
-
-/* ------------------------------------------------------------------------
- * hardware check
- * ------------------------------------------------------------------------ */
-
-static int ibmlana_irq;
-static int ibmlana_io;
-static int startslot; /* counts through slots when probing multiple devices */
-
-static short ibmlana_adapter_ids[] __initdata = {
- IBM_LANA_ID,
- 0x0000
-};
-
-static char *ibmlana_adapter_names[] = {
- "IBM LAN Adapter/A",
- NULL
-};
-
-
-static const struct net_device_ops ibmlana_netdev_ops = {
- .ndo_open = ibmlana_open,
- .ndo_stop = ibmlana_close,
- .ndo_start_xmit = ibmlana_tx,
- .ndo_set_rx_mode = ibmlana_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int ibmlana_init_one(struct device *kdev)
-{
- struct mca_device *mdev = to_mca_device(kdev);
- struct net_device *dev;
- int slot = mdev->slot, z, rc;
- int base = 0, irq = 0, iobase = 0, memlen = 0;
- ibmlana_priv *priv;
- ibmlana_medium medium;
-
- dev = alloc_etherdev(sizeof(ibmlana_priv));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = ibmlana_irq;
- dev->base_addr = ibmlana_io;
-
- base = dev->mem_start;
- irq = dev->irq;
-
- /* deduce card addresses */
- getaddrs(mdev, &base, &memlen, &iobase, &irq, &medium);
-
- /* were we looking for something different ? */
- if (dev->irq && dev->irq != irq) {
- rc = -ENODEV;
- goto err_out;
- }
- if (dev->mem_start && dev->mem_start != base) {
- rc = -ENODEV;
- goto err_out;
- }
-
- /* announce success */
- printk(KERN_INFO "%s: IBM LAN Adapter/A found in slot %d\n", dev->name, slot + 1);
-
- /* try to obtain I/O range */
- if (!request_region(iobase, IBM_LANA_IORANGE, DRV_NAME)) {
- printk(KERN_ERR "%s: cannot allocate I/O range at %#x!\n", DRV_NAME, iobase);
- startslot = slot + 1;
- rc = -EBUSY;
- goto err_out;
- }
-
- priv = netdev_priv(dev);
- priv->slot = slot;
- priv->realirq = mca_device_transform_irq(mdev, irq);
- priv->medium = medium;
- spin_lock_init(&priv->lock);
-
- /* set base + irq for this device (irq not allocated so far) */
-
- dev->irq = 0;
- dev->mem_start = base;
- dev->mem_end = base + memlen;
- dev->base_addr = iobase;
-
- priv->base = ioremap(base, memlen);
- if (!priv->base) {
- printk(KERN_ERR "%s: cannot remap memory!\n", DRV_NAME);
- startslot = slot + 1;
- rc = -EBUSY;
- goto err_out_reg;
- }
-
- mca_device_set_name(mdev, ibmlana_adapter_names[mdev->index]);
- mca_device_set_claim(mdev, 1);
-
- /* set methods */
- dev->netdev_ops = &ibmlana_netdev_ops;
- dev->flags |= IFF_MULTICAST;
-
- /* copy out MAC address */
-
- for (z = 0; z < ETH_ALEN; z++)
- dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
-
- /* print config */
-
- printk(KERN_INFO "%s: IRQ %d, I/O %#lx, memory %#lx-%#lx, "
- "MAC address %pM.\n",
- dev->name, priv->realirq, dev->base_addr,
- dev->mem_start, dev->mem_end - 1,
- dev->dev_addr);
- printk(KERN_INFO "%s: %s medium\n", dev->name, MediaNames[priv->medium]);
-
- /* reset board */
-
- ResetBoard(dev);
-
- /* next probe will start at next slot */
-
- startslot = slot + 1;
-
- rc = register_netdev(dev);
- if (rc)
- goto err_out_claimed;
-
- dev_set_drvdata(kdev, dev);
- return 0;
-
-err_out_claimed:
- mca_device_set_claim(mdev, 0);
- iounmap(priv->base);
-err_out_reg:
- release_region(iobase, IBM_LANA_IORANGE);
-err_out:
- free_netdev(dev);
- return rc;
-}
-
-static int ibmlana_remove_one(struct device *kdev)
-{
- struct mca_device *mdev = to_mca_device(kdev);
- struct net_device *dev = dev_get_drvdata(kdev);
- ibmlana_priv *priv = netdev_priv(dev);
-
- unregister_netdev(dev);
- /*DeinitBoard(dev); */
- release_region(dev->base_addr, IBM_LANA_IORANGE);
- mca_device_set_claim(mdev, 0);
- iounmap(priv->base);
- free_netdev(dev);
- return 0;
-}
-
-/* ------------------------------------------------------------------------
- * modularization support
- * ------------------------------------------------------------------------ */
-
-module_param_named(irq, ibmlana_irq, int, 0);
-module_param_named(io, ibmlana_io, int, 0);
-MODULE_PARM_DESC(irq, "IBM LAN/A IRQ number");
-MODULE_PARM_DESC(io, "IBM LAN/A I/O base address");
-MODULE_LICENSE("GPL");
-
-static struct mca_driver ibmlana_driver = {
- .id_table = ibmlana_adapter_ids,
- .driver = {
- .name = "ibmlana",
- .bus = &mca_bus_type,
- .probe = ibmlana_init_one,
- .remove = ibmlana_remove_one,
- },
-};
-
-static int __init ibmlana_init_module(void)
-{
- return mca_register_driver(&ibmlana_driver);
-}
-
-static void __exit ibmlana_cleanup_module(void)
-{
- mca_unregister_driver(&ibmlana_driver);
-}
-
-module_init(ibmlana_init_module);
-module_exit(ibmlana_cleanup_module);
diff --git a/drivers/net/ethernet/natsemi/ibmlana.h b/drivers/net/ethernet/natsemi/ibmlana.h
deleted file mode 100644
index accd5efc9c8..00000000000
--- a/drivers/net/ethernet/natsemi/ibmlana.h
+++ /dev/null
@@ -1,278 +0,0 @@
-#ifndef _IBM_LANA_INCLUDE_
-#define _IBM_LANA_INCLUDE_
-
-#ifdef _IBM_LANA_DRIVER_
-
-/* maximum packet size */
-
-#define PKTSIZE 1524
-
-/* number of transmit buffers */
-
-#define TXBUFCNT 4
-
-/* Adapter ID's */
-#define IBM_LANA_ID 0xffe0
-
-/* media enumeration - defined in a way that it fits onto the LAN/A's
- POS registers... */
-
-typedef enum {
- Media_10BaseT, Media_10Base5,
- Media_Unknown, Media_10Base2, Media_Count
-} ibmlana_medium;
-
-/* private structure */
-
-typedef struct {
- unsigned int slot; /* MCA-Slot-# */
- int realirq; /* memorizes actual IRQ, even when
- currently not allocated */
-	ibmlana_medium medium;	/* physical connector */
- u32 tdastart, txbufstart, /* addresses */
- rrastart, rxbufstart, rdastart, rxbufcnt, txusedcnt;
- int nextrxdescr, /* next rx descriptor to be used */
- lastrxdescr, /* last free rx descriptor */
- nexttxdescr, /* last tx descriptor to be used */
- currtxdescr, /* tx descriptor currently tx'ed */
- txused[TXBUFCNT]; /* busy flags */
- void __iomem *base;
- spinlock_t lock;
-} ibmlana_priv;
-
-/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
- a full 64K I/O range... */
-
-#define IBM_LANA_IORANGE 0xa0
-
-/* Command Register: */
-
-#define SONIC_CMDREG 0x00
-#define CMDREG_HTX 0x0001 /* halt transmission */
-#define CMDREG_TXP 0x0002 /* start transmission */
-#define CMDREG_RXDIS 0x0004 /* disable receiver */
-#define CMDREG_RXEN 0x0008 /* enable receiver */
-#define CMDREG_STP 0x0010 /* stop timer */
-#define CMDREG_ST 0x0020 /* start timer */
-#define CMDREG_RST 0x0080 /* software reset */
-#define CMDREG_RRRA 0x0100 /* force SONIC to read first RRA */
-#define CMDREG_LCAM 0x0200 /* force SONIC to read CAM descrs */
-
-/* Data Configuration Register */
-
-#define SONIC_DCREG 0x02
-#define DCREG_EXBUS 0x8000 /* Extended Bus Mode */
-#define DCREG_LBR 0x2000 /* Latched Bus Retry */
-#define DCREG_PO1 0x1000 /* Programmable Outputs */
-#define DCREG_PO0 0x0800
-#define DCREG_SBUS 0x0400 /* Synchronous Bus Mode */
-#define DCREG_USR1 0x0200 /* User Definable Pins */
-#define DCREG_USR0 0x0100
-#define DCREG_WC0 0x0000 /* 0..3 Wait States */
-#define DCREG_WC1 0x0040
-#define DCREG_WC2 0x0080
-#define DCREG_WC3 0x00c0
-#define DCREG_DW16 0x0000 /* 16 bit Bus Mode */
-#define DCREG_DW32 0x0020 /* 32 bit Bus Mode */
-#define DCREG_BMS 0x0010 /* Block Mode Select */
-#define DCREG_RFT4 0x0000 /* 4/8/16/24 bytes RX Threshold */
-#define DCREG_RFT8 0x0004
-#define DCREG_RFT16 0x0008
-#define DCREG_RFT24 0x000c
-#define DCREG_TFT8 0x0000 /* 8/16/24/28 bytes TX Threshold */
-#define DCREG_TFT16 0x0001
-#define DCREG_TFT24 0x0002
-#define DCREG_TFT28 0x0003
-
-/* Receive Control Register */
-
-#define SONIC_RCREG 0x04
-#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */
-#define RCREG_RNT 0x4000 /* accept packets that are < 64 */
-#define RCREG_BRD 0x2000 /* accept broadcasts */
-#define RCREG_PRO 0x1000 /* promiscuous mode */
-#define RCREG_AMC 0x0800 /* accept all multicasts */
-#define RCREG_LB_NONE 0x0000 /* no loopback */
-#define RCREG_LB_MAC 0x0200 /* MAC loopback */
-#define RCREG_LB_ENDEC 0x0400 /* ENDEC loopback */
-#define RCREG_LB_XVR 0x0600 /* Transceiver loopback */
-#define RCREG_MC 0x0100 /* Multicast received */
-#define RCREG_BC 0x0080 /* Broadcast received */
-#define RCREG_LPKT 0x0040 /* last packet in RBA */
-#define RCREG_CRS 0x0020 /* carrier sense present */
-#define RCREG_COL 0x0010 /* recv'd packet with collision */
-#define RCREG_CRCR 0x0008 /* recv'd packet with CRC error */
-#define RCREG_FAER 0x0004 /* recv'd packet with inv. framing */
-#define RCREG_LBK 0x0002 /* recv'd loopback packet */
-#define RCREG_PRX 0x0001 /* recv'd packet is OK */
-
-/* Transmit Control Register */
-
-#define SONIC_TCREG 0x06
-#define TCREG_PINT 0x8000 /* generate interrupt after TDA read */
-#define TCREG_POWC 0x4000 /* timer start out of window detect */
-#define TCREG_CRCI 0x2000 /* inhibit CRC generation */
-#define TCREG_EXDIS 0x1000 /* disable excessive deferral timer */
-#define TCREG_EXD 0x0400 /* excessive deferral occurred */
-#define TCREG_DEF 0x0200 /* single deferral occurred */
-#define TCREG_NCRS 0x0100 /* no carrier detected */
-#define TCREG_CRSL 0x0080 /* carrier lost */
-#define TCREG_EXC 0x0040 /* excessive collisions occurred */
-#define TCREG_OWC 0x0020 /* out of window collision occurred */
-#define TCREG_PMB 0x0008 /* packet monitored bad */
-#define TCREG_FU 0x0004 /* FIFO underrun */
-#define TCREG_BCM 0x0002 /* byte count mismatch of fragments */
-#define TCREG_PTX 0x0001 /* packet transmitted OK */
-
-/* Interrupt Mask Register */
-
-#define SONIC_IMREG 0x08
-#define IMREG_BREN 0x4000 /* interrupt when bus retry occurred */
-#define IMREG_HBLEN 0x2000 /* interrupt when heartbeat lost */
-#define IMREG_LCDEN 0x1000 /* interrupt when CAM loaded */
-#define IMREG_PINTEN 0x0800 /* interrupt when PINT in TDA set */
-#define IMREG_PRXEN 0x0400 /* interrupt when packet received */
-#define IMREG_PTXEN 0x0200 /* interrupt when packet was sent */
-#define IMREG_TXEREN 0x0100 /* interrupt when send failed */
-#define IMREG_TCEN 0x0080 /* interrupt when timer completed */
-#define IMREG_RDEEN 0x0040 /* interrupt when RDA exhausted */
-#define IMREG_RBEEN 0x0020 /* interrupt when RBA exhausted */
-#define IMREG_RBAEEN 0x0010 /* interrupt when RBA too short */
-#define IMREG_CRCEN 0x0008 /* interrupt when CRC counter rolls */
-#define IMREG_FAEEN 0x0004 /* interrupt when FAE counter rolls */
-#define IMREG_MPEN 0x0002 /* interrupt when MP counter rolls */
-#define IMREG_RFOEN 0x0001 /* interrupt when Rx FIFO overflows */
-
-/* Interrupt Status Register */
-
-#define SONIC_ISREG 0x0a
-#define ISREG_BR 0x4000 /* bus retry occurred */
-#define ISREG_HBL 0x2000 /* heartbeat lost */
-#define ISREG_LCD 0x1000 /* CAM loaded */
-#define ISREG_PINT 0x0800 /* PINT in TDA set */
-#define ISREG_PKTRX 0x0400 /* packet received */
-#define ISREG_TXDN 0x0200 /* packet was sent */
-#define ISREG_TXER 0x0100 /* send failed */
-#define ISREG_TC 0x0080 /* timer completed */
-#define ISREG_RDE 0x0040 /* RDA exhausted */
-#define ISREG_RBE 0x0020 /* RBA exhausted */
-#define ISREG_RBAE 0x0010 /* RBA too short for received frame */
-#define ISREG_CRC 0x0008 /* CRC counter rolls over */
-#define ISREG_FAE 0x0004 /* FAE counter rolls over */
-#define ISREG_MP 0x0002 /* MP counter rolls over */
-#define ISREG_RFO 0x0001 /* Rx FIFO overflows */
-
-#define SONIC_UTDA 0x0c /* current transmit descr address */
-#define SONIC_CTDA 0x0e
-
-#define SONIC_URDA 0x1a /* current receive descr address */
-#define SONIC_CRDA 0x1c
-
-#define SONIC_CRBA0 0x1e /* current receive buffer address */
-#define SONIC_CRBA1 0x20
-
-#define SONIC_RBWC0 0x22 /* word count in receive buffer */
-#define SONIC_RBWC1 0x24
-
-#define SONIC_EOBC 0x26 /* minimum space to be free in RBA */
-
-#define SONIC_URRA 0x28 /* upper address of CDA & Recv Area */
-
-#define SONIC_RSA 0x2a /* start of receive resource area */
-
-#define SONIC_REA 0x2c /* end of receive resource area */
-
-#define SONIC_RRP 0x2e /* resource read pointer */
-
-#define SONIC_RWP 0x30 /* resource write pointer */
-
-#define SONIC_CAMEPTR 0x42 /* CAM entry pointer */
-
-#define SONIC_CAMADDR2 0x44 /* CAM address ports */
-#define SONIC_CAMADDR1 0x46
-#define SONIC_CAMADDR0 0x48
-
-#define SONIC_CAMPTR 0x4c /* lower address of CDA */
-
-#define SONIC_CAMCNT 0x4e /* # of CAM descriptors to load */
-
-/* Data Configuration Register 2 */
-
-#define SONIC_DCREG2 0x7e
-#define DCREG2_EXPO3 0x8000 /* extended programmable outputs */
-#define DCREG2_EXPO2 0x4000
-#define DCREG2_EXPO1 0x2000
-#define DCREG2_EXPO0 0x1000
-#define DCREG2_HD 0x0800 /* heartbeat disable */
-#define DCREG2_JD 0x0200 /* jabber timer disable */
-#define DCREG2_AUTO 0x0100 /* enable AUI/TP auto selection */
-#define DCREG2_XWRAP 0x0040 /* TP transceiver loopback */
-#define DCREG2_PH 0x0010 /* HOLD request timing */
-#define DCREG2_PCM 0x0004 /* packet compress when matched */
-#define DCREG2_PCNM 0x0002 /* packet compress when not matched */
-#define DCREG2_RJCM 0x0001 /* inverse packet match via CAM */
-
-/* Board Control Register: Enable RAM, Interrupts... */
-
-#define BCMREG 0x80
-#define BCMREG_RAMEN 0x80 /* switch over to RAM */
-#define BCMREG_IPEND 0x40 /* interrupt pending ? */
-#define BCMREG_RESET 0x08 /* reset board */
-#define BCMREG_16BIT 0x04 /* adapter in 16-bit slot */
-#define BCMREG_RAMWIN 0x02 /* enable RAM window */
-#define BCMREG_IEN 0x01 /* interrupt enable */
-
-/* MAC Address PROM */
-
-#define MACADDRPROM 0x92
-
-/* structure of a CAM entry */
-
-typedef struct {
- u32 index; /* pointer into CAM area */
- u32 addr0; /* address part (bits 0..15 used) */
- u32 addr1;
- u32 addr2;
-} camentry_t;
-
-/* structure of a receive resource */
-
-typedef struct {
- u32 startlo; /* start address (bits 0..15 used) */
- u32 starthi;
- u32 cntlo; /* size in 16-bit quantities */
- u32 cnthi;
-} rra_t;
-
-/* structure of a receive descriptor */
-
-typedef struct {
- u32 status; /* packet status */
- u32 length; /* length in bytes */
- u32 startlo; /* start address */
- u32 starthi;
- u32 seqno; /* frame sequence */
- u32 link; /* pointer to next descriptor */
- /* bit 0 = EOL */
- u32 inuse; /* !=0 --> free for SONIC to write */
-} rda_t;
-
-/* structure of a transmit descriptor */
-
-typedef struct {
- u32 status; /* transmit status */
- u32 config; /* value for TCR */
- u32 length; /* total length */
- u32 fragcount; /* number of fragments */
- u32 startlo; /* start address of fragment */
- u32 starthi;
- u32 fraglength; /* length of this fragment */
- /* more address/length triplets may */
- /* follow here */
- u32 link; /* pointer to next descriptor */
- /* bit 0 = EOL */
-} tda_t;
-
-#endif /* _IBM_LANA_DRIVER_ */
-
-#endif /* _IBM_LANA_INCLUDE_ */
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index f4ad60c97ea..7a5e295588b 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -862,9 +862,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eedata;
}
- /* Store MAC Address in perm_addr */
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
-
np = netdev_priv(dev);
np->ioaddr = ioaddr;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 7c94c089212..bfd887382e1 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8014,7 +8014,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 92dd72d3f9d..f8f073880f8 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -82,9 +82,9 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct vxgedev *vdev = netdev_priv(dev);
- strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
- strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
- strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
+ strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, vdev->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(vdev->pdev), sizeof(info->bus_info));
info->regdump_len = sizeof(struct vxge_hw_vpath_reg)
* vdev->no_of_vpath;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 7c87105ca04..794444e0949 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -4682,7 +4682,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
	/* Store the fw version for ethtool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
- memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index cbd6a529d0c..162da8975b0 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -878,8 +878,8 @@ static int w90p910_ether_ioctl(struct net_device *dev,
static void w90p910_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 87fa5919c45..0b8de12bcbc 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3055,7 +3055,6 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
/* synchronized against open : rtnl_lock() held by caller */
memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5766,9 +5765,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->perm_addr)) {
+ if (!is_valid_ether_addr(dev->dev_addr)) {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 3466ca1e8f6..c4122c86f82 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -800,7 +800,7 @@ static int lpc_mii_probe(struct net_device *ndev)
else
netdev_info(ndev, "using RMII interface\n");
phydev = phy_connect(ndev, dev_name(&phydev->dev),
- &lpc_handle_link_change, 0,
+ &lpc_handle_link_change,
lpc_phy_interface_mode(&pldat->pdev->dev));
if (IS_ERR(phydev)) {
@@ -1239,9 +1239,10 @@ static int lpc_eth_open(struct net_device *ndev)
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, dev_name(ndev->dev.parent));
+ strlcpy(info->driver, MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(ndev->dev.parent),
+ sizeof(info->bus_info));
}
static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index b5499198e02..921729f9c85 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -1350,10 +1350,10 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver));
- strncpy(info->version, DRV_VERSION, sizeof(info->version));
- strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
info->n_stats = 0;
info->testinfo_len = 0;
info->regdump_len = 0;
@@ -1534,12 +1534,10 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac && is_valid_ether_addr(mac)) {
+ if (mac && is_valid_ether_addr(mac))
memcpy(netdev->dev_addr, mac, ETH_ALEN);
- netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
- } else {
+ else
eth_hw_addr_random(netdev);
- }
p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index bf829ee3007..cac33e5f9bc 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1808,9 +1808,10 @@ static int check_if_running(struct net_device *dev)
static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct hamachi_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fbaed4fa72f..d28593b1fc3 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1326,9 +1326,10 @@ static void set_rx_mode(struct net_device *dev)
static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct yellowfin_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ethtool_ops = {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 7f556a84925..1bcaf45aa86 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -201,11 +201,8 @@ netxen_setup_minidump(struct netxen_adapter *adapter)
adapter->mdump.md_template =
kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
- if (!adapter->mdump.md_template) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory "
- "for minidump template.\n");
+ if (!adapter->mdump.md_template)
return -ENOMEM;
- }
err = netxen_get_minidump_template(adapter);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 946160fa584..9fbb1cdbfa4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -670,11 +670,9 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
}
cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add mac address filter\n",
- adapter->netdev->name);
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
list_add_tail(&cur->list, &adapter->mac_list);
return nx_p3_sre_macaddr_change(adapter,
@@ -2568,16 +2566,10 @@ netxen_dump_fw(struct netxen_adapter *adapter)
adapter->mdump.md_capture_size;
if (!adapter->mdump.md_capture_buff) {
adapter->mdump.md_capture_buff =
- vmalloc(adapter->mdump.md_dump_size);
- if (!adapter->mdump.md_capture_buff) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate memory for minidump "
- "capture_buffer(%d bytes).\n",
- adapter->mdump.md_dump_size);
+ vzalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff)
return;
- }
- memset(adapter->mdump.md_capture_buff, 0,
- adapter->mdump.md_dump_size);
+
if (netxen_collect_minidump(adapter)) {
adapter->mdump.has_valid_dump = 0;
adapter->mdump.md_dump_size = 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 695667d471a..4782dcfde73 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -197,41 +197,33 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
struct nx_host_sds_ring *sds_ring;
struct nx_host_tx_ring *tx_ring;
struct netxen_rx_buffer *rx_buf;
- int ring, i, size;
+ int ring, i;
struct netxen_cmd_buffer *cmd_buf_arr;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- size = sizeof(struct nx_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate tx ring struct\n",
- netdev->name);
+ tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
return -ENOMEM;
- }
+
adapter->tx_ring = tx_ring;
tx_ring->num_desc = adapter->num_txd;
tx_ring->txq = netdev_get_tx_queue(netdev, 0);
cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
- netdev->name);
+ if (cmd_buf_arr == NULL)
goto err_out;
- }
+
tx_ring->cmd_buf_arr = cmd_buf_arr;
recv_ctx = &adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof (struct nx_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&pdev->dev, "%s: failed to allocate rds ring struct\n",
- netdev->name);
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69e321a6507..501f49207da 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -501,12 +501,11 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
for (i = 0; i < 6; i++)
netdev->dev_addr[i] = *(p + 5 - i);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
return 0;
@@ -3177,11 +3176,8 @@ netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
}
cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
- if (cur == NULL) {
- printk(KERN_ERR "%s: failed to add vlan ip to list\n",
- adapter->netdev->name);
+ if (cur == NULL)
return;
- }
cur->ip_addr = ifa->ifa_address;
list_add_tail(&cur->list, &adapter->vlan_ip_list);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 67a679aaf29..8fd38cb6d26 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2591,13 +2591,11 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
else
qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
- qdev->lrg_buf =
- kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
- GFP_KERNEL);
- if (qdev->lrg_buf == NULL) {
- netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
+ qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+ sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL)
return -ENOMEM;
- }
qdev->lrg_buf_q_alloc_virt_addr =
pci_alloc_consistent(qdev->pdev,
@@ -3867,7 +3865,6 @@ static int ql3xxx_probe(struct pci_dev *pdev,
ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index c4b8ced8382..7722a203e38 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_QLCNIC) := qlcnic.o
qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
	qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
-	qlcnic_sysfs.o qlcnic_minidump.o
+	qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+	qlcnic_83xx_init.o qlcnic_83xx_vnic.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index bc7ec64e9c7..11c3db6daff 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -33,11 +33,13 @@
#include <linux/if_vlan.h>
#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 30
-#define QLCNIC_LINUX_VERSIONID "5.0.30"
+#define _QLCNIC_LINUX_MINOR 1
+#define _QLCNIC_LINUX_SUBVERSION 34
+#define QLCNIC_LINUX_VERSIONID "5.1.34"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -96,7 +98,6 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
-
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -203,6 +204,7 @@ struct uni_data_desc{
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FDT_LOCATION 0x3F0000
#define QLCNIC_B0_FW_IMAGE_REGION 0x74
#define QLCNIC_C0_FW_IMAGE_REGION 0x97
#define QLCNIC_BOOTLD_REGION 0X72
@@ -223,6 +225,36 @@ struct qlcnic_flt_entry {
u32 end_addr;
};
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+ u32 valid;
+ u16 ver;
+ u16 len;
+ u16 cksum;
+ u16 unused;
+ u8 model[16];
+ u16 mfg_id;
+ u16 id;
+ u8 flag;
+ u8 erase_cmd;
+ u8 alt_erase_cmd;
+ u8 write_enable_cmd;
+ u8 write_enable_bits;
+ u8 write_statusreg_cmd;
+ u8 unprotected_sec_cmd;
+ u8 read_manuf_cmd;
+ u32 block_size;
+ u32 alt_block_size;
+ u32 flash_size;
+ u32 write_enable_data;
+ u8 readid_addr_len;
+ u8 write_disable_bits;
+ u8 read_dev_id_len;
+ u8 chip_erase_cmd;
+ u16 read_timeo;
+ u8 protected_sec_cmd;
+ u8 resvd[65];
+};
/* Magic number to let user know flash is programmed */
#define QLCNIC_BDINFO_MAGIC 0x12345678
@@ -267,6 +299,12 @@ struct qlcnic_flt_entry {
extern char qlcnic_driver_name[];
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+extern int qlcnic_config_npars;
+
/* Number of status descriptors to handle per interrupt */
#define MAX_STATUS_HANDLE (64)
@@ -314,6 +352,7 @@ struct qlcnic_rx_buffer {
#define QLCNIC_INTR_DEFAULT 0x04
#define QLCNIC_CONFIG_INTR_COALESCE 3
+#define QLCNIC_DEV_INFO_SIZE 1
struct qlcnic_nic_intr_coalesce {
u8 type;
@@ -337,6 +376,7 @@ struct qlcnic_dump_template_hdr {
u32 sys_info[3];
u32 saved_state[16];
u32 cap_sizes[8];
+ u32 ocm_wnd_reg[16];
u32 rsvd[0];
};
@@ -396,12 +436,24 @@ struct qlcnic_hardware_context {
u16 act_pci_func;
u32 capabilities;
+ u32 capabilities2;
u32 temp;
u32 int_vec_bit;
u32 fw_hal_version;
+ u32 port_config;
struct qlcnic_hardware_ops *hw_ops;
struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_fw_dump fw_dump;
+ struct qlcnic_fdt fdt;
+ struct qlc_83xx_reset reset;
+ struct qlc_83xx_idc idc;
+ struct qlc_83xx_fw_info fw_info;
+ struct qlcnic_intrpt_config *intr_tbl;
+ u32 *reg_tbl;
+ u32 *ext_reg_tbl;
+ u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+ u32 mbox_reg[4];
+ spinlock_t mbx_lock;
};
struct qlcnic_adapter_stats {
@@ -422,6 +474,8 @@ struct qlcnic_adapter_stats {
u64 null_rxbuf;
u64 rx_dma_map_error;
u64 tx_dma_map_error;
+ u64 spurious_intr;
+ u64 mac_filter_limit_overrun;
};
/*
@@ -460,12 +514,17 @@ struct qlcnic_host_sds_ring {
} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
+ int irq;
+ void __iomem *crb_intr_mask;
+ char name[IFNAMSIZ+4];
u16 ctx_id;
u32 producer;
u32 sw_consumer;
u32 num_desc;
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
struct qlcnic_cmd_buffer *cmd_buf_arr;
__le32 *hw_consumer;
@@ -492,8 +551,6 @@ struct qlcnic_recv_context {
/* HW context creation */
#define QLCNIC_OS_CRB_RETRY_COUNT 4000
-#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
- (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
#define QLCNIC_CDRP_CMD_BIT 0x80000000
@@ -513,43 +570,6 @@ struct qlcnic_recv_context {
* the crb QLCNIC_CDRP_CRB_OFFSET.
*/
#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
-#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
-
-#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
-#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
-#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
-#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
-#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
-#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
-#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
-#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
-#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
-#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
-#define QLCNIC_CDRP_CMD_INTRPT_TEST 0x00000011
-#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
-#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
-#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
-#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
-#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
-#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
-#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
-#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
-#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
-
-#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
-#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
-#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
-#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
-#define QLCNIC_CDRP_CMD_SET_PORTMIRRORING 0x00000027
-#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
-#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
-#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
-#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
-#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
-#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
#define QLCNIC_RCODE_INVALID_ARGS 6
@@ -726,6 +746,11 @@ struct qlcnic_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
};
+/* MAC Learn */
+#define NO_MAC_LEARN 0
+#define DRV_MAC_LEARN 1
+#define FDB_MAC_LEARN 2
+
#define QLCNIC_HOST_REQUEST 0x13
#define QLCNIC_REQUEST 0x14
@@ -762,7 +787,7 @@ struct qlcnic_mac_list_s {
*/
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
-#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -779,6 +804,8 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
+#define QLCNIC_FW_CAPABILITY_2_OCBB BIT_5
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -855,7 +882,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_MSI_ENABLED 0x02
#define QLCNIC_MSIX_ENABLED 0x04
-#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_LRO_ENABLED 0x01
#define QLCNIC_LRO_DISABLED 0x00
#define QLCNIC_BRIDGE_ENABLED 0X10
#define QLCNIC_DIAG_ENABLED 0x20
@@ -887,6 +914,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
+#define __QLCNIC_ELB_INPROGRESS 8
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -895,12 +923,14 @@ struct qlcnic_ipaddr {
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
+#define QLCNIC_LB_BUCKET_SIZE 32
/* QLCNIC Driver Error Code */
#define QLCNIC_FW_NOT_RESPOND 51
#define QLCNIC_TEST_IN_PROGRESS 52
#define QLCNIC_UNDEFINED_ERROR 53
#define QLCNIC_LB_CABLE_NOT_CONN 54
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
struct hlist_node fnode;
@@ -912,7 +942,8 @@ struct qlcnic_filter {
struct qlcnic_filter_hash {
struct hlist_head *fhead;
u8 fnum;
- u8 fmax;
+ u16 fmax;
+ u16 fbucket_size;
};
struct qlcnic_adapter {
@@ -934,6 +965,7 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
+ u8 rx_csum;
u8 portnum;
u8 fw_wait_cnt;
@@ -954,8 +986,10 @@ struct qlcnic_adapter {
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
- u8 mac_learn;
+ bool drv_mac_learn;
+ bool fdb_mac_learn;
unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 flash_mfg_id;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
@@ -969,12 +1003,17 @@ struct qlcnic_adapter {
void __iomem *isr_int_vec;
struct msix_entry *msix_entries;
+ struct workqueue_struct *qlcnic_wq;
struct delayed_work fw_work;
+ struct delayed_work idc_aen_work;
struct qlcnic_filter_hash fhash;
+ struct qlcnic_filter_hash rx_fhash;
spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
+ /* spinlock for catching rcv filters for eswitch traffic */
+ spinlock_t rx_mac_learn_lock;
u32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
@@ -995,7 +1034,24 @@ struct qlcnic_info_le {
__le16 max_rx_ques;
__le16 min_tx_bw;
__le16 max_tx_bw;
- u8 reserved2[104];
+ __le32 op_type;
+ __le16 max_bw_reg_offset;
+ __le16 max_linkspeed_reg_offset;
+ __le32 capability1;
+ __le32 capability2;
+ __le32 capability3;
+ __le16 max_tx_mac_filters;
+ __le16 max_rx_mcast_mac_filters;
+ __le16 max_rx_ucast_mac_filters;
+ __le16 max_rx_ip_addr;
+ __le16 max_rx_lro_flow;
+ __le16 max_rx_status_rings;
+ __le16 max_rx_buf_rings;
+ __le16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ __le16 max_vports;
+ u8 reserved2[64];
} __packed;
struct qlcnic_info {
@@ -1005,12 +1061,28 @@ struct qlcnic_info {
u16 switch_mode;
u32 capabilities;
u8 max_mac_filters;
- u8 reserved1;
u16 max_mtu;
u16 max_tx_ques;
u16 max_rx_ques;
u16 min_tx_bw;
u16 max_tx_bw;
+ u32 op_type;
+ u16 max_bw_reg_offset;
+ u16 max_linkspeed_reg_offset;
+ u32 capability1;
+ u32 capability2;
+ u32 capability3;
+ u16 max_tx_mac_filters;
+ u16 max_rx_mcast_mac_filters;
+ u16 max_rx_ucast_mac_filters;
+ u16 max_rx_ip_addr;
+ u16 max_rx_lro_flow;
+ u16 max_rx_status_rings;
+ u16 max_rx_buf_rings;
+ u16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ u16 max_vports;
};
struct qlcnic_pci_info_le {
@@ -1024,7 +1096,9 @@ struct qlcnic_pci_info_le {
__le16 reserved1[2];
u8 mac[ETH_ALEN];
- u8 reserved2[106];
+ __le16 func_count;
+ u8 reserved2[104];
+
} __packed;
struct qlcnic_pci_info {
@@ -1035,6 +1109,7 @@ struct qlcnic_pci_info {
u16 tx_min_bw;
u16 tx_max_bw;
u8 mac[ETH_ALEN];
+ u16 func_count;
};
struct qlcnic_npar_info {
@@ -1266,10 +1341,8 @@ struct qlcnic_esw_statistics {
#define QLCNIC_RESET_QUIESCENT 0xadd00020
struct _cdrp_cmd {
- u32 cmd;
- u32 arg1;
- u32 arg2;
- u32 arg3;
+ u32 num;
+ u32 *arg;
};
struct qlcnic_cmd_args {
@@ -1279,9 +1352,6 @@ struct qlcnic_cmd_args {
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
-
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
-int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
@@ -1291,9 +1361,10 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
(((addr) < (high)) && ((addr) >= (low)))
#define QLCRD32(adapter, off) \
- (qlcnic_hw_read_wx_2M(adapter, off))
+ (adapter->ahw->hw_ops->read_reg)(adapter, off)
+
#define QLCWR32(adapter, off, val) \
- (qlcnic_hw_write_wx_2M(adapter, off, val))
+ adapter->ahw->hw_ops->write_reg(adapter, off, val)
int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
@@ -1306,10 +1377,6 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
#define qlcnic_phy_unlock(a) \
qlcnic_pcie_sem_unlock((a), 3)
-#define qlcnic_api_lock(a) \
- qlcnic_pcie_sem_lock((a), 5, 0)
-#define qlcnic_api_unlock(a) \
- qlcnic_pcie_sem_unlock((a), 5)
#define qlcnic_sw_lock(a) \
qlcnic_pcie_sem_lock((a), 6, 0)
#define qlcnic_sw_unlock(a) \
@@ -1324,14 +1391,13 @@ void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
#define MAX_CTL_CHECK 1000
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
@@ -1361,54 +1427,42 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring);
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd);
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
-void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
netdev_features_t qlcnic_fix_features(struct net_device *netdev,
netdev_features_t features);
int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
-void qlcnic_fetch_mac(u32, u32, u8, u8 *);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode);
/* Functions from qlcnic_ethtool.c */
-int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[]);
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+int qlcnic_loopback_test(struct net_device *, u8);
/* Functions from qlcnic_main.c */
int qlcnic_reset_context(struct qlcnic_adapter *);
-void qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
-void qlcnic_dev_request_reset(struct qlcnic_adapter *);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_validate_max_rss(u8, u8);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
-
-/* Management functions */
-int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
-int qlcnic_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
-int qlcnic_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
/* eSwitch management functions */
int qlcnic_config_switch_port(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+
int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
@@ -1418,14 +1472,12 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
struct __qlcnic_esw_statistics *);
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
-extern int qlcnic_config_tso;
-int qlcnic_napi_add(struct qlcnic_adapter *, struct net_device *);
-void qlcnic_napi_del(struct qlcnic_adapter *adapter);
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter);
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
@@ -1433,6 +1485,9 @@ void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_set_vlan_config(struct qlcnic_adapter *,
@@ -1440,6 +1495,22 @@ void qlcnic_set_vlan_config(struct qlcnic_adapter *,
void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
+ __le16);
/*
* QLOGIC Board information
*/
@@ -1462,6 +1533,277 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+ int (*init_driver) (struct qlcnic_adapter *);
+ void (*request_reset) (struct qlcnic_adapter *, u32);
+ void (*cancel_idc_work) (struct qlcnic_adapter *);
+ int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+ void (*napi_del)(struct qlcnic_adapter *);
+ void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+ irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+};
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+ void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ int (*read_reg) (struct qlcnic_adapter *, ulong);
+ int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+ void (*get_ocm_win) (struct qlcnic_hardware_context *);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
+ int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+ int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*get_func_no) (struct qlcnic_adapter *);
+ int (*api_lock) (struct qlcnic_adapter *);
+ void (*api_unlock) (struct qlcnic_adapter *);
+ void (*add_sysfs) (struct qlcnic_adapter *);
+ void (*remove_sysfs) (struct qlcnic_adapter *);
+ void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+ int (*create_rx_ctx) (struct qlcnic_adapter *);
+ int (*create_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+ int (*setup_link_event) (struct qlcnic_adapter *, int);
+ int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+ int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, __le16, u8);
+ void (*napi_enable) (struct qlcnic_adapter *);
+ void (*napi_disable) (struct qlcnic_adapter *);
+ void (*config_intr_coal) (struct qlcnic_adapter *);
+ int (*config_rss) (struct qlcnic_adapter *, int);
+ int (*config_hw_lro) (struct qlcnic_adapter *, int);
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+ void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, __le16);
+ int (*get_board_info) (struct qlcnic_adapter *);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off)
+{
+ return adapter->ahw->hw_ops->read_reg(adapter, off);
+}
+
+static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
+ ulong off, u32 data)
+{
+ return adapter->ahw->hw_ops->write_reg(adapter, off, data);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+ u8 *mac)
+{
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+{
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 arg)
+{
+ return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr,
+ int ring)
+{
+ return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u8 id)
+{
+ return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *info)
+{
+ return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+ u8 *addr, __le16 id, u8 cmd)
+{
+ return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline void qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->config_intr_coal(adapter);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ u32 mode)
+{
+ return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 *addr, __le16 id)
+{
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 rate)
+{
+ return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
writel(0, sds_ring->crb_intr_mask);
@@ -1480,12 +1822,6 @@ static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
extern const struct ethtool_ops qlcnic_ethtool_ops;
extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
-struct qlcnic_nic_template {
- int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
- int (*config_led) (struct qlcnic_adapter *, u32, u32);
- int (*start_firmware) (struct qlcnic_adapter *);
-};
-
#define QLCDB(adapter, lvl, _fmt, _args...) do { \
if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
printk(KERN_INFO "%s: %s: " _fmt, \
@@ -1493,6 +1829,7 @@ struct qlcnic_nic_template {
__func__, ##_args); \
} while (0)
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
@@ -1500,4 +1837,11 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644
index 00000000000..cd5ae8813cb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -0,0 +1,3011 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+
+#define QLCNIC_MAX_TX_QUEUES 1
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+/* status descriptor mailbox data
+ * @phy_addr: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u64 phy_addr;
+ u8 rsvd1[16];
+ u16 sds_ring_size;
+ u16 rsvd2[3];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd3[5];
+} __packed;
+
+/* receive descriptor buffer data
+ * @phy_addr_reg: physical address of regular buffer
+ * @phy_addr_jmb: physical address of jumbo buffer
+ * @reg_ring_sz: size of regular buffer
+ * @reg_ring_len: no. of entries in regular buffer
+ * @jmb_ring_sz: size of jumbo buffer
+ * @jmb_ring_len: no. of entries in jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u64 phy_addr_reg;
+ u64 phy_addr_jmb;
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @rcv_num: number of receive rings
+ * @sts_num: number of status rings
+ * @ctx_id: receive context id
+ * @state: state of the context
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ * @vport_id: virtual port id
+ */
+struct qlcnic_rcv_mbx_out {
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr: DMA address of the transmit buffer
+ * @cnsmr_index: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: source of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u64 phys_addr;
+ u64 cnsmr_index;
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+} __packed;
+
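+/* Mailbox command metadata: {command code, number of request (inbox)
+ * registers, number of response (outbox) registers}
+ */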
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 66},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+ {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+ {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+ {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+ {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+ {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+ {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+ {QLCNIC_CMD_IDC_ACK, 5, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+ {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+ {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+};
+
+static const u32 qlcnic_83xx_ext_reg_tbl[] = {
+ 0x38CC, /* Global Reset */
+ 0x38F0, /* Wildcard */
+ 0x38FC, /* Informant */
+ 0x3038, /* Host MBX ctrl */
+ 0x303C, /* FW MBX ctrl */
+ 0x355C, /* BOOT LOADER ADDRESS REG */
+ 0x3560, /* BOOT LOADER SIZE REG */
+ 0x3564, /* FW IMAGE ADDR REG */
+ 0x1000, /* MBX intr enable */
+ 0x1200, /* Default Intr mask */
+ 0x1204, /* Default Interrupt ID */
+ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */
+ 0x3784, /* QLC_83XX_IDC_DEV_STATE */
+ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */
+ 0x378C, /* QLC_83XX_IDC_DRV_ACK */
+ 0x3790, /* QLC_83XX_IDC_CTRL */
+ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */
+ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */
+ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */
+ 0x37A0, /* QLC_83XX_IDC_PF_0 */
+ 0x37A4, /* QLC_83XX_IDC_PF_1 */
+ 0x37A8, /* QLC_83XX_IDC_PF_2 */
+ 0x37AC, /* QLC_83XX_IDC_PF_3 */
+ 0x37B0, /* QLC_83XX_IDC_PF_4 */
+ 0x37B4, /* QLC_83XX_IDC_PF_5 */
+ 0x37B8, /* QLC_83XX_IDC_PF_6 */
+ 0x37BC, /* QLC_83XX_IDC_PF_7 */
+ 0x37C0, /* QLC_83XX_IDC_PF_8 */
+ 0x37C4, /* QLC_83XX_IDC_PF_9 */
+ 0x37C8, /* QLC_83XX_IDC_PF_10 */
+ 0x37CC, /* QLC_83XX_IDC_PF_11 */
+ 0x37D0, /* QLC_83XX_IDC_PF_12 */
+ 0x37D4, /* QLC_83XX_IDC_PF_13 */
+ 0x37D8, /* QLC_83XX_IDC_PF_14 */
+ 0x37DC, /* QLC_83XX_IDC_PF_15 */
+ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+ 0x37F0, /* QLC_83XX_DRV_OP_MODE */
+ 0x37F4, /* QLC_83XX_VNIC_STATE */
+ 0x3868, /* QLC_83XX_DRV_LOCK */
+ 0x386C, /* QLC_83XX_DRV_UNLOCK */
+ 0x3504, /* QLC_83XX_DRV_LOCK_ID */
+ 0x34A4, /* QLC_83XX_ASIC_TEMP */
+};
+
+static const u32 qlcnic_83xx_reg_tbl[] = {
+ 0x34A8, /* PEG_HALT_STAT1 */
+ 0x34AC, /* PEG_HALT_STAT2 */
+ 0x34B0, /* FW_HEARTBEAT */
+ 0x3500, /* FLASH LOCK_ID */
+ 0x3528, /* FW_CAPABILITIES */
+ 0x3538, /* Driver active, DRV_REG0 */
+ 0x3540, /* Device state, DRV_REG1 */
+ 0x3544, /* Driver state, DRV_REG2 */
+ 0x3548, /* Driver scratch, DRV_REG3 */
+ 0x354C, /* Device partition info, DRV_REG4 */
+ 0x3524, /* Driver IDC ver, DRV_REG5 */
+ 0x3550, /* FW_VER_MAJOR */
+ 0x3554, /* FW_VER_MINOR */
+ 0x3558, /* FW_VER_SUB */
+ 0x359C, /* NPAR STATE */
+ 0x35FC, /* FW_IMG_VALID */
+ 0x3650, /* CMD_PEG_STATE */
+ 0x373C, /* RCV_PEG_STATE */
+ 0x37B4, /* ASIC TEMP */
+ 0x356C, /* FW API */
+ 0x3570, /* DRV OP MODE */
+ 0x3850, /* FLASH LOCK */
+ 0x3854, /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_83xx_mbx_op,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .add_sysfs = qlcnic_83xx_add_sysfs,
+ .remove_sysfs = qlcnic_83xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .request_reset = qlcnic_83xx_idc_request_reset,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_83xx_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ struct pci_dev *pdev = adapter->pdev;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ return adapter->fw_version;
+}
+
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+ void __iomem *base;
+ u32 val;
+
+ base = adapter->ahw->pci_base0 +
+ QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+ writel(addr, base);
+ val = readl(base);
+ if (val != addr)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
+{
+ int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ret = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!ret) {
+ return QLCRDX(ahw, QLCNIC_WILDCARD);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x\n", __func__, (int)addr);
+ return -EIO;
+ }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ u32 data)
+{
+ int err;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!err) {
+ QLCWRX(ahw, QLCNIC_WILDCARD, data);
+ return 0;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x data = 0x%x\n",
+ __func__, (int)addr, data);
+ return err;
+ }
+}
+
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+{
+ int err, i, num_msix;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!num_intr)
+ num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+ num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ num_intr));
+ /* one additional MSI-X vector for the AEN (mailbox) interrupt */
+ num_msix += 1;
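+ /* plus one MSI-X vector per Tx ring */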
+ num_msix += adapter->max_drv_tx_rings;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix;
+ else
+ num_msix = 1;
+ /* setup interrupt mapping table for fw */
+ ahw->intr_tbl = vzalloc(num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+ }
+
+ for (i = 0; i < num_msix; i++) {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ else
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+ return 0;
+}
+
+inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ writel(0, adapter->tgt_mask_reg);
+}
+
+/* Enable MSI-x and INT-x interrupts */
+void qlcnic_83xx_enable_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-x and INT-x interrupts */
+void qlcnic_83xx_disable_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
+inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ *adapter)
+{
+ u32 mask;
+
+ /* The mailbox interrupt (in MSI-X mode) and the legacy interrupt
+ * share the same source register. We may get here before the
+ * contexts are created, when sds_ring->crb_intr_mask has not been
+ * initialized yet, so calculate the BAR offset of the interrupt
+ * source register directly.
+ */
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+inline void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 mask;
+
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(1, adapter->ahw->pci_base0 + mask);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+ for (i = 0; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 intr_val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int retries = 0;
+
+ intr_val = readl(adapter->tgt_status_reg);
+
+ if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+ return IRQ_NONE;
+
+ if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+ adapter->stats.spurious_intr++;
+ return IRQ_NONE;
+ }
+ /* ensure prior register writes complete before the interrupt trigger is cleared */
+ wmb();
+
+ /* clear the interrupt trigger control register */
+ writel(0, adapter->isr_int_vec);
+ intr_val = readl(adapter->isr_int_vec);
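+ /* re-read the status register until the interrupt is no longer
+ * asserted for this function or the retry limit is reached
+ */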
+ do {
+ intr_val = readl(adapter->tgt_status_reg);
+ if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+ break;
+ retries++;
+ } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+ (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+ return IRQ_HANDLED;
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ qlcnic_83xx_poll_process_aen(adapter);
+
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ ahw->diag_cnt++;
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ return IRQ_HANDLED;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ } else {
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ napi_schedule(&sds_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+
+ if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+
+ return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 val = 0, num_msix;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix - 1;
+ else
+ num_msix = 0;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ msleep(20);
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+}
+
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ u32 val;
+ char name[32];
+ int err = 0;
+ unsigned long flags = 0;
+
+ if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ flags |= IRQF_SHARED;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ handler = qlcnic_83xx_handle_aen;
+ val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+ snprintf(name, (IFNAMSIZ + 4),
+ "%s[%s]", "qlcnic", "aen");
+ err = request_irq(val, handler, flags, name, adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register MBX interrupt\n");
+ return err;
+ }
+ } else {
+ handler = qlcnic_83xx_intr;
+ val = adapter->msix_entries[0].vector;
+ err = request_irq(val, handler, flags, "qlcnic", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register INTx interrupt\n");
+ return err;
+ }
+ qlcnic_83xx_clear_legacy_intr_mask(adapter);
+ }
+
+ /* Enable mailbox interrupt */
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+
+ return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+ adapter->ahw->pci_func = val & 0xf;
+}
+
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val, limit = 0;
+
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+ do {
+ val = readl(addr);
+ if (val) {
+ /* write the function number to register */
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ ahw->pci_func);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+ return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+ val = readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int ret;
+ u32 data;
+
+ if (qlcnic_api_lock(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire lock. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
+ qlcnic_api_unlock(adapter);
+
+ if (ret == -EIO) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+ data = ret;
+ memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+
+ memcpy(&data, buf, size);
+ qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get Port Info failed\n");
+ } else {
+ if (QLC_83XX_SFP_10G_CAPABLE(adapter->ahw->port_config))
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ else
+ adapter->ahw->port_type = QLCNIC_GBE;
+
+ if (QLC_83XX_AUTONEG(adapter->ahw->port_config))
+ adapter->ahw->link_autoneg = AUTONEG_ENABLE;
+ }
+ return status;
+}
+
+void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+ else
+ val = BIT_2;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->fw_hal_version = 2;
+ qlcnic_get_func_no(adapter);
+
+ /* Determine function privilege level */
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ adapter->nic_ops = &qlcnic_83xx_ops;
+ }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+
+static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ dev_info(&adapter->pdev->dev,
+ "Host MBX regs(%d)\n", cmd->req.num);
+ for (i = 0; i < cmd->req.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->req.arg[i]);
+ }
+ pr_info("\n");
+ dev_info(&adapter->pdev->dev,
+ "FW MBX regs(%d)\n", cmd->rsp.num);
+ for (i = 0; i < cmd->rsp.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->rsp.arg[i]);
+ }
+ pr_info("\n");
+}
+
+/* Mailbox response for mac rcode */
+static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+{
+ u32 fw_data;
+ u8 mac_cmd_rcode;
+
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
+ return QLCNIC_RCODE_SUCCESS;
+ return 1;
+}
+
+static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ u32 data;
+ unsigned long wait_time = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ /* wait for mailbox completion */
+ do {
+ data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (++wait_time > QLCNIC_MBX_TIMEOUT) {
+ data = QLCNIC_RCODE_TIMEOUT;
+ break;
+ }
+ mdelay(1);
+ } while (!data);
+ return data;
+}
+
+int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+ u16 opcode;
+ u8 mbx_err_code;
+ unsigned long flags;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ opcode = LSW(cmd->req.arg[0]);
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ dev_info(&adapter->pdev->dev,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ dev_info(&adapter->pdev->dev, "Mailbox detached\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+
+ if (mbx_val) {
+ QLCDB(adapter, DRV,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ QLCDB(adapter, DRV,
+ "Mailbox not available, 0x%x, collect FW dump\n",
+ mbx_val);
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return cmd->rsp.arg[0];
+ }
+
+ /* Fill in mailbox registers */
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+
+ /* Signal FW about the impending command */
+ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+poll:
+ rsp = qlcnic_83xx_mbx_poll(adapter);
+ if (rsp != QLCNIC_RCODE_TIMEOUT) {
+ /* Get the FW response data */
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
+ qlcnic_83xx_process_aen(adapter);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val)
+ goto poll;
+ }
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+ opcode = QLCNIC_MBX_RSP(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ rsp = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ rsp = qlcnic_83xx_mac_rcode(adapter);
+ if (!rsp)
+ goto out;
+ }
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x\n",
+ opcode, mbx_err_code);
+ rsp = mbx_err_code;
+ qlcnic_dump_mbx(adapter, cmd);
+ break;
+ }
+ goto out;
+ }
+
+ dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+ QLCNIC_MBX_RSP(mbx_cmd));
+ rsp = QLCNIC_RCODE_TIMEOUT;
+out:
+ /* clear fw mbx control register */
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return rsp;
+}
+
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ u32 temp;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_83xx_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
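+ /* arg[0]: command code in the low word, request register
+ * count at bit 16, firmware HAL version at bit 29
+ */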
+ temp = adapter->ahw->fw_hal_version << 29;
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ break;
+ }
+ }
+ return 0;
+}
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+
+ for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+ cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "%s: Mailbox IDC ACK failed.\n", __func__);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(data[0]));
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+}
+
+void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 event[QLC_83XX_MBX_AEN_CNT];
+ int i;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+ switch (QLCNIC_MBX_RSP(event[0])) {
+
+ case QLCNIC_MBX_LINK_EVENT:
+ qlcnic_83xx_handle_link_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_COMP_EVENT:
+ qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_REQUEST_EVENT:
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+ queue_delayed_work(adapter->qlcnic_wq,
+ &adapter->idc_aen_work, 0);
+ break;
+ case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ break;
+ case QLCNIC_MBX_SFP_INSERT_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_SFP_REMOVE_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ default:
+ dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ }
+
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+ int index, i, err, sds_mbx_size;
+ u32 *buf, intrpt_id, intr_mask;
+ u16 context_id;
+ u8 num_sds;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_add_rings_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ context_id = recv_ctx->context_id;
+ num_sds = (adapter->max_sds_rings - QLCNIC_MAX_RING_SETS);
+ ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
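+ /* status rings beyond those configured by the CREATE_RX_CTX
+ * command are added here
+ */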
+ /* set up status rings, mbx 2-81 */
+ index = 2;
+ for (i = 8; i < adapter->max_sds_rings; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.sds_ring_size = sds->num_desc;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to add rings %d\n", err);
+ goto out;
+ }
+
+ mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+ index = 0;
+ /* status descriptor ring */
+ for (i = 8; i < adapter->max_sds_rings; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[index];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ index++;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int i, err, index, sds_mbx_size, rds_mbx_size;
+ u8 num_sds, num_rds;
+ u32 *buf, intrpt_id, intr_mask, cap = 0;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_host_rds_ring *rds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_rds_mbx rds_mbx;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ num_rds = adapter->max_rds_rings;
+
+ if (adapter->max_sds_rings <= QLCNIC_MAX_RING_SETS)
+ num_sds = adapter->max_sds_rings;
+ else
+ num_sds = QLCNIC_MAX_RING_SETS;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+ cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+ /* set mailbox hdr and capabilities */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CREATE_RX_CTX);
+ cmd.req.arg[1] = cap;
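+ /* arg[5]: bit 0 set, RDS ring count at bit 5, SDS ring count
+ * at bit 8, host RDS mode at bit 16
+ */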
+ cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+ (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+ /* set up status rings, mbx 8-57/87 */
+ index = QLC_83XX_HOST_SDS_MBX_IDX;
+ for (i = 0; i < num_sds; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.sds_ring_size = sds->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+ /* set up receive rings, mbx 88-111/135 */
+ index = QLCNIC_HOST_RDS_MBX_IDX;
+ rds = &recv_ctx->rds_rings[0];
+ rds->producer = 0;
+ memset(&rds_mbx, 0, rds_mbx_size);
+ rds_mbx.phy_addr_reg = rds->phys_addr;
+ rds_mbx.reg_ring_sz = rds->dma_size;
+ rds_mbx.reg_ring_len = rds->num_desc;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->producer = 0;
+ rds_mbx.phy_addr_jmb = rds->phys_addr;
+ rds_mbx.jmb_ring_sz = rds->dma_size;
+ rds_mbx.jmb_ring_len = rds->num_desc;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &rds_mbx, rds_mbx_size);
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Rx ctx in firmware%d\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+ recv_ctx->context_id = mbx_out->ctx_id;
+ recv_ctx->state = mbx_out->state;
+ recv_ctx->virt_port = mbx_out->vport_id;
+ dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ /* Receive descriptor ring */
+ /* Standard ring */
+ rds = &recv_ctx->rds_rings[0];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].reg_buf;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].jmb_buf;
+ /* status descriptor ring */
+ for (i = 0; i < num_sds; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[i];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ if (adapter->max_sds_rings > QLCNIC_MAX_RING_SETS)
+ err = qlcnic_83xx_add_rings(adapter);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx, int ring)
+{
+ int err;
+ u16 msix_id;
+ u32 *buf, intr_mask;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_tx_mbx mbx;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Reset host resources */
+ tx->producer = 0;
+ tx->sw_consumer = 0;
+ *(tx->hw_consumer) = 0;
+
+ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+ /* setup mailbox inbox registers */
+ mbx.phys_addr = tx->phys_addr;
+ mbx.cnsmr_index = tx->hw_cons_phys_addr;
+ mbx.size = tx->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
+ else
+ msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ mbx.intr_id = msix_id;
+ else
+ mbx.intr_id = 0xffff;
+ mbx.src = 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+ cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES;
+ buf = &cmd.req.arg[6];
+ memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+ /* send the mailbox command */
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+ tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+ tx->ctx_id = mbx_out->ctx_id;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
+ tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+ dev_info(&adapter->pdev->dev, "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ /* disable and free mailbox interrupt */
+ qlcnic_83xx_free_mbx_intr(adapter);
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring, err;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to setup mbx interrupt\n",
+ __func__);
+ goto out;
+ }
+ }
+ adapter->ahw->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 beacon)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_in;
+ int i, status = 0;
+
+ if (state) {
+ /* Get LED configuration */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_CONFIG);
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get led config failed.\n");
+ goto mbx_err;
+ } else {
+ for (i = 0; i < 4; i++)
+ adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+ }
+ qlcnic_free_mbx_args(&cmd);
+ /* Set LED Configuration */
+ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+ LSW(QLC_83XX_LED_CONFIG);
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ cmd.req.arg[1] = mbx_in;
+ cmd.req.arg[2] = mbx_in;
+ cmd.req.arg[3] = mbx_in;
+ if (beacon)
+ cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Set led config failed.\n");
+ }
+mbx_err:
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+
+ } else {
+ /* Restoring default LED configuration */
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+ cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+ cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+ if (beacon)
+ cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Restoring led config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+ }
+}
+
+void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int status;
+
+ if (enable) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
+ cmd.req.arg[1] = BIT_0 | BIT_31;
+ } else {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+ cmd.req.arg[1] = BIT_0 | BIT_31;
+ }
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s in NIC IDC function event.\n",
+ (enable ? "register" : "unregister"));
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+ cmd.req.arg[1] = adapter->ahw->port_config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Get Port config failed\n");
+ else
+ adapter->ahw->port_config = cmd.rsp.arg[1];
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "Setup linkevent mailbox failed\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "Promiscous mode config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0, loop = 0, max_sds_rings = adapter->max_sds_rings;
+
+ QLCDB(adapter, DRV, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported for non privilege function\n");
+ return ret;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(500);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -QLCNIC_FW_NOT_RESPOND;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_alloc:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0, loop = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status)
+ return status;
+
+ config = ahw->port_config;
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to Set Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(300);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_err(&adapter->pdev->dev,
+ "FW did not generate IDC completion AEN\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ return -EIO;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_ADD);
+ return status;
+}
+
+int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0, loop = 0;
+ u32 config = ahw->port_config;
+
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to Clear Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(300);
+ qlcnic_83xx_process_aen(adapter);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware didn't sent IDC completion AEN\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EIO;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_DEL);
+ return status;
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+ int mode)
+{
+ int err;
+ u32 temp, temp_ip;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
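+ /* arg[1]: 1 = add IP, 2 = remove IP; receive context id in the high word */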
+ if (mode == QLCNIC_IP_UP) {
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = 1 | temp;
+ } else {
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = 2 | temp;
+ }
+
+ /*
+ * The adapter expects the IP address in network byte order, but
+ * the mailbox registers are written with writel(), which swaps
+ * the value on big endian architectures. Use swab32() to cancel
+ * that swap so the device sees the address in network order.
+ */
+
+ temp_ip = swab32(ntohl(ip));
+ memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x request\n",
+ (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+ int err;
+ u32 temp, arg1;
+ struct qlcnic_cmd_args cmd;
+ int lro_bit_mask;
+
+ lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+ temp = adapter->recv_ctx->context_id << 16;
+ arg1 = lro_bit_mask | temp;
+ cmd.req.arg[1] = arg1;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "LRO config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 word;
+ struct qlcnic_cmd_args cmd;
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+
+ /*
+ * RSS request:
+ * bits 3-0: Rsvd
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 16-31: indirection table mask
+ */
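+ /* e.g. with RSS_HASHTYPE_IP_TCP (0x3) and enable set, word evaluates to 0x000701f0 */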
+ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u32)(enable & 0x1) << 8) |
+ ((0x7ULL) << 16);
+ cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+ cmd.req.arg[2] = word;
+ memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_info(&adapter->pdev->dev, "RSS config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+
+}
+
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, u8 op)
+{
+ int err;
+ u32 *buf;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_macvlan_mbx mv;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ return err;
+ cmd.req.arg[1] = op | (1 << 8) |
+ (adapter->recv_ctx->context_id << 16);
+
+ mv.vlan = le16_to_cpu(vlan_id);
+ memcpy(&mv.mac, addr, ETH_ALEN);
+ buf = &cmd.req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "MAC-VLAN %s to CAM failed, err=%d.\n",
+ ((op == 1) ? "add " : "delete "), err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ __le16 vlan_id)
+{
+ u8 mac[ETH_ALEN];
+ memcpy(&mac, addr, ETH_ALEN);
+ qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
+{
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+ memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+ break;
+ }
+ cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
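+ /* rsp.arg[2] carries MAC bytes 0-1, rsp.arg[1] carries bytes 2-5 */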
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ cmd.req.arg[1] = 1 | (adapter->recv_ctx->context_id << 16);
+ cmd.req.arg[3] = coal->flag;
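+ /* arg[2]: packet threshold in the low word, time in usecs in the high word */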
+ temp = coal->rx_time_us << 16;
+ cmd.req.arg[2] = coal->rx_packets | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_info(&adapter->pdev->dev,
+ "Failed to send interrupt coalescence parameters\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ u8 link_status, duplex;
+ /* data[2] high word: link speed; data[3]: bit 0 = link state,
+ * byte 1 = module type, byte 2 = duplex, byte 3 = autoneg
+ */
+ link_status = LSB(data[3]) & 1;
+ adapter->ahw->link_speed = MSW(data[2]);
+ adapter->ahw->link_autoneg = MSB(MSW(data[3]));
+ adapter->ahw->module_type = MSB(LSW(data[3]));
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ adapter->ahw->link_duplex = DUPLEX_FULL;
+ else
+ adapter->ahw->link_duplex = DUPLEX_HALF;
+ adapter->ahw->has_link_events = 1;
+ qlcnic_advert_link_change(adapter, link_status);
+}
+
+irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ unsigned long flags;
+ u32 mask, resp, event;
+
+ spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT)
+ qlcnic_83xx_process_aen(adapter);
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+int qlcnic_enable_eswitch(struct qlcnic_adapter *adapter, u8 port, u8 enable)
+{
+ int err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH);
+ cmd.req.arg[1] = (port & 0xf) | BIT_4;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "Failed to enable eswitch%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int i, err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ cmd.req.arg[1] = (nic->pci_func << 16);
+ cmd.req.arg[2] = 0x1 << 16;
+ cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+ cmd.req.arg[4] = nic->capabilities;
+ cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+ cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+ cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+ for (i = 8; i < 32; i++)
+ cmd.req.arg[i] = 0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev, "Failed to set nic info%d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ u32 temp;
+ u8 op = 0;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (func_id != adapter->ahw->pci_func) {
+ temp = func_id << 16;
+ cmd.req.arg[1] = op | BIT_31 | temp;
+ } else {
+ cmd.req.arg[1] = adapter->ahw->pci_func << 16;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to get nic info %d\n", err);
+ goto out;
+ }
+
+ npar_info->op_type = cmd.rsp.arg[1];
+ npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+ npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+ npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+ npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+ npar_info->capabilities = cmd.rsp.arg[4];
+ npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+ npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+ npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+ npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+ npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+ npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+ if (cmd.rsp.arg[8] & 0x1)
+ npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+ if (cmd.rsp.arg[8] & 0x10000) {
+ temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+ npar_info->max_linkspeed_reg_offset = temp;
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ int i, err = 0, j = 0;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ adapter->ahw->act_pci_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ pci_info->func_count = cmd.rsp.arg[1] & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: total functions = %d\n",
+ __func__, pci_info->func_count);
+ for (i = 2, j = 0; j < QLCNIC_MAX_PCI_FUNC; j++, pci_info++) {
+ pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+ pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ i++;
+ pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+ if (pci_info->type == QLCNIC_TYPE_NIC)
+ adapter->ahw->act_pci_func++;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->default_port = temp;
+ i++;
+ pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->tx_max_bw = temp;
+ i = i + 2;
+ memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+ i++;
+ memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+ i = i + 3;
+
+ dev_info(&adapter->pdev->dev, "%s:\n"
+ "\tid = %d active = %d type = %d\n"
+ "\tport = %d min bw = %d max bw = %d\n"
+ "\tmac_addr = %pM\n", __func__,
+ pci_info->id, pci_info->active, pci_info->type,
+ pci_info->default_port, pci_info->tx_min_bw,
+ pci_info->tx_max_bw, pci_info->mac);
+ }
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get PCI info, err %d\n",
+ err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
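+/* Add or delete interrupt sources through the CONFIG_INTRPT mailbox
+ * command and update the driver interrupt table from the response.
+ */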
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+ int i, index, err;
+ bool type;
+ u8 max_ints;
+ u32 val, temp;
+ struct qlcnic_cmd_args cmd;
+
+ max_ints = adapter->ahw->num_msix - 1;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+ cmd.req.arg[1] = max_ints;
+ for (i = 0, index = 2; i < max_ints; i++) {
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (adapter->ahw->intr_tbl[i].type << 4);
+ if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (adapter->ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[index++] = val;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts 0x%x\n", err);
+ goto out;
+ }
+
+ max_ints = cmd.rsp.arg[1];
+ for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+ val = cmd.rsp.arg[index];
+ if (LSB(val)) {
+ dev_info(&adapter->pdev->dev,
+ "Can't configure interrupt %d\n",
+ adapter->ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ adapter->ahw->intr_tbl[i].id = MSW(val);
+ adapter->ahw->intr_tbl[i].enabled = 1;
+ temp = cmd.rsp.arg[index + 1];
+ adapter->ahw->intr_tbl[i].src = temp;
+ } else {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
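+/* Acquire the inter-function flash lock semaphore; give up and report
+ * the current owner if the lock is not granted within the timeout.
+ */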
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+ int id, timeout = 0;
+ u32 status = 0;
+
+ while (status == 0) {
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (status)
+ break;
+
+ if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+ id = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FLASH_LOCK_OWNER);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, lock held by %d\n", __func__, id);
+ return -EIO;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+ return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
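+/* Read 'count' dwords from flash through the direct access window.
+ * The caller is expected to hold the flash lock; the direct window is
+ * reprogrammed whenever the read crosses a sector boundary.
+ */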
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+ u32 flash_addr, u8 *p_data,
+ int count)
+{
+ int i, ret;
+ u32 word, range, flash_offset, addr = flash_addr;
+ ulong indirect_add, direct_window;
+
+ flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr));
+
+ range = flash_offset + (count * sizeof(u32));
+ /* Check if data is spread across multiple sectors */
+ if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_add);
+ if (ret == -EIO)
+ return -EIO;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+ /* This write is needed once for each sector */
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ direct_window,
+ (addr));
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_add);
+ if (ret == -EIO)
+ return -EIO;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+ int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+
+ do {
+ status = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_STATUS);
+ if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+ QLC_83XX_FLASH_STATUS_READY)
+ break;
+
+ msleep(QLC_83XX_FLASH_STATUS_REG_POLL_DELAY);
+ } while (--retries);
+
+ if (!retries)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
+{
+ int ret;
+ u32 cmd;
+
+ cmd = adapter->ahw->fdt.write_statusreg_cmd;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_enable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+ adapter->ahw->fdt.write_statusreg_cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_disable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+ int ret, mfg_id;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
+ if (mfg_id == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ adapter->flash_mfg_id = (mfg_id & 0xFF);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+ int count, fdt_size, ret = 0;
+
+ fdt_size = sizeof(struct qlcnic_fdt);
+ count = fdt_size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ memset(&adapter->ahw->fdt, 0, fdt_size);
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+ (u8 *)&adapter->ahw->fdt,
+ count);
+
+ qlcnic_83xx_unlock_flash(adapter);
+ return ret;
+}
+
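+/* Erase one flash sector: enable writes for known manufacturer parts,
+ * issue the erase command with the byte-swapped sector address, poll for
+ * completion and disable writes again before releasing the flash lock.
+ */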
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+ u32 sector_start_addr)
+{
+ u32 reversed_addr, addr1, addr2, cmd;
+ int ret = -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write_op(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ addr1 = (sector_start_addr & 0xFF) << 16;
+ addr2 = (sector_start_addr & 0xFF0000) >> 16;
+ reversed_addr = addr1 | addr2;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ reversed_addr);
+ cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_ERASE_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write_op(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data)
+{
+ int ret = -EIO;
+ u32 addr1 = 0x00800000 | (addr >> 2);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
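+/* Burst write of 2 to 64 dwords using the first/middle/last SPI write
+ * patterns, polling the flash status register after each dword and
+ * checking the SPI status register at the end.
+ */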
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data, int count)
+{
+ u32 temp;
+ int ret = -EIO;
+
+ if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid word count\n", __func__);
+ return -EIO;
+ }
+
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
+ /* First DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_FIRST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ count--;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+ /* Second to N-1 DWORD writes */
+ while (count != 1) {
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ count--;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL |
+ (addr >> 2));
+ /* Last DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
+ if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+ dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+ __func__, __LINE__);
+ /* Operation failed, clear error bit */
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL);
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ }
+
+ return 0;
+}
+
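+/* Recover a stale driver lock: advertise this function as the recovering
+ * owner, wait, and if it still owns the recovery, force-release the lock
+ * and clear the recovery bits.
+ */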
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+ u32 val, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+ /* Check if recovery needs to be performed by the calling function */
+ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+ val = val & ~0x3F;
+ val = val | ((adapter->portnum << 2) |
+ QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated\n", __func__);
+ msleep(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+ id = ((val >> 2) & 0xF);
+ if (id == adapter->portnum) {
+ val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+ val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ /* Force release the lock */
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+ /* Clear recovery bits */
+ val = val & ~0x3F;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery completed\n", __func__);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: func %d to resume lock recovery process\n",
+ __func__, id);
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated by another function\n",
+ __func__);
+ }
+}
+
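+/* Acquire the inter-driver lock. If the lock owner does not change within
+ * the wait counter, trigger lock recovery; on success, increment the
+ * lock-alive counter and record this function as the owner.
+ */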
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+ int max_attempt = 0;
+
+ while (status == 0) {
+ status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+ if (status)
+ break;
+
+ msleep(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ i++;
+
+ if (i == 1)
+ temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+ if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ if (val == temp) {
+ id = val & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: lock to be recovered from %d\n",
+ __func__, id);
+ qlcnic_83xx_recover_driver_lock(adapter);
+ i = 0;
+ max_attempt++;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+ /* Force exit from while loop after a few attempts */
+ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ lock_alive_counter = val >> 8;
+ lock_alive_counter++;
+ val = lock_alive_counter << 8 | adapter->portnum;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+ return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 val, lock_alive_counter, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = val & 0xFF;
+ lock_alive_counter = val >> 8;
+
+ if (id != adapter->portnum)
+ dev_err(&adapter->pdev->dev,
+ "%s: Warning, func %d is unlocking lock owned by %d\n",
+ __func__, adapter->portnum, id);
+
+ val = (lock_alive_counter << 8) | 0xFF;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
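+/* Write 'count' 128-bit chunks to QDR/DDR memory through the indirect
+ * MS interface; the destination address must be 16-byte aligned and fall
+ * within the QDR or DDR address range.
+ */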
+int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+ u32 *data, u32 count)
+{
+ int i, j, ret = 0;
+ u32 temp;
+
+ /* Check alignment */
+ if (addr & 0xF)
+ return -EIO;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) ||
+ (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
+ *data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
+ QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
+ QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_83xx_rd_reg_indirect(adapter,
+ QLCNIC_MS_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failure */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_WARNING
+ "MS memory write failed\n");
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
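+/* Locked dword read from flash: take the flash lock, program the direct
+ * window for every dword and read it through the direct data region.
+ */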
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+ u8 *p_data, int count)
+{
+ int i, ret;
+ u32 word, addr = flash_addr;
+ ulong indirect_addr;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr))) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = qlcnic_83xx_rd_reg_indirect(adapter,
+ indirect_addr);
+ if (ret == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 config = 0, state;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) {
+ dev_info(&adapter->pdev->dev, "link state down\n");
+ return config;
+ }
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[1];
+ switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+ case QLC_83XX_10M_LINK:
+ ahw->link_speed = SPEED_10;
+ break;
+ case QLC_83XX_100M_LINK:
+ ahw->link_speed = SPEED_100;
+ break;
+ case QLC_83XX_1G_LINK:
+ ahw->link_speed = SPEED_1000;
+ break;
+ case QLC_83XX_10G_LINK:
+ ahw->link_speed = SPEED_10000;
+ break;
+ default:
+ ahw->link_speed = 0;
+ break;
+ }
+ config = cmd.rsp.arg[3];
+ if (config & 1)
+ err = 1;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return config;
+}
+
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
+{
+ u32 config = 0;
+ int status = 0;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ /* hard-coded until there is a way to get it from flash */
+ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+ return status;
+}
+
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
+{
+ int status = 0;
+ u32 config = adapter->ahw->port_config;
+
+ if (ecmd->autoneg)
+ adapter->ahw->port_config |= BIT_15;
+
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ adapter->ahw->port_config |= BIT_8;
+ break;
+ case SPEED_100:
+ adapter->ahw->port_config |= BIT_9;
+ break;
+ case SPEED_1000:
+ adapter->ahw->port_config |= BIT_10;
+ break;
+ case SPEED_10000:
+ adapter->ahw->port_config |= BIT_11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set link speed and autoneg.\n");
+ adapter->ahw->port_config = config;
+ }
+ return status;
+}
+
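+/* Combine two consecutive 32-bit mailbox response words into one 64-bit
+ * counter and advance the destination pointer.
+ */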
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+ u64 *data, int index)
+{
+ u32 low, hi;
+ u64 val;
+
+ low = cmd->rsp.arg[index];
+ hi = cmd->rsp.arg[index + 1];
+ val = (((u64) low) | (((u64) hi) << 32));
+ *data++ = val;
+ return data;
+}
+
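+/* Issue the GET_STATISTICS mailbox command and copy the requested counter
+ * block (MAC, Rx or Tx) into the caller's buffer, skipping the reserved
+ * words in the firmware response layout.
+ */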
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd, u64 *data,
+ int type, int *ret)
+{
+ int err, k, total_regs;
+
+ *ret = 0;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Error in get statistics mailbox command\n");
+ *ret = -EIO;
+ return data;
+ }
+ total_regs = cmd->rsp.num;
+ switch (type) {
+ case QLC_83XX_STAT_MAC:
+ /* fill in MAC tx counters */
+ for (k = 2; k < 28; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx counters */
+ for (k += 6; k < 60; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx frame stats */
+ for (k += 6; k < 80; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_RX:
+ for (k = 2; k < 8; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < 24; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes containing RE1FBQ error data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_TX:
+ for (k = 2; k < 10; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+ *ret = -EIO;
+ }
+ return data;
+}
+
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+ /* Get Tx stats */
+ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+ cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_TX, &ret);
+ if (ret) {
+ dev_info(&adapter->pdev->dev, "Error getting Tx stats\n");
+ goto out;
+ }
+ /* Get MAC stats */
+ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+ cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_MAC, &ret);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "Error getting MAC stats\n");
+ goto out;
+ }
+ /* Get Rx stats */
+ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+ cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_RX, &ret);
+ if (ret)
+ dev_info(&adapter->pdev->dev,
+ "Error getting Rx stats\n");
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+ u32 major, minor, sub;
+
+ major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+ dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+ return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+ sizeof(adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1;
+ j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+ for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+ regs_buff[i++] = QLCRDX(adapter->ahw, j);
+ return i;
+}
+
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u32 data;
+ u16 intrpt_id, id;
+ u8 val;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[0].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
+ id = LSW(data);
+ val = LSB(MSW(data));
+ if (id != intrpt_id)
+ dev_info(&adapter->pdev->dev,
+ "Interrupt generated: 0x%x, requested: 0x%x\n",
+ id, intrpt_id);
+ if (val)
+ dev_err(&adapter->pdev->dev,
+ "Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_83xx_diag_free_res(netdev, max_sds_rings);
+
+fail_diag_irq:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed\n", __func__);
+ return;
+ }
+ config = ahw->port_config;
+ if (config & QLC_83XX_CFG_STD_PAUSE) {
+ if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+ pause->tx_pause = 1;
+ if (config & QLC_83XX_CFG_STD_RX_PAUSE)
+ pause->rx_pause = 1;
+ }
+
+ if (QLC_83XX_AUTONEG(config))
+ pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed.\n", __func__);
+ return status;
+ }
+ config = ahw->port_config;
+
+ if (ahw->port_type == QLCNIC_GBE) {
+ if (pause->autoneg)
+ ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+ if (!pause->autoneg)
+ ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+ } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (!(config & QLC_83XX_CFG_STD_PAUSE))
+ ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+ if (pause->rx_pause && pause->tx_pause) {
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ } else if (pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+ } else if (pause->tx_pause && !pause->rx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+ } else if (!pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Set Pause Config failed.\n", __func__);
+ ahw->port_config = config;
+ }
+ return status;
+}
+
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_READ_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
+ return ret & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_read_flash_status_reg(adapter);
+ if (status == -EIO) {
+ dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644
index 00000000000..61f81f6c84a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -0,0 +1,438 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "qlcnic_hw.h"
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE 0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE 0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE 0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR 10
+#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << (f * 4)))
+#define QLC_83XX_INTX_PTR 0x38C0
+#define QLC_83XX_INTX_TRGR 0x38C4
+#define QLC_83XX_INTX_MASK 0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
+
+#define QLC_83XX_NO_NIC_RESOURCE 0x5
+#define QLC_83XX_MAC_PRESENT 0xC
+#define QLC_83XX_MAC_ABSENT 0xD
+
+#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE 0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100
+#define QLC_83XX_LEGACY_INTX_DELAY 4
+#define QLC_83XX_REG_DESC 1
+#define QLC_83XX_LRO_DESC 2
+#define QLC_83XX_CTRL_DESC 3
+#define QLC_83XX_FW_CAPABILITY_TSO BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0
+#define QLC_83XX_HOST_SDS_MBX_IDX 8
+
+#define QLCNIC_HOST_RDS_MBX_IDX 88
+#define QLCNIC_MAX_RING_SETS 8
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG 0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388
+#define QLC_83XX_PORT0_TC_STATS 0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS 0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0 0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1 0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2 0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3 0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH 0
+#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+struct qlcnic_intrpt_config {
+ u8 type;
+ u8 enabled;
+ u16 id;
+ u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+};
+
+struct qlc_83xx_fw_info {
+ const struct firmware *fw;
+ u16 major_fw_version;
+ u8 minor_fw_version;
+ u8 sub_fw_version;
+ u8 fw_build_num;
+ u8 load_from_file;
+};
+
+struct qlc_83xx_reset {
+ struct qlc_83xx_reset_hdr *hdr;
+ int seq_index;
+ int seq_error;
+ int array_index;
+ u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+ u8 *buff;
+ u8 *stop_offset;
+ u8 *start_offset;
+ u8 *init_offset;
+ u8 seq_end;
+ u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_TIMESTAMP 0
+#define QLC_83XX_IDC_DURATION 1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20
+#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH 2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16
+#define QLC_83XX_IDC_MAJOR_VERSION 1
+#define QLC_83XX_IDC_MINOR_VERSION 0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
+
+struct qlcnic_adapter;
+struct qlc_83xx_idc {
+ int (*state_entry) (struct qlcnic_adapter *);
+ u64 sec_counter;
+ u64 delay;
+ unsigned long status;
+ int err_code;
+ int collect_dump;
+ u8 curr_state;
+ u8 prev_state;
+ u8 vnic_state;
+ u8 vnic_wait_limit;
+ u8 quiesce_req;
+ char **name;
+};
+
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN 3
+#define QLC_83XX_MBX_AEN_CNT 5
+#define QLC_83XX_MODULE_LOADED 1
+#define QLC_83XX_MBX_READY 2
+#define QLC_83XX_MBX_AEN_ACK 3
+#define QLC_83XX_SFP_PRESENT(data) ((data) & 3)
+#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
+#define QLC_83XX_SFP_10G_CAPABLE(data) ((data) & BIT_11)
+#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
+#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_LED_RATE 0xff
+#define QLC_83XX_LED_ACT (1 << 10)
+#define QLC_83XX_LED_MOD (0 << 13)
+#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+ QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK 1
+#define QLC_83XX_100M_LINK 2
+#define QLC_83XX_1G_LINK 3
+#define QLC_83XX_10G_LINK 4
+#define QLC_83XX_STAT_TX 3
+#define QLC_83XX_STAT_RX 2
+#define QLC_83XX_STAT_MAC 1
+#define QLC_83XX_TX_STAT_REGS 14
+#define QLC_83XX_RX_STAT_REGS 40
+#define QLC_83XX_MAC_STAT_REGS 80
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2))
+#define QLC_83XX_DEFAULT_OPMODE 0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC 0x1
+#define QLC_83XX_VIRTUAL_FUNC 0x2
+
+#define QLC_83XX_LB_MAX_FILTERS 2048
+#define QLC_83XX_LB_BUCKET_SIZE 256
+#define QLC_83XX_MINIMUM_VECTOR 3
+
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
+#define QLC_83XX_DEFAULT_MODE 0x0
+#define QLCNIC_BRDTYPE_83XX_10G 0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLC_83XX_FLASH_STATUS 0x42100004
+#define QLC_83XX_FLASH_CONTROL 0x42110004
+#define QLC_83XX_FLASH_ADDR 0x42110008
+#define QLC_83XX_FLASH_WRDATA 0x4211000C
+#define QLC_83XX_FLASH_RDDATA 0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF & (DATA)))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
+#define QLC_83XX_FLASH_STATUS_READY 0x6
+#define QLC_83XX_FLASH_BULK_WRITE_MIN 2
+#define QLC_83XX_FLASH_BULK_WRITE_MAX 64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLC_83XX_ERASE_MODE 1
+#define QLC_83XX_WRITE_MODE 2
+#define QLC_83XX_BULK_WRITE_MODE 3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF 0x0
+#define QLC_83XX_FLASH_READ_CTRL 0x3F
+#define QLC_83XX_FLASH_SPI_CTRL 0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+ QLCNIC_GLOBAL_RESET = 0,
+ QLCNIC_WILDCARD,
+ QLCNIC_INFORMANT,
+ QLCNIC_HOST_MBX_CTRL,
+ QLCNIC_FW_MBX_CTRL,
+ QLCNIC_BOOTLOADER_ADDR,
+ QLCNIC_BOOTLOADER_SIZE,
+ QLCNIC_FW_IMAGE_ADDR,
+ QLCNIC_MBX_INTR_ENBL,
+ QLCNIC_DEF_INT_MASK,
+ QLCNIC_DEF_INT_ID,
+ QLC_83XX_IDC_MAJ_VERSION,
+ QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DRV_PRESENCE,
+ QLC_83XX_IDC_DRV_ACK,
+ QLC_83XX_IDC_CTRL,
+ QLC_83XX_IDC_DRV_AUDIT,
+ QLC_83XX_IDC_MIN_VERSION,
+ QLC_83XX_RECOVER_DRV_LOCK,
+ QLC_83XX_IDC_PF_0,
+ QLC_83XX_IDC_PF_1,
+ QLC_83XX_IDC_PF_2,
+ QLC_83XX_IDC_PF_3,
+ QLC_83XX_IDC_PF_4,
+ QLC_83XX_IDC_PF_5,
+ QLC_83XX_IDC_PF_6,
+ QLC_83XX_IDC_PF_7,
+ QLC_83XX_IDC_PF_8,
+ QLC_83XX_IDC_PF_9,
+ QLC_83XX_IDC_PF_10,
+ QLC_83XX_IDC_PF_11,
+ QLC_83XX_IDC_PF_12,
+ QLC_83XX_IDC_PF_13,
+ QLC_83XX_IDC_PF_14,
+ QLC_83XX_IDC_PF_15,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+ QLC_83XX_DRV_OP_MODE,
+ QLC_83XX_VNIC_STATE,
+ QLC_83XX_DRV_LOCK,
+ QLC_83XX_DRV_UNLOCK,
+ QLC_83XX_DRV_LOCK_ID,
+ QLC_83XX_ASIC_TEMP,
+};
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_intr_coalesce(struct qlcnic_adapter *);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, __le16);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+ struct qlcnic_info *);
+void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_enable_intr(struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+void qlcnic_83xx_disable_intr(struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+ const struct pci_device_id *);
+void qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+int qlcnic_enable_eswitch(struct qlcnic_adapter *, u8, u8);
+int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+ u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_get_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644
index 00000000000..c53832b02b3
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -0,0 +1,2054 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION 0x0101
+
+#define QLC_83XX_OPCODE_NOP 0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST 0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002
+#define QLC_83XX_OPCODE_POLL_LIST 0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020
+#define QLC_83XX_OPCODE_SEQ_END 0x0040
+#define QLC_83XX_OPCODE_TMPL_END 0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+ u16 version;
+ u16 signature;
+ u16 size;
+ u16 entries;
+ u16 hdr_size;
+ u16 checksum;
+ u16 init_offset;
+ u16 start_offset;
+} __packed;
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+ u32 mask;
+ u32 status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+ u32 mask;
+ u32 xor_value;
+ u32 or_value;
+ u8 shl;
+ u8 shr;
+ u8 index_a;
+ u8 rsvd;
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+ u32 arg1;
+ u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+ u32 dr_addr;
+ u32 dr_value;
+ u32 ar_addr;
+ u32 ar_value;
+} __packed;
+
+static const char *const qlc_83xx_idc_states[] = {
+ "Unknown",
+ "Cold",
+ "Init",
+ "Ready",
+ "Need Reset",
+ "Need Quiesce",
+ "Failed",
+ "Quiesce"
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ if ((val & 0xFFFF))
+ return 1;
+ else
+ return 0;
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+ u32 cur, prev;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+
+ dev_info(&adapter->pdev->dev,
+ "current state = %s, prev state = %s\n",
+ adapter->ahw->idc.name[cur],
+ adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+ u8 mode, int lock)
+{
+ u32 val;
+ int seconds;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = adapter->portnum & 0xf;
+ val |= mode << 7;
+ if (mode)
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ else
+ seconds = jiffies / HZ;
+
+ val |= seconds << 8;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+ adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+ val = val & ~(0x3 << (adapter->portnum * 2));
+ val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ val = val & ~0xFF;
+ val = val | QLC_83XX_IDC_MAJOR_VERSION;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+ int status, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+ if (status)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ qlcnic_83xx_idc_update_minor_version(adapter);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u8 version;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ version = val & 0xFF;
+
+ if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+ dev_info(&adapter->pdev->dev,
+ "%s: version mismatch 0x%x, expected 0x%x\n",
+ __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+ /* Clear graceful reset bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+ int flag, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ if (flag)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+ int time_limit)
+{
+ u64 seconds;
+
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ if (seconds <= time_limit)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+ int timeout;
+ u32 ack, presence, val;
+
+ timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ dev_info(&adapter->pdev->dev,
+ "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+ if ((ack & presence) != presence) {
+ if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+ /* Clear functions which failed to ACK */
+ dev_info(&adapter->pdev->dev,
+ "%s: ACK wait exceeds time limit\n", __func__);
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(ack ^ presence);
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: updated drv presence reg = 0x%x\n",
+ __func__, val);
+ qlcnic_83xx_unlock_driver(adapter);
+ return 0;
+
+ } else {
+ return 1;
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: Reset ACK received from all functions\n",
+ __func__);
+ return 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns -EBUSY or Success (0)
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ qlcnic_down(adapter, netdev);
+ qlcnic_up(adapter, netdev);
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ dev_err(&adapter->pdev->dev, "%s:\n", __func__);
+
+ adapter->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach net interface, stop TX and cleanup resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+ int i;
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ /* Disable mailbox interrupt */
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+ qlcnic_down(adapter, netdev);
+ for (i = 0; i < adapter->ahw->num_msix; i++) {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+}
+
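+/* The qlcnic_83xx_idc_enter_*() helpers below move the device to a specific
+ * IDC state, taking the driver lock when @lock is set.
+ */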
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ qlcnic_83xx_idc_log_state_history(adapter);
+ dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_RESET);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC takes precedence over iSCSI, and iSCSI takes precedence over FCoE.
+ * Within the same class, the function with the lowest PCI ID assumes
+ * ownership.
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+ u32 reg, reg1, reg2, i, j, owner, class;
+
+ reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+ reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+ owner = QLCNIC_TYPE_NIC;
+ i = 0;
+ j = 0;
+ reg = reg1;
+
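+ /* Partition info registers hold four bits per function; the low two
+ * bits of each nibble encode the function class. Scan for the highest
+ * precedence class present and return its function index.
+ */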
+ do {
+ class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+ if (class == owner)
+ break;
+ if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+ reg = reg2;
+ j = 0;
+ } else {
+ j++;
+ }
+
+ if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+ if (owner == QLCNIC_TYPE_NIC)
+ owner = QLCNIC_TYPE_ISCSI;
+ else if (owner == QLCNIC_TYPE_ISCSI)
+ owner = QLCNIC_TYPE_FCOE;
+ else if (owner == QLCNIC_TYPE_FCOE)
+ return -EIO;
+ reg = reg1;
+ j = 0;
+ i = 0;
+ }
+ } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+ return i;
+}
+
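+/* Restart the HW; on success clear the IDC registers and enter the READY
+ * state, otherwise enter the failed state.
+ */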
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+ int ret = 0;
+
+ ret = qlcnic_83xx_restart_hw(adapter);
+
+ if (ret) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+ } else {
+ qlcnic_83xx_idc_clear_registers(adapter, lock);
+ ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "peg halt status1=0x%x\n", status);
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
+ qlcnic_83xx_enable_mbx_intrpt(adapter);
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (qlcnic_83xx_config_intrpt(adapter, 1)) {
+ netdev_err(adapter->netdev,
+ "Failed to enable mbx intr\n");
+ return -EIO;
+ }
+ }
+
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->nic_ops->init_driver(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_attach_driver(adapter);
+
+ return 0;
+}
+
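+/* Reinitialize per-function IDC status bits and bookkeeping after a state
+ * transition.
+ */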
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ adapter->ahw->idc.quiesce_req = 0;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready state initialization; this routine is invoked only
+ * once from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+ /* Re-attach the device if required */
+ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure that a vNIC mode privileged function starts only after vNIC mode
+ * is enabled by the management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Privileged function waits till mgmt function enables VNIC mode */
+ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ if (!ahw->idc.vnic_wait_limit--) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+ return -EIO;
+
+ } else {
+ /* Perform one time initialization from ready state */
+ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+
+ /* If the previous state is UNKNOWN, the device has already
+ * been attached properly by the init routine */
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->idc.err_code = -EIO;
+ dev_err(&adapter->pdev->dev,
+ "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state
+ *
+ * @adapter: adapter structure
+ *
+ * If the HW is up and running, the device enters the READY state.
+ * If a firmware image from the host needs to be loaded, the device is
+ * forced to start with the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_83xx_idc_restart_hw(adapter, 0);
+ } else {
+ if (qlcnic_83xx_check_hw_status(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ return -EIO;
+ } else {
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * The reset owner restarts the device from this state.
+ * The device enters the failed state if it remains in this state for
+ * longer than the DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+ int timeout, ret = 0;
+ u32 owner;
+
+ timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (adapter->ahw->pci_func == owner)
+ ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+ } else {
+ ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform the actions specified by the IDC protocol after monitoring
+ * device state and events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ /* Perform ready state entry actions based on the NIC configuration */
+ if (ahw->idc.state_entry(adapter))
+ return -EIO;
+
+ if (qlcnic_check_temp(adapter)) {
+ if (ahw->temp == QLCNIC_TEMP_PANIC) {
+ qlcnic_83xx_idc_check_fan_failure(adapter);
+ dev_err(&adapter->pdev->dev,
+ "Error: device temperature %d above limits\n",
+ adapter->ahw->temp);
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ ret = qlcnic_83xx_check_heartbeat(adapter);
+ if (ret) {
+ adapter->flags |= QLCNIC_FW_HANG;
+ if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ }
+ return -EIO;
+ }
+
+ if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ /* Move to need reset state and prepare for reset */
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ return ret;
+ }
+
+ /* Check for soft reset request */
+ if (ahw->reset_context &&
+ !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ qlcnic_83xx_idc_tx_soft_reset(adapter);
+ return ret;
+ }
+
+ /* Move to need quiesce state if requested */
+ if (adapter->ahw->idc.quiesce_req) {
+ qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ * reset request ACKs are received from all the functions, or
+ * the wait time exceeds the maximum time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+ int ret = 0;
+
+ if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ }
+
+ /* Check ACK from other functions */
+ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Waiting for reset ACK\n", __func__);
+ return 0;
+ }
+
+ /* Transition to the INIT state and restart the HW */
+ qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ adapter->ahw->idc.err_code = -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+ u32 state)
+{
+ u32 cur, prev, next;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+ next = state;
+
+ if ((next < QLC_83XX_IDC_DEV_COLD) ||
+ (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: curr %d, prev %d, next state %d is invalid\n",
+ __func__, cur, prev, state);
+ return 1;
+ }
+
+ if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+ (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+ if ((next != QLC_83XX_IDC_DEV_COLD) &&
+ (next != QLC_83XX_IDC_DEV_READY)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ if (next == QLC_83XX_IDC_DEV_INIT) {
+ if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+ (prev != QLC_83XX_IDC_DEV_COLD) &&
+ (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll the device state periodically and perform state-specific
+ * actions defined by the Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ u32 state;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_idc_log_state_history(adapter);
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ } else {
+ adapter->ahw->idc.curr_state = state;
+ }
+
+ switch (adapter->ahw->idc.curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ qlcnic_83xx_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ qlcnic_83xx_idc_need_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ qlcnic_83xx_idc_need_quiesce_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ qlcnic_83xx_idc_failed_state(adapter);
+ return;
+ case QLC_83XX_IDC_DEV_INIT:
+ qlcnic_83xx_idc_init_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ qlcnic_83xx_idc_quiesce_state(adapter);
+ break;
+ default:
+ qlcnic_83xx_idc_unknown_state(adapter);
+ return;
+ }
+ adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+ qlcnic_83xx_periodic_tasks(adapter);
+
+ /* Re-schedule the function */
+ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ adapter->ahw->idc.delay);
+}
+
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+ u32 idc_params, val;
+
+ if (qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:failed to get IDC params from flash\n", __func__);
+ adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ } else {
+ adapter->dev_init_timeo = idc_params & 0xFFFF;
+ adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+ }
+
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+ adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ /* Check if reset recovery is disabled */
+ if (!qlcnic_auto_fw_reset) {
+ /* Propagate do not reset request to other functions */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+}
+
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+ u32 state, val;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EIO;
+
+ /* Clear driver lock register */
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+ if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_COLD);
+ state = QLC_83XX_IDC_DEV_COLD;
+ }
+
+ adapter->ahw->idc.curr_state = state;
+ /* First to load function should cold boot the device */
+ if (state == QLC_83XX_IDC_DEV_COLD)
+ qlcnic_83xx_idc_cold_state_handler(adapter);
+
+ /* Check if reset recovery is enabled */
+ if (qlcnic_auto_fw_reset) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+ int ret = -EIO;
+
+ qlcnic_83xx_setup_idc_parameters(adapter);
+
+ if (qlcnic_83xx_get_reset_instruction_template(adapter))
+ return ret;
+
+ if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+ if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+ return -EIO;
+ } else {
+ if (qlcnic_83xx_idc_check_major_version(adapter))
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+ return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+ int id;
+ u32 val;
+
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+
+ if (id == adapter->portnum) {
+ dev_err(&adapter->pdev->dev,
+ "%s: wait for lock recovery.. %d\n", __func__, id);
+ msleep(20);
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+ }
+
+ /* Clear driver presence bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+ u32 val;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, please retry\n", __func__);
+ return;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
+ !qlcnic_auto_fw_reset) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, device in non reset mode\n", __func__);
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+ }
+
+ if (key == QLCNIC_FORCE_FW_RESET) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+ adapter->ahw->idc.collect_dump = 1;
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+}
+
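+/* Copy the bootloader from flash into MS memory at the destination address
+ * and size read from the bootloader registers.
+ */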
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+ u8 *p_cache;
+ u32 src, size;
+ u64 dest;
+ int ret = -EIO;
+
+ src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+ dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+ size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+ /* Round the size up to a 16-byte boundary */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ p_cache = kzalloc(size, GFP_KERNEL);
+ if (p_cache == NULL)
+ return -ENOMEM;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+ size / sizeof(u32));
+ if (ret) {
+ kfree(p_cache);
+ return ret;
+ }
+ /* 16 byte write to MS memory */
+ ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
+ if (ret) {
+ kfree(p_cache);
+ return ret;
+ }
+ kfree(p_cache);
+
+ return ret;
+}
+
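+/* Copy the host firmware image into MS memory in 16-byte chunks */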
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+ u32 dest, *p_cache;
+ u64 addr;
+ u8 data[16];
+ size_t size;
+ int i, ret = -EIO;
+
+ dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+ size = (adapter->ahw->fw_info.fw->size & ~0xF);
+ p_cache = (u32 *)adapter->ahw->fw_info.fw->data;
+ addr = (u64)dest;
+
+ ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
+ (u32 *)p_cache, size / 16);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+ return -EIO;
+ }
+
+ /* Write the unaligned tail bytes, zero-padded to 16 bytes */
+ if (adapter->ahw->fw_info.fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++)
+ data[i] = adapter->ahw->fw_info.fw->data[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "MS memory write failed\n");
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+ return -EIO;
+ }
+ }
+ release_firmware(adapter->ahw->fw_info.fw);
+ adapter->ahw->fw_info.fw = NULL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+ int i, j;
+ u32 val = 0, val1 = 0, reg = 0;
+
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+ dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_THRESHOLD;
+ }
+ for (i = 0; i < 8; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4));
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+ }
+ for (i = 0; i < 4; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4));
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_TC_STATS;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_TC_STATS;
+ }
+ for (i = 7; i >= 0; i--) {
+ val = QLCRD32(adapter, reg);
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ QLCWR32(adapter, reg, (val | (i << 29)));
+ val = QLCRD32(adapter, reg);
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+ dev_info(&adapter->pdev->dev,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1);
+}
+
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+ u32 reg = 0, i, j;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed to acquire driver lock\n", __func__);
+ return;
+ }
+
+ qlcnic_83xx_dump_pause_control_regs(adapter);
+ QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_THRESHOLD;
+
+ for (i = 0; i < 8; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x0);
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+
+ for (i = 0; i < 4; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+ }
+
+ QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+ QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+ dev_info(&adapter->pdev->dev,
+ "Disabled pause frames successfully on all ports\n");
+ qlcnic_83xx_unlock_driver(adapter);
+}
+
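+/* Detect a firmware hang by polling the peg-alive heartbeat counter */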
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+ u32 heartbeat, peg_status;
+ int retries, ret = -EIO;
+
+ retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+ p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != p_dev->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ if (ret) {
+ dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_disable_pause_frames(p_dev);
+ peg_status = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_HALT_STATUS1);
+ dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n", peg_status,
+ QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
+
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&p_dev->pdev->dev,
+ "Device is being reset err code 0x00006700.\n");
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+ u32 val;
+
+ do {
+ val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+ if (val == QLC_83XX_CMDPEG_COMPLETE)
+ return 0;
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+ return -EIO;
+}
+
+int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+ int err;
+
+ err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+ if (err)
+ return err;
+
+ err = qlcnic_83xx_check_heartbeat(p_dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
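+/* Poll a register until (value & mask) == status or the duration expires */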
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+ int duration, u32 mask, u32 status)
+{
+ u32 value;
+ int timeout_error;
+ u8 retries;
+
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ retries = duration / 10;
+
+ do {
+ if ((value & mask) != status) {
+ timeout_error = 1;
+ msleep(duration / 10);
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+ if (timeout_error) {
+ p_dev->ahw->reset.seq_error++;
+ dev_err(&p_dev->pdev->dev,
+ "%s: Timeout Err, entry_num = %d\n",
+ __func__, p_dev->ahw->reset.seq_index);
+ dev_err(&p_dev->pdev->dev,
+ "0x%08x 0x%08x 0x%08x\n",
+ value, mask, status);
+ }
+
+ return timeout_error;
+}
+
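+/* Validate the reset template using a 16-bit checksum over the whole buffer */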
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+ u32 sum = 0;
+ u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+ int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+ while (count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ if (~sum) {
+ return 0;
+ } else {
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+ return -1;
+ }
+}
+
+int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+ u8 *p_buff;
+ u32 addr, count;
+ struct qlcnic_hardware_context *ahw = p_dev->ahw;
+
+ ahw->reset.seq_error = 0;
+ ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+ if (p_dev->ahw->reset.buff == NULL)
+ return -ENOMEM;
+
+ p_buff = p_dev->ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+ count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+ /* Copy template header from flash */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+ ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+ p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+ /* Copy rest of the template */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+
+ if (qlcnic_83xx_reset_template_checksum(p_dev))
+ return -EIO;
+ /* Get Stop, Start and Init command offsets */
+ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+ ahw->reset.start_offset = ahw->reset.buff +
+ ahw->reset.hdr->start_offset;
+ ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr)
+{
+ int value;
+
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr,
+ struct qlc_83xx_rmw *p_rmw_hdr)
+{
+ int value;
+
+ if (p_rmw_hdr->index_a)
+ value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+ else
+ value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+
+ value &= p_rmw_hdr->mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_poll *poll;
+ int i;
+ unsigned long arg1, arg2;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, entry++)
+ qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+ delay, poll->mask,
+ poll->status);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ arg1 = entry->arg1;
+ arg2 = entry->arg2;
+ if (delay) {
+ if (qlcnic_83xx_poll_reg(p_dev,
+ arg1, delay,
+ poll->mask,
+ poll->status)){
+ qlcnic_83xx_rd_reg_indirect(p_dev,
+ arg1);
+ qlcnic_83xx_rd_reg_indirect(p_dev,
+ arg2);
+ }
+ }
+ }
+ }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ long delay;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+ entry->dr_value);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay)
+ qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status);
+ }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_rmw *rmw_hdr;
+
+ rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+ sizeof(struct qlc_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+ entry->arg2, rmw_hdr);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ int index, i, j;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+ unsigned long addr;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay) {
+ if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status)){
+ index = p_dev->ahw->reset.array_index;
+ addr = entry->dr_addr;
+ j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ p_dev->ahw->reset.array[index++] = j;
+
+ if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+ p_dev->ahw->reset.array_index = 1;
+ }
+ }
+ }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.template_end = 1;
+ if (p_dev->ahw->reset.seq_error == 0)
+ dev_err(&p_dev->pdev->dev,
+ "HW restart process completed successfully.\n");
+ else
+ dev_err(&p_dev->pdev->dev,
+ "HW restart completed with timeout errors.\n");
+}
+
+/**
+* qlcnic_83xx_exec_template_cmd
+*
+* @p_dev: adapter structure
+* @p_buff: Pointer to the instruction template
+*
+* Template provides instructions to stop, restart and initialize firmware.
+* These instructions are abstracted as a series of read, write and
+* poll operations on hardware registers. Register information and operation
+* specifics are not exposed to the driver. Driver reads the template from
+* flash and executes the instructions located at pre-defined offsets.
+*
+* Returns: None
+* */
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+ char *p_buff)
+{
+ int index, entries;
+ struct qlc_83xx_entry_hdr *p_hdr;
+ char *entry = p_buff;
+
+ p_dev->ahw->reset.seq_end = 0;
+ p_dev->ahw->reset.template_end = 0;
+ entries = p_dev->ahw->reset.hdr->entries;
+ index = p_dev->ahw->reset.seq_index;
+
+ for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+ switch (p_hdr->cmd) {
+ case QLC_83XX_OPCODE_NOP:
+ break;
+ case QLC_83XX_OPCODE_WRITE_LIST:
+ qlcnic_83xx_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_WRITE_LIST:
+ qlcnic_83xx_read_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_LIST:
+ qlcnic_83xx_poll_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+ qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+ qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_PAUSE:
+ qlcnic_83xx_pause(p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_END:
+ qlcnic_83xx_seq_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_TMPL_END:
+ qlcnic_83xx_template_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_POLL_READ_LIST:
+ qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+ break;
+ default:
+ dev_err(&p_dev->pdev->dev,
+ "%s: Unknown opcode 0x%04x in template %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+ entry += p_hdr->size;
+ }
+ p_dev->ahw->reset.seq_index = index;
+}
+
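+/* The helpers below execute the stop, start and init sections of the reset
+ * template.
+ */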
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_index = 0;
+
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+ if (p_dev->ahw->reset.template_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ if (request_firmware(&adapter->ahw->fw_info.fw,
+ QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ dev_err(&adapter->pdev->dev,
+ "No file FW image, loading flash FW image.\n");
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ } else {
+ if (qlcnic_83xx_copy_fw_file(adapter))
+ return err;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int err = -EIO;
+
+ qlcnic_83xx_stop_hw(adapter);
+
+ /* Collect FW register dump if required */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+ qlcnic_dump_fw(adapter);
+ qlcnic_83xx_init_hw(adapter);
+
+ if (qlcnic_83xx_copy_bootloader(adapter))
+ return err;
+ /* Boot either flash image or firmware image from host file system */
+ if (qlcnic_load_fw_file) {
+ if (qlcnic_83xx_load_fw_image_from_host(adapter))
+ return err;
+ } else {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ }
+
+ qlcnic_83xx_start_hw(adapter);
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+* qlcnic_83xx_config_default_opmode
+*
+* @adapter: adapter structure
+*
+* Configure default driver operating mode
+*
+* Returns: Error code or Success(0)
+* */
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ } else {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_tx_ques = nic_info.max_tx_ques;
+ ahw->max_rx_ques = nic_info.max_rx_ques;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->max_mac_filters = nic_info.max_mac_filters;
+ ahw->max_mtu = nic_info.max_mtu;
+
+ if (ahw->capabilities & BIT_23)
+ ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
+ else
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+
+ return ahw->nic_mode;
+}
+
+static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ ret = qlcnic_83xx_get_nic_configuration(adapter);
+ if (ret == -EIO)
+ return -EIO;
+
+ if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
+ if (qlcnic_83xx_config_vnic_opmode(adapter))
+ return -EIO;
+ } else if (ret == QLC_83XX_DEFAULT_MODE) {
+ if (qlcnic_83xx_config_default_opmode(adapter))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+#define IS_QLC_83XX_USED(a, b, c) (((1 << a->portnum) & b) || ((c >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 presence_mask, audit_mask;
+ int status;
+
+ presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+ if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_STOP_NIC_FUNC);
+ cmd.req.arg[1] = BIT_31;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to clean up the function resources\n");
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ /* Initialize 83xx mailbox spinlock */
+ spin_lock_init(&ahw->mbx_lock);
+
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ qlcnic_83xx_clear_function_resources(adapter);
+
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+
+ if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+ qlcnic_83xx_read_flash_mfg_id(adapter);
+
+ if (qlcnic_83xx_idc_init(adapter))
+ return -EIO;
+
+ /* Configure default, SR-IOV or Virtual NIC mode of operation */
+ if (qlcnic_83xx_configure_opmode(adapter))
+ return -EIO;
+
+ /* Perform operating mode specific initialization */
+ if (adapter->nic_ops->init_driver(adapter))
+ return -EIO;
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ /* Periodically monitor device status */
+ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+
+ return adapter->ahw->idc.err_code;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644
index 00000000000..b0c3de9ede0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -0,0 +1,225 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int i, ret = -EBUSY;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return ret;
+
+ if (qlcnic_config_npars) {
+ for (i = 0; i < ahw->act_pci_func; i++) {
+ id = adapter->npars[i].pci_func;
+ if (id == ahw->pci_func)
+ continue;
+ data |= qlcnic_config_npars &
+ QLC_83XX_SET_FUNC_OPMODE(0x3, id);
+ }
+ } else {
+ data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, ahw->pci_func)) |
+ QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC,
+ ahw->pci_func);
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ * Management virtual NIC sets the operational mode of the other vNICs and
+ * configures the embedded switch (ESWITCH).
+ * Returns: Success(0) or error code.
+ *
+ **/
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+ if (qlcnic_init_pci_info(adapter))
+ return err;
+
+ if (qlcnic_83xx_set_vnic_opmode(adapter))
+ return err;
+
+ if (qlcnic_set_default_offload_settings(adapter))
+ return err;
+ } else {
+ if (qlcnic_reset_npar_config(adapter))
+ return err;
+ }
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_fw_version(adapter);
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_vnic_opmode
+ *
+ * @adapter: adapter structure
+ * Identify virtual NIC operational modes.
+ *
+ * Returns: Success(0) or error code.
+ *
+ **/
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ } else if (priv_level == QLCNIC_MGMT_FUNC) {
+ ahw->op_mode = QLCNIC_MGMT_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ } else {
+ return -EIO;
+ }
+
+ if (ahw->capabilities & BIT_23)
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ else
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+ adapter->ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index b14b8f0787e..a69097c6b84 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1,12 +1,92 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
#include "qlcnic.h"
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+ {QLCNIC_CMD_SET_MTU, 4, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+ {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+ {QLCNIC_CMD_SET_DRV_VER, 4, 1},
+};
+
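+/* Build the CDRP command signature from the PCI function and FW HAL version */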
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
+{
+ return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+ (0xcafe << 16);
+}
+
+/* Allocate mailbox command argument buffers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = type;
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Free up mailbox command argument buffers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+ kfree(cmd->req.arg);
+ cmd->req.arg = NULL;
+ kfree(cmd->rsp.arg);
+ cmd->rsp.arg = NULL;
+}
+
static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
int i;
@@ -38,194 +118,123 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
return rsp;
}
-void
-qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
+ int i;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ const char *fmt;
- signature = QLCNIC_CDRP_SIGNATURE_MAKE(ahw->pci_func,
- adapter->ahw->fw_hal_version);
+ signature = qlcnic_get_cmd_signature(ahw);
/* Acquire semaphore before accessing CRB */
if (qlcnic_api_lock(adapter)) {
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
- return;
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ return cmd->rsp.arg[0];
}
QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
- QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, cmd->req.arg1);
- QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, cmd->req.arg2);
- QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, cmd->req.arg3);
+ for (i = 1; i < QLCNIC_CDRP_MAX_ARGS; i++)
+ QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
- QLCNIC_CDRP_FORM_CMD(cmd->req.cmd));
-
+ QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "CDRP response timeout.\n");
- cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
+ dev_err(&pdev->dev, "card response timeout.\n");
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- switch (cmd->rsp.cmd) {
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
+ switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
- dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid args: [%d]\n";
break;
case QLCNIC_RCODE_NOT_SUPPORTED:
case QLCNIC_RCODE_NOT_IMPL:
- dev_err(&pdev->dev,
- "CDRP command not supported: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command not supported: [%d]\n";
break;
case QLCNIC_RCODE_NOT_PERMITTED:
- dev_err(&pdev->dev,
- "CDRP requested action not permitted: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP requested action not permitted: [%d]\n";
break;
case QLCNIC_RCODE_INVALID:
- dev_err(&pdev->dev,
- "CDRP invalid or unknown cmd received: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP invalid or unknown cmd received: [%d]\n";
break;
case QLCNIC_RCODE_TIMEOUT:
- dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command timeout: [%d]\n";
break;
default:
- dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
- cmd->rsp.cmd);
+ fmt = "CDRP command failed: [%d]\n";
+ break;
}
- } else if (rsp == QLCNIC_CDRP_RSP_OK) {
- cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
- if (cmd->rsp.arg2)
- cmd->rsp.arg2 = QLCRD32(adapter,
- QLCNIC_ARG2_CRB_OFFSET);
- if (cmd->rsp.arg3)
- cmd->rsp.arg3 = QLCRD32(adapter,
- QLCNIC_ARG3_CRB_OFFSET);
- }
- if (cmd->rsp.arg1)
- cmd->rsp.arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK)
+ cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+ for (i = 1; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
/* Release semaphore */
qlcnic_api_unlock(adapter);
-
-}
-
-static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
-{
- uint64_t sum = 0;
- int count = temp_size / sizeof(uint32_t);
- while (count-- > 0)
- sum += *temp_buffer++;
- while (sum >> 32)
- sum = (sum & 0xFFFFFFFF) + (sum >> 32);
- return ~sum;
+ return cmd->rsp.arg[0];
}
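
The refactor above collapses several dev_err() calls into a single one by picking a format string in the switch. A small stand-alone model of that pattern follows; the numeric response codes are made up for the demo and do not reproduce the real QLCNIC_RCODE_* values.

#include <stdio.h>

enum { RC_INVALID_ARGS = 6, RC_NOT_SUPPORTED = 9, RC_TIMEOUT = 17 };

static void report_cdrp_error(int code)
{
	const char *fmt;

	switch (code) {
	case RC_INVALID_ARGS:
		fmt = "CDRP invalid args: [%d]\n";
		break;
	case RC_NOT_SUPPORTED:
		fmt = "CDRP command not supported: [%d]\n";
		break;
	case RC_TIMEOUT:
		fmt = "CDRP command timeout: [%d]\n";
		break;
	default:
		fmt = "CDRP command failed: [%d]\n";
		break;
	}
	fprintf(stderr, fmt, code);	/* single print site, like the driver */
}

int main(void)
{
	report_cdrp_error(RC_INVALID_ARGS);
	report_cdrp_error(42);
	return 0;
}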
-int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter)
{
- int err, i;
- void *tmp_addr;
- u32 temp_size, version, csum, *template;
- __le32 *tmp_buf;
struct qlcnic_cmd_args cmd;
- struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
- dma_addr_t tmp_addr_t = 0;
-
- ahw = adapter->ahw;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_TEMP_SIZE;
- memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd != QLCNIC_RCODE_SUCCESS) {
- dev_info(&adapter->pdev->dev,
- "Can't get template size %d\n", cmd.rsp.cmd);
- err = -EIO;
- return err;
- }
- temp_size = cmd.rsp.arg2;
- version = cmd.rsp.arg3;
- dev_info(&adapter->pdev->dev,
- "minidump template version = 0x%x", version);
- if (!temp_size)
- return -EIO;
+ u32 arg1, arg2, arg3;
+ char drv_string[12];
+ int err = 0;
- tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
- &tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
- return -ENOMEM;
- }
- memset(&cmd.rsp, 0, sizeof(struct _cdrp_cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_TEMP_HDR;
- cmd.req.arg1 = LSD(tmp_addr_t);
- cmd.req.arg2 = MSD(tmp_addr_t);
- cmd.req.arg3 = temp_size;
- qlcnic_issue_cmd(adapter, &cmd);
-
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
- dev_err(&adapter->pdev->dev,
- "Failed to get mini dump template header %d\n", err);
- err = -EIO;
- goto error;
- }
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr) {
- err = -EIO;
- goto error;
- }
- tmp_buf = tmp_addr;
- template = (u32 *) ahw->fw_dump.tmpl_hdr;
- for (i = 0; i < temp_size/sizeof(u32); i++)
- *template++ = __le32_to_cpu(*tmp_buf++);
+ memset(drv_string, 0, sizeof(drv_string));
+ snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
+ _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+ _QLCNIC_LINUX_SUBVERSION);
- csum = qlcnic_temp_checksum((u32 *)ahw->fw_dump.tmpl_hdr, temp_size);
- if (csum) {
- dev_err(&adapter->pdev->dev,
- "Template header checksum validation failed\n");
- err = -EIO;
- goto error;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_DRV_VER);
+ memcpy(&arg1, drv_string, sizeof(u32));
+ memcpy(&arg2, drv_string + 4, sizeof(u32));
+ memcpy(&arg3, drv_string + 8, sizeof(u32));
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ cmd.req.arg[3] = arg3;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
+ return -EIO;
}
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
- ahw->fw_dump.enable = 1;
-error:
- dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
- return err;
+ return 0;
}
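
The version reporting above packs a "major.minor.sub" string into three 32-bit mailbox words with memcpy from a 12-byte buffer. A self-contained sketch of the same packing, using example version numbers in place of the _QLCNIC_LINUX_* macros:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char drv_string[12];
	uint32_t arg[3];

	memset(drv_string, 0, sizeof(drv_string));
	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d", 5, 1, 34);

	/* copy 4 bytes of the (zero-padded) string into each argument word */
	memcpy(&arg[0], drv_string, sizeof(uint32_t));
	memcpy(&arg[1], drv_string + 4, sizeof(uint32_t));
	memcpy(&arg[2], drv_string + 8, sizeof(uint32_t));

	printf("\"%s\" -> 0x%08x 0x%08x 0x%08x\n",
	       drv_string, arg[0], arg[1], arg[2]);
	return 0;
}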
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
+ int err = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_MTU;
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = mtu;
- cmd.req.arg3 = 0;
- if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd) {
- dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
- return -EIO;
- }
- }
+ if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+ return err;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+ cmd.req.arg[1] = recv_ctx->context_id;
+ cmd.req.arg[2] = mtu;
- return 0;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
-static int
-qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
void *addr;
struct qlcnic_hostrq_rx_ctx *prq;
@@ -242,10 +251,10 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
u64 phys_addr;
u8 i, nrds_rings, nsds_rings;
+ u16 temp_u16;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
int err;
- u16 temp;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -279,11 +288,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
- cap |= QLCNIC_CAP0_LRO_MSS;
-
- temp = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp);
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
prq->txrx_sds_binding = nsds_rings - 1;
prq->capabilities[0] = cpu_to_le32(cap);
@@ -329,20 +335,17 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
}
phys_addr = hostrq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32) (phys_addr >> 32);
- cmd.req.arg2 = (u32) (phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err) {
dev_err(&adapter->pdev->dev,
"Failed to create rx ctx in firmware%d\n", err);
goto out_free_rsp;
}
-
prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
@@ -373,6 +376,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
+ qlcnic_free_mbx_args(&cmd);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
@@ -381,24 +385,24 @@ out_free_rq:
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
+ int err;
struct qlcnic_cmd_args cmd;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = recv_ctx->context_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_RX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+ cmd.req.arg[1] = recv_ctx->context_id;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
dev_err(&adapter->pdev->dev,
"Failed to destroy rx ctx in firmware\n");
recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
}
-static int
-qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int ring)
{
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
@@ -410,7 +414,6 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
int err;
u64 phys_addr;
dma_addr_t rq_phys_addr, rsp_phys_addr;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
/* reset host resources */
tx_ring->producer = 0;
@@ -445,9 +448,9 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq->host_int_crb_mode =
cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->msi_index = 0;
prq->interrupt_ctl = 0;
- prq->msi_index = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
prq_cds = &prq->cds_ring;
@@ -456,19 +459,17 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
phys_addr = rq_phys_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = (u32)(phys_addr >> 32);
- cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
- cmd.req.arg3 = rq_size;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CREATE_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
-
- adapter->tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
} else {
dev_err(&adapter->pdev->dev,
"Failed to create tx ctx in firmware%d\n", err);
@@ -476,76 +477,81 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
- rsp_phys_addr);
+ rsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
static void
-qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->tx_ring->ctx_id;
- cmd.req.arg2 = QLCNIC_DESTROY_CTX_RESET;
- cmd.req.arg3 = 0;
- cmd.req.cmd = QLCNIC_CDRP_CMD_DESTROY_TX_CTX;
- qlcnic_issue_cmd(adapter, &cmd);
- if (cmd.rsp.cmd)
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+ cmd.req.arg[1] = tx_ring->ctx_id;
+ if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
"Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
}
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
+ int err;
struct qlcnic_cmd_args cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = config;
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIG_PORT;
- qlcnic_issue_cmd(adapter, &cmd);
-
- return cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+ cmd.req.arg[1] = config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
void *addr;
- int err;
- int ring;
+ int err, ring;
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
+ __le32 *ptr;
struct pci_dev *pdev = adapter->pdev;
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
- sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
- if (tx_ring->hw_consumer == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
- return -ENOMEM;
- }
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr,
+ GFP_KERNEL);
- /* cmd desc ring */
- addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr, GFP_KERNEL);
+ if (ptr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ tx_ring->hw_consumer = ptr;
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr,
+ GFP_KERNEL);
- if (addr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
- err = -ENOMEM;
- goto err_out_free;
- }
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate tx desc ring\n");
+ err = -ENOMEM;
+ goto err_out_free;
+ }
- tx_ring->desc_head = addr;
+ tx_ring->desc_head = addr;
+ }
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -584,36 +590,70 @@ err_out_free:
return err;
}
-
-int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
- int err;
+ int i, err, ring;
- if (adapter->flags & QLCNIC_NEED_FLR) {
- pci_reset_function(adapter->pdev);
- adapter->flags &= ~QLCNIC_NEED_FLR;
+ if (dev->flags & QLCNIC_NEED_FLR) {
+ pci_reset_function(dev->pdev);
+ dev->flags &= ~QLCNIC_NEED_FLR;
}
- err = qlcnic_fw_cmd_create_rx_ctx(adapter);
- if (err)
- return err;
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_config_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+ }
- err = qlcnic_fw_cmd_create_tx_ctx(adapter);
- if (err) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- return err;
+ err = qlcnic_fw_cmd_create_rx_ctx(dev);
+ if (err)
+ goto err_out;
+
+ for (ring = 0; ring < dev->max_drv_tx_rings; ring++) {
+ err = qlcnic_fw_cmd_create_tx_ctx(dev,
+ &dev->tx_ring[ring],
+ ring);
+ if (err) {
+ qlcnic_fw_cmd_destroy_rx_ctx(dev);
+ if (ring == 0)
+ goto err_out;
+
+ for (i = 0; i < ring; i++)
+ qlcnic_fw_cmd_destroy_tx_ctx(dev,
+ &dev->tx_ring[i]);
+
+ goto err_out;
+ }
}
- set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
return 0;
+
+err_out:
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(dev, 0);
+ }
+ return err;
}
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
+ int ring;
+
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
qlcnic_fw_cmd_destroy_rx_ctx(adapter);
- qlcnic_fw_cmd_destroy_tx_ctx(adapter);
-
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
+
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(adapter, 0);
+ }
/* Allow dma queues to drain after context reset */
mdelay(20);
}
@@ -629,20 +669,23 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
recv_ctx = adapter->recv_ctx;
- tx_ring = adapter->tx_ring;
- if (tx_ring->hw_consumer != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- sizeof(u32),
- tx_ring->hw_consumer,
- tx_ring->hw_cons_phys_addr);
- tx_ring->hw_consumer = NULL;
- }
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
- if (tx_ring->desc_head != NULL) {
- dma_free_coherent(&adapter->pdev->dev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
- tx_ring->desc_head = NULL;
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head,
+ tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -671,40 +714,43 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
-/* Get MAC address of a NIC partition */
-int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
- int err;
+ int err, i;
struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.arg1 = adapter->ahw->pci_func | BIT_8;
- cmd.req.cmd = QLCNIC_CDRP_CMD_MAC_ADDRESS;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err == QLCNIC_RCODE_SUCCESS)
- qlcnic_fetch_mac(cmd.rsp.arg1, cmd.rsp.arg2, 0, mac);
- else {
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
dev_err(&adapter->pdev->dev,
"Failed to get mac address%d\n", err);
err = -EIO;
}
-
+ qlcnic_free_mbx_args(&cmd);
return err;
}
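
qlcnic_82xx_get_mac_address() above rebuilds the six MAC bytes from two 32-bit response words. A stand-alone illustration of that byte extraction; the sample word values are arbitrary:

#include <stdint.h>
#include <stdio.h>

static void mac_from_words(uint32_t mac_high, uint32_t mac_low, uint8_t mac[6])
{
	int i;

	/* first two bytes come from the low 16 bits of mac_high ... */
	for (i = 0; i < 2; i++)
		mac[i] = (uint8_t)(mac_high >> ((1 - i) * 8));
	/* ... remaining four bytes from mac_low, most significant first */
	for (i = 2; i < 6; i++)
		mac[i] = (uint8_t)(mac_low >> ((5 - i) * 8));
}

int main(void)
{
	uint8_t mac[6];

	mac_from_words(0x00000e1e, 0x12345678, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 0e:1e:12:34:56:78 */
	return 0;
}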
/* Get info of a NIC partition */
-int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
- struct qlcnic_info *npar_info, u8 func_id)
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
{
int err;
dma_addr_t nic_dma_t;
- struct qlcnic_info_le *nic_info;
+ const struct qlcnic_info_le *nic_info;
void *nic_info_addr;
struct qlcnic_cmd_args cmd;
- size_t nic_size = sizeof(struct qlcnic_info_le);
+ size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
&nic_dma_t, GFP_KERNEL);
@@ -713,47 +759,39 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = (func_id << 16 | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
- if (err == QLCNIC_RCODE_SUCCESS) {
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = (func_id << 16 | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ } else {
npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
- npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
- npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
-
- dev_info(&adapter->pdev->dev,
- "phy port: %d switch_mode: %d,\n"
- "\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
- "\tmax_tx_bw: 0x%x max_mtu:0x%x, capabilities: 0x%x\n",
- npar_info->phys_port, npar_info->switch_mode,
- npar_info->max_tx_ques, npar_info->max_rx_ques,
- npar_info->min_tx_bw, npar_info->max_tx_bw,
- npar_info->max_mtu, npar_info->capabilities);
- } else {
- dev_err(&adapter->pdev->dev,
- "Failed to get nic info%d\n", err);
- err = -EIO;
}
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
- nic_dma_t);
+ nic_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
/* Configure a NIC partition */
-int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
{
int err = -EIO;
dma_addr_t nic_dma_t;
@@ -784,13 +822,11 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_NIC_INFO;
- cmd.req.arg1 = MSD(nic_dma_t);
- cmd.req.arg2 = LSD(nic_dma_t);
- cmd.req.arg3 = ((nic->pci_func << 16) | nic_size);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -800,12 +836,14 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
nic_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
/* Get PCI Info of a partition */
-int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
- struct qlcnic_pci_info *pci_info)
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
{
int err = 0, i;
struct qlcnic_cmd_args cmd;
@@ -822,13 +860,11 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_PCI_INFO;
- cmd.req.arg1 = MSD(pci_info_dma_t);
- cmd.req.arg2 = LSD(pci_info_dma_t);
- cmd.req.arg3 = pci_size;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ cmd.req.arg[1] = MSD(pci_info_dma_t);
+ cmd.req.arg[2] = LSD(pci_info_dma_t);
+ cmd.req.arg[3] = pci_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
adapter->ahw->act_pci_func = 0;
if (err == QLCNIC_RCODE_SUCCESS) {
@@ -854,6 +890,8 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
pci_info_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -872,21 +910,19 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_SET_PORTMIRRORING;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORTMIRRORING);
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
- if (err != QLCNIC_RCODE_SUCCESS) {
+ if (err != QLCNIC_RCODE_SUCCESS)
dev_err(&adapter->pdev->dev,
"Failed to configure port mirroring%d on eswitch:%d\n",
pci_func, id);
- } else {
+ else
dev_info(&adapter->pdev->dev,
"Configured eSwitch %d for port mirroring:%d\n",
id, pci_func);
- }
+ qlcnic_free_mbx_args(&cmd);
return err;
}
@@ -923,13 +959,11 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
@@ -949,6 +983,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -963,6 +999,9 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
void *stats_addr;
int err;
+ if (mac_stats == NULL)
+ return -ENOMEM;
+
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL);
if (!stats_addr) {
@@ -971,15 +1010,11 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
memset(stats_addr, 0, stats_size);
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
- cmd.req.arg1 = stats_size << 16;
- cmd.req.arg2 = MSD(stats_dma_t);
- cmd.req.arg3 = LSD(stats_dma_t);
-
- qlcnic_issue_cmd(adapter, &cmd);
- err = cmd.rsp.cmd;
-
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+ cmd.req.arg[1] = stats_size << 16;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
if (!err) {
stats = stats_addr;
mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
@@ -1001,10 +1036,16 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get mac stats failed, err=%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
+
+ qlcnic_free_mbx_args(&cmd);
+
return err;
}
@@ -1065,7 +1106,7 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
const u8 port, const u8 rx_tx)
{
-
+ int err;
u32 arg1;
struct qlcnic_cmd_args cmd;
@@ -1088,15 +1129,16 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
arg1 |= BIT_14 | rx_tx << 15;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_STATS;
- cmd.req.arg1 = arg1;
- qlcnic_issue_cmd(adapter, &cmd);
- return cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_ESWITCH_STATS);
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
err_ret:
- dev_err(&adapter->pdev->dev, "Invalid argument func_esw=%d port=%d"
- "rx_ctx=%d\n", func_esw, port, rx_tx);
+ dev_err(&adapter->pdev->dev,
+ "Invalid args func_esw %d port %d rx_ctx %d\n",
+ func_esw, port, rx_tx);
return -EIO;
}
@@ -1109,22 +1151,21 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
u8 pci_func;
pci_func = (*arg1 >> 8);
- cmd.req.cmd = QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG;
- cmd.req.arg1 = *arg1;
- cmd.rsp.arg1 = cmd.rsp.arg2 = 1;
- qlcnic_issue_cmd(adapter, &cmd);
- *arg1 = cmd.rsp.arg1;
- *arg2 = cmd.rsp.arg2;
- err = cmd.rsp.cmd;
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+ cmd.req.arg[1] = *arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg[1];
+ *arg2 = cmd.rsp.arg[2];
+ qlcnic_free_mbx_args(&cmd);
- if (err == QLCNIC_RCODE_SUCCESS) {
+ if (err == QLCNIC_RCODE_SUCCESS)
dev_info(&adapter->pdev->dev,
- "eSwitch port config for pci func %d\n", pci_func);
- } else {
+ "eSwitch port config for pci func %d\n", pci_func);
+ else
dev_err(&adapter->pdev->dev,
"Failed to get eswitch port config for pci func %d\n",
pci_func);
- }
return err;
}
/* Configure eSwitch port
@@ -1189,20 +1230,18 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
return err;
}
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH;
- cmd.req.arg1 = arg1;
- cmd.req.arg2 = arg2;
- qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_ESWITCH);
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
- err = cmd.rsp.cmd;
- if (err != QLCNIC_RCODE_SUCCESS) {
+ if (err != QLCNIC_RCODE_SUCCESS)
dev_err(&adapter->pdev->dev,
"Failed to configure eswitch pci func %d\n", pci_func);
- } else {
+ else
dev_info(&adapter->pdev->dev,
- "Configured eSwitch for pci func %d\n", pci_func);
- }
+ "Configured eSwitch for pci func %d\n", pci_func);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 74b98110c5b..5641f8ec49a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -22,42 +22,37 @@ struct qlcnic_stats {
#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+ 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
- {"xmit_called",
- QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
- {"xmit_finished",
- QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
- {"rx_dropped",
- QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
- {"tx_dropped",
- QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
- {"csummed",
- QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
- {"rx_pkts",
- QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
- {"lro_pkts",
- QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
- {"rx_bytes",
- QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
- {"tx_bytes",
- QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
- {"lrobytes",
- QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
- {"lso_frames",
- QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
- {"xmit_on",
- QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
- {"xmit_off",
- QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+ QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+ QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
- QLC_OFF(stats.skb_alloc_failure)},
- {"null rxbuf",
- QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
{"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
QLC_OFF(stats.rx_dma_map_error)},
{"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
QLC_OFF(stats.tx_dma_map_error)},
+ {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+ QLC_OFF(stats.mac_filter_limit_overrun)},
+ {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ QLC_OFF(stats.spurious_intr)},
};
@@ -78,7 +73,15 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx numbytes",
};
-static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_tx_bytes",
+ "ctx_tx_pkts",
+ "ctx_tx_errors",
+ "ctx_tx_dropped_pkts",
+ "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_frames",
"mac_tx_bytes",
"mac_tx_mcast_pkts",
@@ -110,35 +113,70 @@ static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
"mac_rx_length_large",
"mac_rx_jabber",
"mac_rx_dropped",
- "mac_rx_crc_error",
+ "mac_crc_error",
"mac_align_error",
};
-#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
-#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
-#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
-#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_rx_bytes",
+ "ctx_rx_pkts",
+ "ctx_lro_pkt_cnt",
+ "ctx_ip_csum_error",
+ "ctx_rx_pkts_wo_ctx",
+ "ctx_rx_pkts_dropped_wo_sts",
+ "ctx_rx_osized_pkts",
+ "ctx_rx_pkts_dropped_wo_rds",
+ "ctx_rx_unexpected_mcast_pkts",
+ "ctx_invalid_mac_address",
+ "ctx_rx_rds_ring_prim_attemoted",
+ "ctx_rx_rds_ring_prim_success",
+ "ctx_num_lro_flows_added",
+ "ctx_num_lro_flows_removed",
+ "ctx_num_lro_flows_active",
+ "ctx_pkts_dropped_unknown",
+};
static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Register_Test_on_offline",
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
- "External_Loopback_offline"
+ "EEPROM_Test_offline"
};
#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+static inline int qlcnic_82xx_statistics(void)
+{
+ return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+}
+
+static inline int qlcnic_83xx_statistics(void)
+{
+ return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_statistics();
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_statistics();
+ else
+ return -1;
+}
+
#define QLCNIC_RING_REGS_COUNT 20
#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
#define QLCNIC_MAX_EEPROM_LEN 1024
static const u32 diag_registers[] = {
- CRB_CMDPEG_STATE,
- CRB_RCVPEG_STATE,
- CRB_XG_STATE_P3P,
- CRB_FW_CAPABILITIES_1,
- ISR_INT_STATE_REG,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_FW_CAPABILITIES,
QLCNIC_CRB_DRV_ACTIVE,
QLCNIC_CRB_DEV_STATE,
QLCNIC_CRB_DRV_STATE,
@@ -148,6 +186,13 @@ static const u32 diag_registers[] = {
QLCNIC_PEG_ALIVE_COUNTER,
QLCNIC_PEG_HALT_STATUS1,
QLCNIC_PEG_HALT_STATUS2,
+ -1
+};
+
+
+static const u32 ext_diag_registers[] = {
+ CRB_XG_STATE_P3P,
+ ISR_INT_STATE_REG,
QLCNIC_CRB_PEG_NET_0+0x3c,
QLCNIC_CRB_PEG_NET_1+0x3c,
QLCNIC_CRB_PEG_NET_2+0x3c,
@@ -156,12 +201,19 @@ static const u32 diag_registers[] = {
};
#define QLCNIC_MGMT_API_VERSION 2
-#define QLCNIC_DEV_INFO_SIZE 1
-#define QLCNIC_ETHTOOL_REGS_VER 2
+#define QLCNIC_ETHTOOL_REGS_VER 3
+
static int qlcnic_get_regs_len(struct net_device *dev)
{
- return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN +
- QLCNIC_DEV_INFO_SIZE + 1;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 len;
+
+ if (qlcnic_83xx_check(adapter))
+ len = qlcnic_83xx_get_regs_len(adapter);
+ else
+ len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+ return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
}
static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -174,10 +226,9 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 fw_major, fw_minor, fw_build;
-
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d", fw_major, fw_minor, fw_build);
@@ -192,7 +243,10 @@ static int
qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 speed, reg;
int check_sfp_module = 0;
+ u16 pcifn = ahw->pci_func;
/* read which mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
@@ -213,9 +267,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->autoneg = adapter->ahw->link_autoneg;
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
- u32 val;
+ u32 val = 0;
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_get_settings(adapter);
+ else
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_1000baseT_Full;
@@ -225,6 +282,12 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev) && adapter->ahw->has_link_events) {
+ if (qlcnic_82xx_check(adapter)) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn));
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
ecmd->autoneg = adapter->ahw->link_autoneg;
ecmd->duplex = adapter->ahw->link_duplex;
@@ -294,6 +357,13 @@ skip:
ecmd->port = PORT_TP;
}
break;
+ case QLCNIC_BRDTYPE_83XX_10G:
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) && ahw->has_link_events;
+ break;
default:
dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
adapter->ahw->board_type);
@@ -321,16 +391,10 @@ skip:
return 0;
}
-static int
-qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+ struct ethtool_cmd *ecmd)
{
- u32 config = 0;
- u32 ret = 0;
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if (adapter->ahw->port_type != QLCNIC_GBE)
- return -EOPNOTSUPP;
-
+ u32 ret = 0, config = 0;
/* read which mode */
if (ecmd->duplex)
config |= 0x1;
@@ -358,6 +422,24 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
+ return ret;
+}
+
+static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (qlcnic_83xx_check(adapter))
+ ret = qlcnic_83xx_set_settings(adapter, ecmd);
+ else
+ ret = qlcnic_set_port_config(adapter, ecmd);
+
+ if (!ret)
+ return ret;
adapter->ahw->link_speed = ethtool_cmd_speed(ecmd);
adapter->ahw->link_duplex = ecmd->duplex;
@@ -370,6 +452,19 @@ qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return dev->netdev_ops->ndo_open(dev);
}
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+ u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+ j = 0;
+ while (ext_diag_registers[j] != -1)
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+ return i;
+}
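
qlcnic_82xx_get_registers() above walks register tables terminated by -1. A minimal sketch of the same sentinel-terminated walk; reg_read() and the addresses are placeholders, not real CRB offsets:

#include <stdint.h>
#include <stdio.h>

static const uint32_t diag_regs[] = { 0x3040, 0x304c, 0x30ec, (uint32_t)-1 };

static uint32_t reg_read(uint32_t addr)
{
	return addr ^ 0xdeadbeef;	/* fake value for the demo */
}

int main(void)
{
	uint32_t buf[16];
	int i = 0, j;

	/* stop at the -1 sentinel, accumulating one dump slot per register */
	for (j = 0; diag_regs[j] != (uint32_t)-1; j++, i++)
		buf[i] = reg_read(diag_regs[j]);

	for (j = 0; j < i; j++)
		printf("reg[%d] = 0x%08x\n", j, buf[j]);
	return 0;
}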
+
static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
@@ -377,17 +472,20 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
u32 *regs_buff = p;
- int ring, i = 0, j = 0;
+ int ring, i = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
+
regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
(adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
- for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
- regs_buff[i] = QLCRD32(adapter, diag_registers[j]);
+ if (qlcnic_82xx_check(adapter))
+ i = qlcnic_82xx_get_registers(adapter, regs_buff);
+ else
+ i = qlcnic_83xx_get_registers(adapter, regs_buff);
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
@@ -415,6 +513,10 @@ static u32 qlcnic_test_link(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ val = qlcnic_83xx_test_link(adapter);
+ return (val & 1) ? 0 : 1;
+ }
val = QLCRD32(adapter, CRB_XG_STATE_P3P);
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
@@ -426,8 +528,10 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int offset;
- int ret;
+ int ret = -1;
+ if (qlcnic_83xx_check(adapter))
+ return 0;
if (eeprom->len == 0)
return -EINVAL;
@@ -435,8 +539,9 @@ qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
((adapter->pdev)->device << 16);
offset = eeprom->offset;
- ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
- eeprom->len);
+ if (qlcnic_82xx_check(adapter))
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
if (ret < 0)
return ret;
@@ -529,11 +634,11 @@ static int qlcnic_set_channels(struct net_device *dev,
channel->tx_count != channel->max_tx)
return -EINVAL;
- err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
+ err = qlcnic_validate_max_rss(channel->max_rx, channel->rx_count);
if (err)
return err;
- err = qlcnic_set_max_rss(adapter, channel->rx_count);
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
netdev_info(dev, "allocated 0x%x sds rings\n",
adapter->max_sds_rings);
return err;
@@ -547,6 +652,10 @@ qlcnic_get_pauseparam(struct net_device *netdev,
int port = adapter->ahw->physical_port;
__u32 val;
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_get_pauseparam(adapter, pause);
+ return;
+ }
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
@@ -592,6 +701,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
int port = adapter->ahw->physical_port;
__u32 val;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_pauseparam(adapter, pause);
+
/* read mode */
if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
@@ -606,6 +718,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
switch (port) {
@@ -668,6 +781,9 @@ static int qlcnic_reg_test(struct net_device *dev)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_reg_test(adapter);
+
data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
@@ -675,16 +791,30 @@ static int qlcnic_reg_test(struct net_device *dev)
return 0;
}
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return 0;
+
+ return qlcnic_83xx_flash_test(adapter);
+}
+
static int qlcnic_get_sset_count(struct net_device *dev, int sset)
{
+ int len;
+
struct qlcnic_adapter *adapter = netdev_priv(dev);
switch (sset) {
case ETH_SS_TEST:
return QLCNIC_TEST_LEN;
case ETH_SS_STATS:
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
- return QLCNIC_TOTAL_STATS_LEN;
+ len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN;
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_83xx_check(adapter))
+ return len;
+ return qlcnic_82xx_statistics();
default:
return -EOPNOTSUPP;
}
@@ -693,35 +823,36 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
static int qlcnic_irq_test(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int max_sds_rings = adapter->max_sds_rings;
- int ret;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
+ int ret, max_sds_rings = adapter->max_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
return -EIO;
ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
if (ret)
- goto clear_it;
+ goto clear_diag_irq;
- adapter->ahw->diag_cnt = 0;
- memset(&cmd, 0, sizeof(cmd));
- cmd.req.cmd = QLCNIC_CDRP_CMD_INTRPT_TEST;
- cmd.req.arg1 = adapter->ahw->pci_func;
- qlcnic_issue_cmd(adapter, &cmd);
- ret = cmd.rsp.cmd;
+ ahw->diag_cnt = 0;
+ qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
if (ret)
goto done;
- msleep(10);
-
- ret = !adapter->ahw->diag_cnt;
+ usleep_range(1000, 12000);
+ ret = !ahw->diag_cnt;
done:
+ qlcnic_free_mbx_args(&cmd);
qlcnic_diag_free_res(netdev, max_sds_rings);
-clear_it:
+clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
@@ -750,7 +881,7 @@ int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
}
-static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
@@ -761,11 +892,10 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
-
adapter->ahw->diag_cnt = 0;
qlcnic_xmit_frame(skb, adapter->netdev);
-
loop = 0;
+
do {
msleep(1);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -776,42 +906,46 @@ static int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
dev_kfree_skb_any(skb);
if (!adapter->ahw->diag_cnt)
- QLCDB(adapter, DRV,
- "LB Test: packet #%d was not received\n", i + 1);
+ dev_warn(&adapter->pdev->dev,
+ "LB Test: packet #%d was not received\n",
+ i + 1);
else
cnt++;
}
if (cnt != i) {
- dev_warn(&adapter->pdev->dev, "LB Test failed\n");
- if (mode != QLCNIC_ILB_MODE) {
+ dev_err(&adapter->pdev->dev,
+ "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ if (mode != QLCNIC_ILB_MODE)
dev_warn(&adapter->pdev->dev,
- "WARNING: Please make sure external"
- "loopback connector is plugged in\n");
- }
+ "WARNING: Please check loopback cable\n");
return -1;
}
return 0;
}
-static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int loop = 0;
int ret;
- if (!(adapter->ahw->capabilities &
- QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
- netdev_info(netdev, "Firmware is not loopback test capable\n");
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
+ if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware do not support loopback test\n");
return -EOPNOTSUPP;
}
- QLCDB(adapter, DRV, "%s loopback test in progress\n",
- mode == QLCNIC_ILB_MODE ? "internal" : "external");
- if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
- netdev_warn(netdev, "Loopback test not supported for non "
- "privilege function\n");
+ dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported in nonprivileged mode\n");
return 0;
}
@@ -823,12 +957,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
goto clear_it;
sds_ring = &adapter->recv_ctx->sds_rings[0];
-
ret = qlcnic_set_lb_mode(adapter, mode);
if (ret)
goto free_res;
- adapter->ahw->diag_cnt = 0;
+ ahw->diag_cnt = 0;
do {
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
@@ -841,11 +974,11 @@ static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
ret = adapter->ahw->diag_cnt;
goto free_res;
}
- } while (!QLCNIC_IS_LB_CONFIGURED(adapter->ahw->loopback_state));
+ } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
ret = qlcnic_do_lb_test(adapter, mode);
- qlcnic_clear_lb_mode(adapter);
+ qlcnic_clear_lb_mode(adapter, mode);
free_res:
qlcnic_diag_free_res(netdev, max_sds_rings);
@@ -878,20 +1011,18 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
- if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
- data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
- if (data[4])
- eth_test->flags |= ETH_TEST_FL_FAILED;
- eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
- }
+
+ data[4] = qlcnic_eeprom_test(dev);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
static void
-qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- int index, i, j;
+ int index, i, num_stats;
switch (stringset) {
case ETH_SS_TEST:
@@ -904,14 +1035,34 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
- for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
- memcpy(data + index * ETH_GSTRING_LEN,
- qlcnic_mac_stats_strings[j],
- ETH_GSTRING_LEN);
+ if (qlcnic_83xx_check(adapter)) {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_tx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_rx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ return;
+ } else {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
- for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
+ num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ for (i = 0; i < num_stats; index++, i++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_device_gstrings_stats[i],
ETH_GSTRING_LEN);
@@ -920,89 +1071,84 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
static void
-qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
+qlcnic_fill_stats(u64 *data, void *stats, int type)
{
- int ind = *index;
-
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
(struct qlcnic_mac_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
- data[ind++] =
- QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
- data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
} else if (type == QLCNIC_ESW_STATS) {
struct __qlcnic_esw_statistics *esw_stats =
(struct __qlcnic_esw_statistics *)stats;
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
- data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
-
- *index = ind;
}
-static void
-qlcnic_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 * data)
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret;
-
- for (index = 0; index < QLCNIC_STATS_LEN; index++) {
- char *p =
- (char *)adapter +
- qlcnic_gstrings_stats[index].stat_offset;
- data[index] =
- (qlcnic_gstrings_stats[index].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
+ int index, ret, length, size;
+ char *p;
+
+ memset(data, 0, stats->n_stats * sizeof(u64));
+ length = QLCNIC_STATS_LEN;
+ for (index = 0; index < length; index++) {
+ p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+ size = qlcnic_gstrings_stats[index].sizeof_stat;
+ *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
}
- /* Retrieve MAC statistics from firmware */
- memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
- qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
+ if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->linkup)
+ qlcnic_83xx_get_stats(adapter, data);
+ return;
+ } else {
+ /* Retrieve MAC statistics from firmware */
+ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+ qlcnic_get_mac_stats(adapter, &mac_stats);
+ qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ }
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
@@ -1013,14 +1159,13 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
-
+ qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
return;
- qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
+ qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
}
static int qlcnic_set_led(struct net_device *dev,
@@ -1030,6 +1175,8 @@ static int qlcnic_set_led(struct net_device *dev,
int max_sds_rings = adapter->max_sds_rings;
int err = -EIO, active = 1;
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non-"
"privileged function\n");
@@ -1096,6 +1243,8 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ if (qlcnic_83xx_check(adapter))
+ return;
wol->supported = 0;
wol->wolopts = 0;
@@ -1114,8 +1263,10 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
if (!(wol_cfg & (1 << adapter->portnum)))
@@ -1307,7 +1458,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
}
netdev_info(netdev, "Forcing a FW dump\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
if (fw_dump->enable && fw_dump->tmpl_hdr) {
@@ -1327,7 +1478,7 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
return 0;
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
return 0;
case QLCNIC_SET_QUIESCENT:
@@ -1341,8 +1492,8 @@ qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
netdev_err(netdev, "FW dump not supported\n");
return -ENOTSUPP;
}
- for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
- if (val->flag == FW_DUMP_LEVELS[i]) {
+ for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+ if (val->flag == qlcnic_fw_dump_level[i]) {
fw_dump->tmpl_hdr->drv_cap_mask =
val->flag;
netdev_info(netdev, "Driver mask changed to: 0x%x\n",
@@ -1386,10 +1537,3 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_dump_data = qlcnic_get_dump_data,
.set_dump = qlcnic_set_dump,
};
-
-const struct ethtool_ops qlcnic_ethtool_failed_ops = {
- .get_settings = qlcnic_get_settings,
- .get_drvinfo = qlcnic_get_drvinfo,
- .set_msglevel = qlcnic_set_msglevel,
- .get_msglevel = qlcnic_get_msglevel,
-};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 49cc1ac4f05..44197ca1456 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include "qlcnic_hw.h"
+
/*
* The basic unit of access when reading/writing control registers.
*/
@@ -387,9 +389,6 @@ enum {
#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER 0x0d417340
-
/******************************************************************************
*
* Definitions specific to M25P flash
@@ -449,13 +448,10 @@ enum {
#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
-#define QLCNIC_PCI_MN_2M (0)
-#define QLCNIC_PCI_MS_2M (0x80000)
#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
#define QLCNIC_PCI_CAMQM (0x04800000UL)
#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
-#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
@@ -491,7 +487,7 @@ enum {
#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
(QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
-
+#define MAX_CTL_CHECK 1000
#define TEST_AGT_CTRL (0x00)
#define TA_CTL_START BIT_0
@@ -499,44 +495,6 @@ enum {
#define TA_CTL_WRITE BIT_2
#define TA_CTL_BUSY BIT_3
-/*
- * Register offsets for MN
- */
-#define MIU_TEST_AGT_BASE (0x90)
-
-#define MIU_TEST_AGT_ADDR_LO (0x04)
-#define MIU_TEST_AGT_ADDR_HI (0x08)
-#define MIU_TEST_AGT_WRDATA_LO (0x10)
-#define MIU_TEST_AGT_WRDATA_HI (0x14)
-#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
-#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
-#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
-#define MIU_TEST_AGT_RDDATA_LO (0x18)
-#define MIU_TEST_AGT_RDDATA_HI (0x1c)
-#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
-#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
-#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
-
-#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
-#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
-
-/*
- * Register offsets for MS
- */
-#define SIU_TEST_AGT_BASE (0x60)
-
-#define SIU_TEST_AGT_ADDR_LO (0x04)
-#define SIU_TEST_AGT_ADDR_HI (0x18)
-#define SIU_TEST_AGT_WRDATA_LO (0x08)
-#define SIU_TEST_AGT_WRDATA_HI (0x0c)
-#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
-#define SIU_TEST_AGT_RDDATA_LO (0x10)
-#define SIU_TEST_AGT_RDDATA_HI (0x14)
-#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
-
-#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
-#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
-
/* XG Link status */
#define XG_LINK_UP 0x10
#define XG_LINK_DOWN 0x20
@@ -556,9 +514,6 @@ enum {
#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
-#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
-#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
-#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
@@ -568,28 +523,17 @@ enum {
#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define QLCNIC_CDRP_MAX_ARGS 4
+#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4)))
+
#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
-#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
-#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
-#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
-#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
-#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
-
#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
-#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
-
-#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
-
-#define CRB_V2P_0 (QLCNIC_REG(0x290))
-#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
-#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
-#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
* CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
@@ -616,11 +560,6 @@ enum {
/* Lock IDs for PHY lock */
#define PHY_LOCK_DRIVER 0x44524956
-/* Used for PS PCI Memory access */
-#define PCIX_PS_OP_ADDR_LO (0x10000)
-/* via CRB (PS side only) */
-#define PCIX_PS_OP_ADDR_HI (0x10004)
-
#define PCIX_INT_VECTOR (0x10100)
#define PCIX_INT_MASK (0x10104)
@@ -682,17 +621,6 @@ enum {
#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
-#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
-#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
-#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
-#define QLCNIC_CRB_DRV_ACTIVE (QLCNIC_CAM_RAM(0x138))
-#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
-
-#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
-#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
-#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
-#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
-#define QLCNIC_CRB_DEV_NPAR_STATE (QLCNIC_CAM_RAM(0x19c))
#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
@@ -711,7 +639,6 @@ enum {
#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
@@ -744,6 +671,9 @@ enum {
#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 45
+#define QLCNIC_MAX_MC_COUNT 38
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
+
#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
@@ -766,26 +696,13 @@ struct qlcnic_legacy_intr_set {
u32 pci_int_reg;
};
-#define QLCNIC_FW_API 0x1b216c
-#define QLCNIC_DRV_OP_MODE 0x1b2170
#define QLCNIC_MSIX_BASE 0x132110
#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
-/* FW dump defines */
-#define MIU_TEST_CTR 0x41000090
-#define MIU_TEST_ADDR_LO 0x41000094
-#define MIU_TEST_ADDR_HI 0x41000098
#define FLASH_ROM_WINDOW 0x42110030
#define FLASH_ROM_DATA 0x42150000
-
-static const u32 FW_DUMP_LEVELS[] = {
- 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
-
-static const u32 MIU_TEST_READ_DATA[] = {
- 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
-
#define QLCNIC_FW_DUMP_REG1 0x00130060
#define QLCNIC_FW_DUMP_REG2 0x001e0000
#define QLCNIC_FLASH_SEM2_LK 0x0013C010
@@ -796,7 +713,8 @@ static const u32 MIU_TEST_READ_DATA[] = {
enum {
QLCNIC_MGMT_FUNC = 0,
QLCNIC_PRIV_FUNC = 1,
- QLCNIC_NON_PRIV_FUNC = 2
+ QLCNIC_NON_PRIV_FUNC = 2,
+ QLCNIC_UNKNOWN_FUNC_MODE = 3
};
enum {
@@ -1013,6 +931,8 @@ enum {
#define QLCNIC_NIU_PROMISC_MODE 1
#define QLCNIC_NIU_ALLMULTI_MODE 2
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
struct crb_128M_2M_sub_block_map {
unsigned valid;
unsigned start_128M;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 7a6d5ebe4e0..325e11e1ce0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1,6 +1,6 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
@@ -344,21 +344,26 @@ qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
-static int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
- else
- return -EIO;
+ else {
+ data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
+ if (data == -EIO)
+ return -EIO;
+ }
return data;
}
-static void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
}
static int
@@ -417,9 +422,8 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
return 0;
}
-static int
-qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
- __le16 vlan_id, unsigned op)
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ __le16 vlan_id, u8 op)
{
struct qlcnic_nic_req req;
struct qlcnic_mac_req *mac_req;
@@ -442,7 +446,29 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
}
-static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+ int err = -EINVAL;
+
+ /* Delete MAC from the existing list */
+ list_for_each(head, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ list_del(&cur->list);
+ kfree(cur);
+ return err;
+ }
+ }
+ return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct list_head *head;
struct qlcnic_mac_list_s *cur;
@@ -455,11 +481,9 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
}
cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
- if (cur == NULL) {
- dev_err(&adapter->netdev->dev,
- "failed to add mac address filter\n");
+ if (cur == NULL)
return -ENOMEM;
- }
+
memcpy(cur->mac_addr, addr, ETH_ALEN);
if (qlcnic_sre_macaddr_change(adapter,
@@ -506,17 +530,17 @@ void qlcnic_set_multi(struct net_device *netdev)
}
send_fw_cmd:
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) {
qlcnic_alloc_lb_filters_mem(adapter);
- adapter->mac_learn = 1;
+ adapter->drv_mac_learn = true;
} else {
- adapter->mac_learn = 0;
+ adapter->drv_mac_learn = false;
}
qlcnic_nic_set_promisc(adapter, mode);
}
-int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
struct qlcnic_nic_req req;
u64 word;
@@ -555,18 +579,20 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
int i;
+ unsigned long time;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
- hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
- {
- if (jiffies >
- (QLCNIC_FILTER_AGE * HZ + tmp_fil->ftime)) {
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
qlcnic_sre_macaddr_change(adapter,
- tmp_fil->faddr, tmp_fil->vlan_id,
- tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
- QLCNIC_MAC_DEL);
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -575,6 +601,21 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
}
}
}
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+ head = &(adapter->rx_fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode)
+ {
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ spin_lock_bh(&adapter->rx_mac_learn_lock);
+ adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->rx_mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
}
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
@@ -583,14 +624,17 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
int i;
+ u8 cmd;
- for (i = 0; i < adapter->fhash.fmax; i++) {
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
head = &(adapter->fhash.fhead[i]);
-
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
- qlcnic_sre_macaddr_change(adapter, tmp_fil->faddr,
- tmp_fil->vlan_id, tmp_fil->vlan_id ?
- QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL);
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
spin_lock_bh(&adapter->mac_learn_lock);
adapter->fhash.fnum--;
hlist_del(&tmp_fil->fnode);
@@ -620,12 +664,13 @@ static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
return rv;
}
-int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
if (qlcnic_set_fw_loopback(adapter, mode))
return -EIO;
- if (qlcnic_nic_set_promisc(adapter, VPORT_MISS_MODE_ACCEPT_ALL)) {
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
qlcnic_set_fw_loopback(adapter, 0);
return -EIO;
}
@@ -634,11 +679,11 @@ int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
-void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
- int mode = VPORT_MISS_MODE_DROP;
struct net_device *netdev = adapter->netdev;
+ mode = VPORT_MISS_MODE_DROP;
qlcnic_set_fw_loopback(adapter, 0);
if (netdev->flags & IFF_PROMISC)
@@ -648,12 +693,13 @@ void qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter)
qlcnic_nic_set_promisc(adapter, mode);
msleep(1000);
+ return 0;
}
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
-int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
int rv;
@@ -675,10 +721,14 @@ int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
- return rv;
}
-int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+#define QLCNIC_ENABLE_IPV4_LRO 1
+#define QLCNIC_ENABLE_IPV6_LRO 2
+#define QLCNIC_NO_DEST_IPV4_CHECK (1 << 8)
+#define QLCNIC_NO_DEST_IPV6_CHECK (2 << 8)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -694,7 +744,15 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64(enable);
+ word = 0;
+ if (enable) {
+ word = QLCNIC_ENABLE_IPV4_LRO | QLCNIC_NO_DEST_IPV4_CHECK;
+ if (adapter->ahw->capabilities2 & QLCNIC_FW_CAP2_HW_LRO_IPV6)
+ word |= QLCNIC_ENABLE_IPV6_LRO |
+ QLCNIC_NO_DEST_IPV6_CHECK;
+ }
+
+ req.words[0] = cpu_to_le64(word);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
@@ -734,9 +792,12 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
}
-#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL
-int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
@@ -761,13 +822,19 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
* 7-6: hash_type_ipv6
* 8: enable
* 9: use indirection table
- * 47-10: reserved
- * 63-48: indirection table mask
+ * 10: type-c rss
+ * 11: udp rss
+ * 47-12: reserved
+ * 62-48: indirection table mask
+ * 63: feature flag
*/
- word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
- ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
((u64)(enable & 0x1) << 8) |
- ((0x7ULL) << 48);
+ ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+ (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+ (u64)QLCNIC_RSS_FEATURE_FLAG;
+
req.words[0] = cpu_to_le64(word);
for (i = 0; i < 5; i++)
req.words[i+1] = cpu_to_le64(key[i]);
@@ -779,7 +846,8 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
return rv;
}
-int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
{
struct qlcnic_nic_req req;
struct qlcnic_ipaddr *ipa;
@@ -801,23 +869,19 @@ int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, int cmd)
dev_err(&adapter->netdev->dev,
"could not notify %s IP 0x%x request\n",
(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
-
- return rv;
}
-int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
{
struct qlcnic_nic_req req;
u64 word;
int rv;
-
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
req.words[0] = cpu_to_le64(enable | (enable << 8));
-
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
dev_err(&adapter->netdev->dev,
@@ -882,7 +946,8 @@ netdev_features_t qlcnic_fix_features(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ qlcnic_82xx_check(adapter)) {
netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -903,13 +968,15 @@ int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
if (!(changed & NETIF_F_LRO))
return 0;
- netdev->features = features ^ NETIF_F_LRO;
+ netdev->features ^= NETIF_F_LRO;
if (qlcnic_config_hw_lro(adapter, hw_lro))
return -EIO;
- if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
- return -EIO;
+ if (!hw_lro && qlcnic_82xx_check(adapter)) {
+ if (qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+ }
return 0;
}
@@ -981,8 +1048,8 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
return 0;
}
-int
-qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ u32 data)
{
unsigned long flags;
int rv;
@@ -1013,7 +1080,7 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
return -EIO;
}
-int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
{
unsigned long flags;
int rv;
@@ -1042,7 +1109,6 @@ int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
return -1;
}
-
void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
u32 offset)
{
@@ -1268,7 +1334,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
return ret;
}
-int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
int offset, board_type, magic;
struct pci_dev *pdev = adapter->pdev;
@@ -1341,7 +1407,7 @@ qlcnic_wol_supported(struct qlcnic_adapter *adapter)
return 0;
}
-int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
{
struct qlcnic_nic_req req;
int rv;
@@ -1353,7 +1419,7 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
- req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
req.words[1] = cpu_to_le64(state);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
@@ -1362,3 +1428,56 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
+
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ u32 func;
+ u32 msix_base;
+
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+}
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+ return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_pcie_sem_unlock(adapter, 5);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644
index 00000000000..5b8749eda11
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -0,0 +1,194 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+ QLCNIC_PEG_HALT_STATUS1 = 0,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_FLASH_LOCK_OWNER,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_FW_VERSION_MAJOR,
+ QLCNIC_FW_VERSION_MINOR,
+ QLCNIC_FW_VERSION_SUB,
+ QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_FW_IMG_VALID,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_ASIC_TEMP,
+ QLCNIC_FW_API,
+ QLCNIC_DRV_OP_MODE,
+ QLCNIC_FLASH_LOCK,
+ QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr) \
+ readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value) \
+ writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr) \
+ readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value) \
+ writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
+
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1
+#define QLCNIC_CMD_CONFIG_INTRPT 0x2
+#define QLCNIC_CMD_CREATE_RX_CTX 0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX 0x8
+#define QLCNIC_CMD_CREATE_TX_CTX 0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX 0xa
+#define QLCNIC_CMD_CONFIGURE_LRO 0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD
+#define QLCNIC_CMD_GET_STATISTICS 0xF
+#define QLCNIC_CMD_INTRPT_TEST 0x11
+#define QLCNIC_CMD_SET_MTU 0x12
+#define QLCNIC_CMD_READ_PHY 0x13
+#define QLCNIC_CMD_WRITE_PHY 0x14
+#define QLCNIC_CMD_READ_HW_REG 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL 0x16
+#define QLCNIC_CMD_SET_FLOW_CTL 0x17
+#define QLCNIC_CMD_READ_MAX_MTU 0x18
+#define QLCNIC_CMD_READ_MAX_LRO 0x19
+#define QLCNIC_CMD_MAC_ADDRESS 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO 0x20
+#define QLCNIC_CMD_GET_NIC_INFO 0x21
+#define QLCNIC_CMD_SET_NIC_INFO 0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING 0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a
+#define QLCNIC_CMD_CONFIG_PORT 0x2e
+#define QLCNIC_CMD_TEMP_SIZE 0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_GET_MAC_STATS 0x37
+#define QLCNIC_CMD_SET_DRV_VER 0x38
+#define QLCNIC_CMD_CONFIGURE_RSS 0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
+#define QLCNIC_CMD_CONFIGURE_LED 0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45
+#define QLCNIC_CMD_GET_LINK_EVENT 0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
+#define QLCNIC_CMD_IDC_ACK 0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG 0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG 0x67
+#define QLCNIC_CMD_GET_LINK_STATUS 0x68
+#define QLCNIC_CMD_SET_LED_CONFIG 0x69
+#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
+#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+
+#define QLCNIC_INTRPT_INTX 1
+#define QLCNIC_INTRPT_MSIX 3
+#define QLCNIC_INTRPT_ADD 1
+#define QLCNIC_INTRPT_DEL 2
+
+#define QLCNIC_GET_CURRENT_MAC 1
+#define QLCNIC_SET_STATION_MAC 2
+#define QLCNIC_GET_DEFAULT_MAC 3
+#define QLCNIC_GET_FAC_DEF_MAC 4
+#define QLCNIC_SET_FAC_DEF_MAC 5
+
+#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_COMP_EVENT 0x8100
+#define QLCNIC_MBX_REQUEST_EVENT 0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
+
+struct qlcnic_mailbox_metadata {
+ u32 cmd;
+ u32 in_args;
+ u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER 1
+#define QLCNIC_CLR_OWNER 0
+#define QLCNIC_MBX_TIMEOUT 10000
+
+#define QLCNIC_MBX_RSP_OK 1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+
+int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+ u64 *uaddr, __le16 vlan_id);
+void qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *tx_ring, int);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+#endif /* __QLCNIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index de79cde233d..d28336fc65a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1,15 +1,12 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/if_vlan.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
struct crb_addr_pair {
u32 addr;
@@ -166,13 +163,12 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
{
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
int ring;
recv_ctx = adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
- goto skip_rds;
+ return;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -180,16 +176,6 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
rds_ring->rx_buf_arr = NULL;
}
kfree(recv_ctx->rds_rings);
-
-skip_rds:
- if (adapter->tx_ring == NULL)
- return;
-
- tx_ring = adapter->tx_ring;
- vfree(tx_ring->cmd_buf_arr);
- tx_ring->cmd_buf_arr = NULL;
- kfree(adapter->tx_ring);
- adapter->tx_ring = NULL;
}
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
@@ -197,39 +183,16 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_rx_buffer *rx_buf;
- int ring, i, size;
-
- struct qlcnic_cmd_buffer *cmd_buf_arr;
- struct net_device *netdev = adapter->netdev;
-
- size = sizeof(struct qlcnic_host_tx_ring);
- tx_ring = kzalloc(size, GFP_KERNEL);
- if (tx_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
- return -ENOMEM;
- }
- adapter->tx_ring = tx_ring;
-
- tx_ring->num_desc = adapter->num_txd;
- tx_ring->txq = netdev_get_tx_queue(netdev, 0);
-
- cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
- if (cmd_buf_arr == NULL) {
- dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
- goto err_out;
- }
- tx_ring->cmd_buf_arr = cmd_buf_arr;
+ int ring, i;
recv_ctx = adapter->recv_ctx;
- size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
- rds_ring = kzalloc(size, GFP_KERNEL);
- if (rds_ring == NULL) {
- dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
goto err_out;
- }
+
recv_ctx->rds_rings = rds_ring;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -255,11 +218,9 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
break;
}
rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
- if (rds_ring->rx_buf_arr == NULL) {
- dev_err(&netdev->dev, "Failed to allocate "
- "rx buffer ring %d\n", ring);
+ if (rds_ring->rx_buf_arr == NULL)
goto err_out;
- }
+
INIT_LIST_HEAD(&rds_ring->free_list);
/*
* Now go through all of them, set reference handles
@@ -327,7 +288,6 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
long done = 0;
cond_resched();
-
while (done == 0) {
done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
done &= 2;
@@ -416,8 +376,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
u32 off;
struct pci_dev *pdev = adapter->pdev;
- QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
- QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
/* Halt all the individual PEGs and other blocks */
/* disable all I2Q */
@@ -482,10 +442,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
}
buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
- if (buf == NULL) {
- dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ if (buf == NULL)
return -ENOMEM;
- }
for (i = 0; i < n; i++) {
if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
@@ -564,8 +522,8 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
msleep(1);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
- QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
return 0;
}
@@ -576,7 +534,7 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
switch (val) {
case PHAN_INITIALIZE_COMPLETE:
@@ -592,7 +550,8 @@ static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
} while (--retries);
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
out_err:
dev_err(&adapter->pdev->dev, "Command Peg initialization not "
@@ -607,7 +566,7 @@ qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
do {
- val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
if (val == PHAN_PEG_RCV_INITIALIZED)
return 0;
@@ -638,7 +597,7 @@ qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
if (err)
return err;
- QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
return err;
}
@@ -649,7 +608,7 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
int timeo;
u32 val;
- val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
val = QLC_DEV_GET_DRV(val, adapter->portnum);
if ((val & 0x3) != QLCNIC_TYPE_NIC) {
dev_err(&adapter->pdev->dev,
@@ -689,11 +648,9 @@ static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
}
entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
- flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
- if (flt_entry == NULL) {
- dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL)
return -EIO;
- }
ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
sizeof(struct qlcnic_flt_header),
@@ -1096,11 +1053,13 @@ qlcnic_check_fw_hearbeat(struct qlcnic_adapter *adapter)
u32 heartbeat, ret = -EIO;
int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
- adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
do {
msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
ret = QLCNIC_RCODE_SUCCESS;
break;
@@ -1270,7 +1229,7 @@ qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
return -EINVAL;
}
- QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 09aa310b619..6387e0cc3ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -1,3 +1,10 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
@@ -5,9 +12,6 @@
#include "qlcnic.h"
-#define QLCNIC_MAC_HASH(MAC)\
- ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
-
#define TX_ETHER_PKT 0x01
#define TX_TCP_PKT 0x02
#define TX_UDP_PKT 0x03
@@ -84,6 +88,8 @@
#define qlcnic_get_lro_sts_mss(sts_data1) \
((sts_data1 >> 32) & 0x0FFFF)
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03
#define QLCNIC_RXPKT_DESC 0x04
@@ -91,18 +97,152 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
+#define QLCNIC_TX_POLL_BUDGET 128
+#define QLCNIC_TCP_HDR_SIZE 20
+#define QLCNIC_TCP_TS_OPTION_SIZE 12
+#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
/* for status field in status_desc */
#define STATUS_CKSUM_LOOP 0
#define STATUS_CKSUM_OK 2
-static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 uaddr, __le16 vlan_id,
- struct qlcnic_host_tx_ring *tx_ring)
+#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7)
+#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
+
+struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *, u16, u16);
+
+inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+inline void qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline u8 qlcnic_mac_hash(u64 mac)
+{
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff));
+}
+
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+ u16 handle, u8 ring_id)
+{
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
+ return handle | (ring_id << 15);
+ else
+ return handle;
+}
+
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+ return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
+ int loopback_pkt, __le16 vlan_id)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_node *tmp_hnode, *n;
+ struct hlist_head *head;
+ unsigned long time;
+ u64 src_addr = 0;
+ u8 hindex, found = 0, op;
+ int ret;
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+
+ if (loopback_pkt) {
+ if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+ return;
+
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->rx_fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ time = tmp_fil->ftime;
+ if (jiffies > (QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ fil->ftime = jiffies;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ fil->vlan_id = vlan_id;
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->rx_fhash.fnum++;
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ } else {
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->rx_fhash.fhead[hindex]);
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ return;
+ }
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter,
+ (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ hlist_del(&(tmp_fil->fnode));
+ adapter->rx_fhash.fnum--;
+ }
+ }
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ }
+}
+
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ __le16 vlan_id)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -128,14 +268,14 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
}
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
- struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_node *tmp_hnode, *n;
struct hlist_head *head;
+ struct net_device *netdev = adapter->netdev;
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
u64 src_addr = 0;
__le16 vlan_id = 0;
u8 hindex;
@@ -143,23 +283,23 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
return;
- if (adapter->fhash.fnum >= adapter->fhash.fmax)
+ if (adapter->fhash.fnum >= adapter->fhash.fmax) {
+ adapter->stats.mac_filter_limit_overrun++;
+ netdev_info(netdev, "Can not add more than %d mac addresses\n",
+ adapter->fhash.fmax);
return;
+ }
- /* Only NPAR capable devices support vlan based learning*/
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
- vlan_id = first_desc->vlan_TCI;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
- hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
+ hindex = qlcnic_mac_hash(src_addr) & (adapter->fhash.fbucket_size - 1);
head = &(adapter->fhash.fhead[hindex]);
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
-
+ tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
- qlcnic_change_filter(adapter, src_addr, vlan_id,
- tx_ring);
+ qlcnic_change_filter(adapter, &src_addr,
+ vlan_id);
tmp_fil->ftime = jiffies;
return;
}
@@ -169,17 +309,13 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
-
+ qlcnic_change_filter(adapter, &src_addr, vlan_id);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
-
spin_lock(&adapter->mac_learn_lock);
-
hlist_add_head(&(fil->fnode), head);
adapter->fhash.fnum++;
-
spin_unlock(&adapter->mac_learn_lock);
}
@@ -474,8 +610,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
goto unwind_buff;
- if (adapter->mac_learn)
- qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
+ if (adapter->drv_mac_learn)
+ qlcnic_send_filter(adapter, first_desc, skb);
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
@@ -528,8 +664,8 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
skb_reserve(skb, NET_IP_ALIGN);
- dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(pdev, dma)) {
adapter->stats.rx_dma_map_error++;
@@ -544,12 +680,13 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
}
static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring,
+ u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- uint32_t producer;
+ uint32_t producer, handle;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
@@ -557,7 +694,6 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
producer = rds_ring->producer;
head = &rds_ring->free_list;
-
while (!list_empty(head)) {
buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
@@ -565,28 +701,29 @@ static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
break;
}
-
count++;
list_del(&buffer->list);
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter,
+ buffer->ref_handle, ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
producer = get_next_index(producer, rds_ring->num_desc);
}
-
if (count) {
rds_ring->producer = producer;
writel((producer - 1) & (rds_ring->num_desc - 1),
rds_ring->crb_rcv_producer);
}
-
spin_unlock(&rds_ring->lock);
}
-static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int budget)
{
u32 sw_consumer, hw_consumer;
int i, done, count = 0;
@@ -594,7 +731,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
if (!spin_trylock(&adapter->tx_clean_lock))
return 1;
@@ -615,22 +751,19 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
}
-
adapter->stats.xmitfinished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
- if (++count >= MAX_STATUS_HANDLE)
+ if (++count >= budget)
break;
}
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
-
smp_mb();
-
if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
netif_wake_queue(netdev);
@@ -654,7 +787,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
-
spin_unlock(&adapter->tx_clean_lock);
return done;
@@ -662,16 +794,15 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
static int qlcnic_poll(struct napi_struct *napi, int budget)
{
+ int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
- int tx_complete, work_done;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
-
- tx_complete = qlcnic_process_cmd_ring(adapter);
+ tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
@@ -804,26 +935,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
}
}
-static struct sk_buff *
-qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring, u16 index,
- u16 cksum)
+struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
{
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
- buffer = &rds_ring->rx_buf_arr[index];
-
+ buffer = &ring->rx_buf_arr[index];
if (unlikely(buffer->skb == NULL)) {
WARN_ON(1);
return NULL;
}
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size,
PCI_DMA_FROMDEVICE);
skb = buffer->skb;
-
if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
(cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
@@ -832,6 +960,7 @@ qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
+
buffer->skb = NULL;
return skb;
@@ -871,8 +1000,8 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
- int index, length, cksum, pkt_offset;
- u16 vid = 0xffff;
+ int index, length, cksum, pkt_offset, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
@@ -892,6 +1021,14 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
if (length > rds_ring->skb_size)
skb_put(skb, rds_ring->skb_size);
else
@@ -933,10 +1070,11 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
struct tcphdr *th;
bool push, timestamp;
- int index, l2_hdr_offset, l4_hdr_offset;
- u16 lro_length, length, data_offset, vid = 0xffff;
+ int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+ u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
u32 seq_number;
if (unlikely(ring > adapter->max_rds_rings))
@@ -961,6 +1099,14 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
if (timestamp)
data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
else
@@ -976,12 +1122,21 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
}
skb->protocol = eth_type_trans(skb, netdev);
- iph = (struct iphdr *)skb->data;
- th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
- length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
- iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
th->psh = push;
th->seq = htonl(seq_number);
length = skb->len;
@@ -1011,9 +1166,9 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
struct list_head *cur;
struct status_desc *desc;
struct qlcnic_rx_buffer *rxbuf;
+ int opcode, desc_cnt, count = 0;
u64 sts_data0, sts_data1;
- __le64 owner_phantom = cpu_to_le64(STATUS_OWNER_PHANTOM);
- int opcode, ring, desc_cnt, count = 0;
+ u8 ring;
u32 consumer = sds_ring->consumer;
while (count < max) {
@@ -1025,7 +1180,6 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
opcode = qlcnic_get_sts_opcode(sts_data0);
-
switch (opcode) {
case QLCNIC_RXPKT_DESC:
case QLCNIC_OLD_RXPKT_DESC:
@@ -1045,18 +1199,16 @@ int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
default:
goto skip;
}
-
WARN_ON(desc_cnt > 1);
if (likely(rxbuf))
list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
else
adapter->stats.null_rxbuf++;
-
skip:
for (; desc_cnt > 0; desc_cnt--) {
desc = &sds_ring->desc_head[consumer];
- desc->status_desc_data[0] = owner_phantom;
+ desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
consumer = get_next_index(consumer, sds_ring->num_desc);
}
count++;
@@ -1064,7 +1216,6 @@ skip:
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
-
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
@@ -1077,7 +1228,7 @@ skip:
spin_unlock(&rds_ring->lock);
}
- qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
}
if (count) {
@@ -1089,12 +1240,12 @@ skip:
}
void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
- struct qlcnic_host_rds_ring *rds_ring)
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
int count = 0;
- u32 producer;
+ u32 producer, handle;
struct list_head *head;
producer = rds_ring->producer;
@@ -1115,7 +1266,9 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
/* make a rcv descriptor */
pdesc = &rds_ring->desc_head[producer];
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
- pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+ ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
producer = get_next_index(producer, rds_ring->num_desc);
}
@@ -1185,7 +1338,7 @@ static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
return;
}
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
struct status_desc *desc;
@@ -1222,26 +1375,8 @@ void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
writel(consumer, sds_ring->crb_sts_consumer);
}
-void qlcnic_fetch_mac(u32 off1, u32 off2, u8 alt_mac, u8 *mac)
-{
- u32 mac_low, mac_high;
- int i;
-
- mac_low = off1;
- mac_high = off2;
-
- if (alt_mac) {
- mac_low |= (mac_low >> 16) | (mac_high << 16);
- mac_high >>= 16;
- }
-
- for (i = 0; i < 2; i++)
- mac[i] = (u8)(mac_high >> ((1 - i) * 8));
- for (i = 2; i < 6; i++)
- mac[i] = (u8)(mac_low >> ((5 - i) * 8));
-}
-
-int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
{
int ring, max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1254,8 +1389,7 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
-
- if (ring == max_sds_rings - 1)
+ if (ring == adapter->max_sds_rings - 1)
netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
QLCNIC_NETDEV_WEIGHT / max_sds_rings);
else
@@ -1263,10 +1397,15 @@ int qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
QLCNIC_NETDEV_WEIGHT*2);
}
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
return 0;
}
-void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1278,9 +1417,10 @@ void qlcnic_napi_del(struct qlcnic_adapter *adapter)
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+ qlcnic_free_tx_rings(adapter);
}
-void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1296,7 +1436,7 @@ void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
@@ -1312,3 +1452,481 @@ void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
napi_disable(&sds_ring->napi);
}
}
+
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+ if (lro_pkt)
+ return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+ else
+ return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
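
The two masks above each select a single bit out of the 64-bit status word the 83xx firmware writes per received frame. A minimal standalone sketch of that decode follows; the bit positions 36 and 46 are taken from the patch, everything else is illustrative and not part of the driver:

#include <stdio.h>
#include <stdint.h>

#define NORMAL_LB_BIT (1ULL << 36)	/* QLC_83XX_NORMAL_LB_PKT */
#define LRO_LB_BIT    (1ULL << 46)	/* QLC_83XX_LRO_LB_PKT */

/* Returns 1 if the status word flags the frame as a looped-back packet. */
static int is_lb_pkt(uint64_t sts_data, int lro_pkt)
{
	uint64_t mask = lro_pkt ? LRO_LB_BIT : NORMAL_LB_BIT;

	return (sts_data & mask) ? 1 : 0;
}

int main(void)
{
	uint64_t sts = NORMAL_LB_BIT;	/* pretend the firmware set bit 36 */

	printf("normal lb: %d, lro lb: %d\n",
	       is_lb_pkt(sts, 0), is_lb_pkt(sts, 1));
	return 0;
}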
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_83xx_pktln(sts_data[0]);
+ cksum = qlcnic_83xx_csum_status(sts_data[1]);
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index, is_lb_pkt;
+ u16 lro_length, length, data_offset, gso_size;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring > adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index > rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+ l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+ l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+ push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (adapter->drv_mac_learn &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt,
+ cpu_to_le16(t_vid));
+ }
+ if (qlcnic_83xx_is_tstamp(sts_data[1]))
+ data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ th->psh = push;
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+ skb_shinfo(skb)->gso_size = gso_size;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+ return buffer;
+}
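
Both LRO paths touched here (the reworked 82xx hunk earlier and the new 83xx handler above) rewrite the L3 length field after aggregation, because the hardware reports only the aggregated TCP payload length: for IPv4, tot_len covers IP header plus TCP header plus payload and the header checksum must be recomputed; for IPv6, payload_len covers only TCP header plus payload, since the fixed 40-byte IPv6 header is excluded and carries no checksum. A small arithmetic-only sketch of the two calculations (the header sizes below are made-up inputs, not values from the patch):

#include <stdio.h>

int main(void)
{
	/* 20-byte IPv4 header (ihl = 5), 32-byte TCP header (doff = 8),
	 * 8000 aggregated payload bytes reported as lro_length */
	unsigned int ihl = 5, doff = 8, lro_length = 8000;

	unsigned int ipv4_tot_len = (ihl << 2) + (doff << 2) + lro_length;
	unsigned int ipv6_payload = (doff << 2) + lro_length;

	printf("IPv4 tot_len = %u, IPv6 payload_len = %u\n",
	       ipv4_tot_len, ipv6_payload);
	return 0;
}

In the driver itself the results are stored back with htons() and, for IPv4 only, followed by an ip_fast_csum() refresh.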
+
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf = NULL;
+ u8 ring;
+ u64 sts_data[2];
+ int count = 0, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ break;
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+ switch (opcode) {
+ case QLC_83XX_REG_DESC:
+ rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+ ring, sts_data);
+ break;
+ case QLC_83XX_LRO_DESC:
+ rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+ sts_data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+				 "Unknown opcode: 0x%x\n", opcode);
REPLACED_BY_GR_REPLACE
+ goto skip;
+ }
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ desc = &sds_ring->desc_head[consumer];
+ /* Reset the descriptor */
+ desc->status_desc_data[1] = 0;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ count++;
+ }
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+ return count;
+}
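
The loop above advances a software consumer index over the status ring, clears each descriptor it has handled, and finally writes the new index to the consumer register so the firmware can reuse those entries. A standalone sketch of that consumer walk; the wrap-around helper here is only illustrative of what the driver's get_next_index() provides:

#include <stdio.h>
#include <stdint.h>

/* Illustrative wrap-around increment standing in for get_next_index(). */
static uint32_t next_index(uint32_t index, uint32_t num_desc)
{
	return (index + 1 == num_desc) ? 0 : index + 1;
}

int main(void)
{
	uint32_t consumer = 6, num_desc = 8;
	int budget = 4, count = 0;

	while (count < budget) {
		printf("process and reset descriptor %u\n", consumer);
		consumer = next_index(consumer, num_desc);
		count++;
	}
	/* the driver then does writel(consumer, crb_sts_consumer) */
	printf("write back consumer index %u\n", consumer);
	return 0;
}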
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+
+ budget = QLCNIC_TX_POLL_BUDGET;
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
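
The three poll handlers above follow the usual NAPI contract: a handler may only call napi_complete() and re-arm its interrupt when it did less work than the budget it was given (and, for the combined 83xx handler, only when Tx completion work is also finished); otherwise it returns and stays scheduled. A minimal standalone sketch of that rule, with plain C stand-ins for the kernel calls:

#include <stdio.h>
#include <stdbool.h>

static int  process_rx(int budget)   { return budget - 1; }  /* pretend: ring drained    */
static bool process_tx(int budget)   { return true; }        /* pretend: Tx work is done */
static void napi_complete_stub(void) { printf("napi_complete\n"); }
static void enable_intr_stub(void)   { printf("re-enable interrupt\n"); }

static int poll(int budget)
{
	bool tx_complete = process_tx(budget);
	int work_done = process_rx(budget);

	/* Only complete and re-arm when below budget and Tx is drained. */
	if (work_done < budget && tx_complete) {
		napi_complete_stub();
		enable_intr_stub();
	}
	return work_done;
}

int main(void)
{
	printf("work_done = %d\n", poll(64));
	return 0;
}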
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_83xx_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, max_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ max_sds_rings = adapter->max_sds_rings;
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ QLCNIC_NETDEV_WEIGHT * 2);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_poll,
+ QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll,
+ QLCNIC_NETDEV_WEIGHT);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ length = qlcnic_83xx_pktln(sts_data[0]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ return;
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data[2];
+ int ring, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ return;
+
+ ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d833f592789..5d5fd06c4b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1,24 +1,25 @@
/*
* QLogic qlcnic NIC Driver
- * Copyright (c) 2009-2010 QLogic Corporation
+ * Copyright (c) 2009-2013 QLogic Corporation
*
* See LICENSE.qlcnic for copyright and licensing details.
*/
-#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
-#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/pci.h>
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -29,28 +30,28 @@ char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
-static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
-MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
+MODULE_PARM_DESC(qlcnic_mac_learn,
+ "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
-static int qlcnic_use_msi = 1;
+int qlcnic_use_msi = 1;
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
module_param_named(use_msi, qlcnic_use_msi, int, 0444);
-static int qlcnic_use_msi_x = 1;
+int qlcnic_use_msi_x = 1;
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
-static int qlcnic_auto_fw_reset = 1;
+int qlcnic_auto_fw_reset = 1;
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
-static int qlcnic_load_fw_file;
+int qlcnic_load_fw_file;
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
-static int qlcnic_config_npars;
+int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
@@ -62,9 +63,6 @@ static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
-static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay);
-static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif
@@ -77,9 +75,9 @@ static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
-static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
@@ -93,15 +91,24 @@ static int qlcnic_vlan_rx_del(struct net_device *, u16);
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+ return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+ else
+ return 1;
+}
+
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
-
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
{0,}
};
@@ -120,6 +127,32 @@ static const u32 msi_tgt_status[8] = {
ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};
+static const u32 qlcnic_reg_tbl[] = {
+ 0x1B20A8, /* PEG_HALT_STAT1 */
+ 0x1B20AC, /* PEG_HALT_STAT2 */
+ 0x1B20B0, /* FW_HEARTBEAT */
+ 0x1B2100, /* LOCK ID */
+ 0x1B2128, /* FW_CAPABILITIES */
+ 0x1B2138, /* drv active */
+ 0x1B2140, /* dev state */
+ 0x1B2144, /* drv state */
+ 0x1B2148, /* drv scratch */
+ 0x1B214C, /* dev partition info */
+ 0x1B2174, /* drv idc ver */
+ 0x1B2150, /* fw version major */
+ 0x1B2154, /* fw version minor */
+ 0x1B2158, /* fw version sub */
+ 0x1B219C, /* npar state */
+ 0x1B21FC, /* FW_IMG_VALID */
+ 0x1B2250, /* CMD_PEG_STATE */
+ 0x1B233C, /* RCV_PEG_STATE */
+ 0x1B23B4, /* ASIC TEMP */
+ 0x1B216C, /* FW api */
+ 0x1B2170, /* drv op mode */
+ 0x13C010, /* flash lock */
+ 0x13C014, /* flash unlock */
+};
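
qlcnic_reg_tbl above backs the new per-ASIC register indirection: generic code refers to a shared register by index, and the table installed for the ASIC (this one for 82xx, another installed by qlcnic_83xx_register_map()) supplies the CRB offset. A standalone sketch of that lookup; the enum names are illustrative, only the offsets are copied from the table above:

#include <stdio.h>
#include <stdint.h>

enum shared_reg {
	REG_FW_HEARTBEAT,
	REG_DEV_STATE,
	REG_DRV_STATE,
	MAX_SHARED_REG,
};

static const uint32_t reg_tbl_82xx[MAX_SHARED_REG] = {
	[REG_FW_HEARTBEAT] = 0x1B20B0,
	[REG_DEV_STATE]    = 0x1B2140,
	[REG_DRV_STATE]    = 0x1B2144,
};

/* The driver's QLC_SHARED_REG_RD32()/WR32() perform a CRB access at the
 * offset produced by a lookup like this. */
static uint32_t shared_reg_offset(const uint32_t *tbl, enum shared_reg reg)
{
	return tbl[reg];
}

int main(void)
{
	printf("dev state register at CRB offset 0x%X\n",
	       shared_reg_offset(reg_tbl_82xx, REG_DEV_STATE));
	return 0;
}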
+
static const struct qlcnic_board_info qlcnic_boards[] = {
{0x1077, 0x8020, 0x1077, 0x203,
"8200 Series Single Port 10GbE Converged Network Adapter"
@@ -143,6 +176,7 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -164,35 +198,6 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
-{
- memset(&adapter->stats, 0, sizeof(adapter->stats));
-}
-
-static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
-{
- u32 control;
- int pos;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
- if (enable)
- control |= PCI_MSIX_FLAGS_ENABLE;
- else
- control = 0;
- pci_write_config_dword(pdev, pos, control);
- }
-}
-
-static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
-{
- int i;
-
- for (i = 0; i < count; i++)
- adapter->msix_entries[i].entry = i;
-}
-
static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
@@ -204,12 +209,11 @@ qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
return -EIO;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
- if (!is_valid_ether_addr(netdev->perm_addr))
+ if (!is_valid_ether_addr(netdev->dev_addr))
dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
netdev->dev_addr);
@@ -225,7 +229,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return -EOPNOTSUPP;
if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
+ return -EINVAL;
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
netif_device_detach(netdev);
@@ -243,6 +247,85 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev, const unsigned char *addr)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EOPNOTSUPP;
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return err;
+ }
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = qlcnic_nic_del_mac(adapter, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(netdev, addr);
+ else
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 flags)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ pr_info("%s: FDB e-switch is not enabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(addr, adapter->mac_addr))
+ return err;
+
+ if (is_unicast_ether_addr(addr))
+ err = qlcnic_nic_add_mac(adapter, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(netdev, addr);
+ else
+ err = -EINVAL;
+
+ return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+ struct net_device *netdev, int idx)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!adapter->fdb_mac_learn) {
+ pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+
+ return idx;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -257,6 +340,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_tx_timeout = qlcnic_tx_timeout,
.ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
.ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+ .ndo_fdb_add = qlcnic_fdb_add,
+ .ndo_fdb_del = qlcnic_fdb_del,
+ .ndo_fdb_dump = qlcnic_fdb_dump,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -267,50 +353,125 @@ static const struct net_device_ops qlcnic_netdev_failed_ops = {
};
static struct qlcnic_nic_template qlcnic_ops = {
- .config_bridged_mode = qlcnic_config_bridged_mode,
- .config_led = qlcnic_config_led,
- .start_firmware = qlcnic_start_firmware
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_82xx_config_led,
+ .start_firmware = qlcnic_82xx_start_firmware,
+ .request_reset = qlcnic_82xx_dev_request_reset,
+ .cancel_idc_work = qlcnic_82xx_cancel_idc_work,
+ .napi_add = qlcnic_82xx_napi_add,
+ .napi_del = qlcnic_82xx_napi_del,
+ .config_ipaddr = qlcnic_82xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
};
-static struct qlcnic_nic_template qlcnic_vf_ops = {
- .config_bridged_mode = qlcnicvf_config_bridged_mode,
- .config_led = qlcnicvf_config_led,
- .start_firmware = qlcnicvf_start_firmware
+struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
};
-static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+ .read_crb = qlcnic_82xx_read_crb,
+ .write_crb = qlcnic_82xx_write_crb,
+ .read_reg = qlcnic_82xx_hw_read_wx_2M,
+ .write_reg = qlcnic_82xx_hw_write_wx_2M,
+ .get_mac_address = qlcnic_82xx_get_mac_address,
+ .setup_intr = qlcnic_82xx_setup_intr,
+ .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_82xx_issue_cmd,
+ .get_func_no = qlcnic_82xx_get_func_no,
+ .api_lock = qlcnic_82xx_api_lock,
+ .api_unlock = qlcnic_82xx_api_unlock,
+ .add_sysfs = qlcnic_82xx_add_sysfs,
+ .remove_sysfs = qlcnic_82xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
+ .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .setup_link_event = qlcnic_82xx_linkevent_request,
+ .get_nic_info = qlcnic_82xx_get_nic_info,
+ .get_pci_info = qlcnic_82xx_get_pci_info,
+ .set_nic_info = qlcnic_82xx_set_nic_info,
+ .change_macvlan = qlcnic_82xx_sre_macaddr_change,
+ .napi_enable = qlcnic_82xx_napi_enable,
+ .napi_disable = qlcnic_82xx_napi_disable,
+ .config_intr_coal = qlcnic_82xx_config_intr_coalesce,
+ .config_rss = qlcnic_82xx_config_rss,
+ .config_hw_lro = qlcnic_82xx_config_hw_lro,
+ .config_loopback = qlcnic_82xx_set_lb_mode,
+ .clear_loopback = qlcnic_82xx_clear_lb_mode,
+ .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_82xx_change_filter,
+ .get_board_info = qlcnic_82xx_get_board_info,
+};
+
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1;
+ int err = -1, i;
+ int max_tx_rings;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
adapter->max_sds_rings = 1;
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
- qlcnic_set_msix_bit(pdev, 0);
if (adapter->ahw->msix_supported) {
enable_msix:
- qlcnic_init_msix_entries(adapter, num_msix);
+ for (i = 0; i < num_msix; i++)
+ adapter->msix_entries[i].entry = i;
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
- qlcnic_set_msix_bit(pdev, 1);
-
- adapter->max_sds_rings = num_msix;
-
+ if (qlcnic_83xx_check(adapter)) {
+ adapter->ahw->num_msix = num_msix;
+ /* subtract mail box and tx ring vectors */
+ max_tx_rings = adapter->max_drv_tx_rings;
+ adapter->max_sds_rings = num_msix -
+ max_tx_rings - 1;
+ } else {
+ adapter->max_sds_rings = num_msix;
+ }
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
- }
- if (err > 0) {
- num_msix = rounddown_pow_of_two(err);
- if (num_msix)
+ } else if (err > 0) {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
+ if (qlcnic_83xx_check(adapter)) {
+ if (err < QLC_83XX_MINIMUM_VECTOR)
+ return err;
+ err -= (adapter->max_drv_tx_rings + 1);
+ num_msix = rounddown_pow_of_two(err);
+ num_msix += (adapter->max_drv_tx_rings + 1);
+ } else {
+ num_msix = rounddown_pow_of_two(err);
+ }
+
+ if (num_msix) {
+ dev_info(&pdev->dev,
+ "Trying to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
goto enable_msix;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
}
}
+
return err;
}
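
When pci_enable_msix() returns a positive value it is the number of vectors the system can actually grant, so the rewritten qlcnic_enable_msix() above rounds that count down to a power of two (after setting aside the Tx-ring and mailbox vectors on 83xx) and retries. A standalone sketch of just that arithmetic; the vector counts are made up and the PCI call itself is not modelled:

#include <stdio.h>

/* Largest power of two <= n (0 for n == 0); the kernel provides
 * rounddown_pow_of_two() for this. */
static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return n ? p : 0;
}

int main(void)
{
	int available = 11;	/* pretend pci_enable_msix() returned 11 */
	int tx_plus_mbx = 2;	/* 83xx: max_drv_tx_rings (1) + mailbox (1) */

	int retry = rounddown_pow_of_two(available - tx_plus_mbx) + tx_plus_mbx;

	printf("retrying with %d MSI-X vectors\n", retry);
	return 0;
}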
-static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
{
+ int err = 0;
u32 offset, mask_reg;
const struct qlcnic_legacy_intr_set *legacy_intrp;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -323,8 +484,10 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
offset);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
- return;
+ return err;
}
+ if (qlcnic_use_msi || qlcnic_use_msi_x)
+ return -EOPNOTSUPP;
legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
@@ -336,32 +499,47 @@ static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
+ return err;
}
-static void
-qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
{
- int num_msix;
+ int num_msix, err = 0;
- if (adapter->ahw->msix_supported) {
+ if (!num_intr)
+ num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
+
+ if (adapter->ahw->msix_supported)
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
- QLCNIC_DEF_NUM_STS_DESC_RINGS));
- } else
+ num_intr));
+ else
num_msix = 1;
- if (!qlcnic_enable_msix(adapter, num_msix))
- return;
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM || !err)
+ return err;
- qlcnic_enable_msi_legacy(adapter);
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+
+ return -EIO;
}
-static void
-qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
if (adapter->flags & QLCNIC_MSIX_ENABLED)
pci_disable_msix(adapter->pdev);
if (adapter->flags & QLCNIC_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ if (adapter->ahw->intr_tbl) {
+ vfree(adapter->ahw->intr_tbl);
+ adapter->ahw->intr_tbl = NULL;
+ }
}
static void
@@ -371,7 +549,36 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
iounmap(adapter->ahw->pci_base0);
}
-static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_pci_info *pci_info;
+ int ret;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ switch (adapter->ahw->port_type) {
+ case QLCNIC_GBE:
+ adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ break;
+ case QLCNIC_XGBE:
+ adapter->ahw->act_pci_func = QLCNIC_NIU_MAX_XG_PORTS;
+ break;
+ }
+ return 0;
+ }
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ kfree(pci_info);
+ return ret;
+}
+
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
int i, ret = 0, j = 0;
@@ -423,8 +630,11 @@ static int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_enable_eswitch(adapter, i, 1);
+ }
kfree(pci_info);
return 0;
@@ -462,40 +672,31 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
QLC_DEV_SET_DRV(0xf, id));
}
} else {
- data = QLCRD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
data = (data & ~QLC_DEV_SET_DRV(0xf, ahw->pci_func)) |
(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
ahw->pci_func));
}
- QLCWR32(adapter, QLCNIC_DRV_OP_MODE, data);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
qlcnic_api_unlock(adapter);
err_lock:
return ret;
}
-static void
-qlcnic_check_vf(struct qlcnic_adapter *adapter)
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
{
- void __iomem *msix_base_addr;
- void __iomem *priv_op;
- u32 func;
- u32 msix_base;
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->ahw->fw_hal_version = readl(adapter->ahw->pci_base0 +
- QLCNIC_FW_API);
+ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FW_API);
/* Find PCI function number */
- pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
- msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
- msix_base = readl(msix_base_addr);
- func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
- adapter->ahw->pci_func = func;
+ qlcnic_get_func_no(adapter);
/* Determine function privilege level */
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
@@ -512,12 +713,16 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
}
#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
{
switch (dev_id) {
case PCI_DEVICE_ID_QLOGIC_QLE824X:
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ *bar = QLCNIC_83XX_BAR0_LENGTH;
+ break;
default:
*bar = 0;
}
@@ -547,6 +752,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
ahw->pci_base0 = mem_ptr0;
ahw->pci_len0 = pci_len0;
offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
@@ -581,19 +787,26 @@ static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
+ int err;
u32 fw_major, fw_minor, fw_build, prev_fw_version;
struct pci_dev *pdev = adapter->pdev;
- struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
prev_fw_version = adapter->fw_version;
- fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
- fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
- fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
- if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ return;
+ }
+ if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
if (fw_dump->tmpl_hdr == NULL ||
adapter->fw_version > prev_fw_version) {
if (fw_dump->tmpl_hdr)
@@ -604,8 +817,9 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
}
}
- dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
- fw_major, fw_minor, fw_build);
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
@@ -648,9 +862,19 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
adapter->ahw->capabilities = nic_info.capabilities;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ u32 temp;
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ adapter->ahw->capabilities2 = temp;
+ }
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
adapter->ahw->max_mtu = nic_info.max_mtu;
+ /* Disable NPAR for 83XX */
+ if (qlcnic_83xx_check(adapter))
+ return err;
+
if (adapter->ahw->capabilities & BIT_6)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
@@ -709,7 +933,7 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
qlcnic_set_netdev_features(adapter, esw_cfg);
}
-static int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
@@ -730,14 +954,17 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- netdev_features_t features, vlan_features;
+ unsigned long features, vlan_features;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO);
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO);
vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
+ NETIF_F_IPV6_CSUM);
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
features |= (NETIF_F_TSO | NETIF_F_TSO6);
vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
@@ -747,12 +974,19 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
if (esw_cfg->offload_flags & BIT_0) {
netdev->features |= features;
- if (!(esw_cfg->offload_flags & BIT_1))
+ adapter->rx_csum = 1;
+ if (!(esw_cfg->offload_flags & BIT_1)) {
netdev->features &= ~NETIF_F_TSO;
- if (!(esw_cfg->offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO;
+ }
+ if (!(esw_cfg->offload_flags & BIT_2)) {
netdev->features &= ~NETIF_F_TSO6;
+ features &= ~NETIF_F_TSO6;
+ }
} else {
netdev->features &= ~features;
+ features &= ~features;
+ adapter->rx_csum = 0;
}
netdev->vlan_features = (features & vlan_features);
@@ -761,7 +995,6 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
- void __iomem *priv_op;
u32 op_mode, priv_level;
int err = 0;
@@ -772,8 +1005,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
return 0;
- priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
- op_mode = readl(priv_op);
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (op_mode == QLC_DEV_DRV_DEFAULT)
@@ -805,7 +1037,7 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
return err;
}
-static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
struct qlcnic_esw_func_cfg esw_cfg;
struct qlcnic_npar_info *npar;
@@ -838,6 +1070,7 @@ static int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
}
+
static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
struct qlcnic_npar_info *npar, int pci_func)
@@ -861,7 +1094,7 @@ qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
int i, err;
struct qlcnic_npar_info *npar;
@@ -877,8 +1110,7 @@ static int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
npar = &adapter->npars[i];
pci_func = npar->pci_func;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- err = qlcnic_get_nic_info(adapter,
- &nic_info, pci_func);
+ err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
return err;
nic_info.min_tx_bw = npar->min_bw;
@@ -909,10 +1141,12 @@ static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
return 0;
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
msleep(1000);
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
}
if (!npar_opt_timeo) {
dev_err(&adapter->pdev->dev,
@@ -944,8 +1178,7 @@ qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
return err;
}
-static int
-qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
{
int err;
@@ -985,9 +1218,8 @@ check_fw_status:
if (err)
goto err_out;
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
qlcnic_idc_debug_info(adapter, 1);
-
err = qlcnic_check_eswitch_mode(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
@@ -1005,7 +1237,7 @@ check_fw_status:
return 0;
err_out:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
dev_err(&adapter->pdev->dev, "Device state set to failed\n");
qlcnic_release_firmware(adapter);
@@ -1017,6 +1249,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
irq_handler_t handler;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
int err, ring;
unsigned long flags = 0;
@@ -1024,7 +1257,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- handler = qlcnic_tmp_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1035,20 +1269,44 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
handler = qlcnic_msi_intr;
else {
flags |= IRQF_SHARED;
- handler = qlcnic_intr;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_intr;
+ else
+ handler = qlcnic_83xx_intr;
}
}
adapter->irq = netdev->irq;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
- err = request_irq(sds_ring->irq, handler,
- flags, sds_ring->name, sds_ring);
- if (err)
- return err;
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ snprintf(sds_ring->name, sizeof(int) + IFNAMSIZ,
+ "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler, flags,
+ sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+ }
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ handler = qlcnic_msix_tx_intr;
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ snprintf(tx_ring->name, sizeof(int) + IFNAMSIZ,
+ "%s[%d]", netdev->name,
+ adapter->max_sds_rings + ring);
+ err = request_irq(tx_ring->irq, handler, flags,
+ tx_ring->name, tx_ring);
+ if (err)
+ return err;
+ }
+ }
}
-
return 0;
}
@@ -1057,21 +1315,48 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- free_irq(sds_ring->irq, sds_ring);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+ }
+ if (qlcnic_83xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->irq)
+ free_irq(tx_ring->irq, tx_ring);
+ }
+ }
}
}
-static int
-__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
{
- int ring;
- u32 capab2;
+ u32 capab = 0;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (adapter->ahw->capabilities2 &
+ QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ } else {
+ capab = adapter->ahw->capabilities;
+ if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+}
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1081,19 +1366,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return 0;
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
-
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
- capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
- if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
- adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
- }
+ qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
qlcnic_set_multi(netdev);
@@ -1118,10 +1398,7 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return 0;
}
-/* Usage: During resume and firmware recovery module.*/
-
-static int
-qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int err = 0;
@@ -1133,8 +1410,7 @@ qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return err;
}
-static void
-__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1166,8 +1442,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
/* Usage: During suspend and firmware recovery module */
-static void
-qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
rtnl_lock();
if (netif_running(netdev))
@@ -1176,7 +1451,7 @@ qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
}
-static int
+int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1222,8 +1497,7 @@ err_out_napi_del:
return err;
}
-static void
-qlcnic_detach(struct qlcnic_adapter *adapter)
+void qlcnic_detach(struct qlcnic_adapter *adapter)
{
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1272,21 +1546,9 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
int err = 0;
- adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
- GFP_KERNEL);
- if (!adapter->ahw) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- err = -ENOMEM;
- goto err_out;
- }
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
GFP_KERNEL);
if (!adapter->recv_ctx) {
- dev_err(&adapter->pdev->dev,
- "Failed to allocate recv ctx resources for adapter\n");
- kfree(adapter->ahw);
- adapter->ahw = NULL;
err = -ENOMEM;
goto err_out;
}
@@ -1294,6 +1556,8 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ /* clear stats */
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
return err;
}
@@ -1307,8 +1571,9 @@ static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
vfree(adapter->ahw->fw_dump.tmpl_hdr);
adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
- kfree(adapter->ahw);
- adapter->ahw = NULL;
+
+ kfree(adapter->ahw->reset.buff);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
}
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
@@ -1328,6 +1593,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1344,7 +1610,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &adapter->recv_ctx->rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, rds_ring);
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
}
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
@@ -1382,6 +1648,7 @@ qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ dev_err(&adapter->pdev->dev, "%s:\n", __func__);
return 0;
}
@@ -1425,34 +1692,40 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int err;
struct pci_dev *pdev = adapter->pdev;
+ adapter->rx_csum = 1;
adapter->ahw->mc_enabled = 0;
- adapter->ahw->max_mc_count = 38;
+ adapter->ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
netdev->netdev_ops = &qlcnic_netdev_ops;
- netdev->watchdog_timeo = 5*HZ;
+ netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
qlcnic_change_mtu(netdev, netdev->mtu);
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
- netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_RX);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
- netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (pci_using_dac == 1)
- netdev->hw_features |= NETIF_F_HIGHDMA;
+ if (pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
- netdev->vlan_features = netdev->hw_features;
+ if (qlcnic_vlan_tx_check(adapter))
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
- if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
- netdev->hw_features |= NETIF_F_HW_VLAN_TX;
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
- netdev->hw_features |= NETIF_F_LRO;
-
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+ netdev->features |= NETIF_F_LRO;
+ netdev->hw_features = netdev->features;
netdev->irq = adapter->msix_entries[0].vector;
err = register_netdev(netdev);
@@ -1480,17 +1753,61 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac)
return 0;
}
-static int
-qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
{
- adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
- GFP_KERNEL);
+ int ring;
+ struct qlcnic_host_tx_ring *tx_ring;
- if (adapter->msix_entries)
- return 0;
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring && tx_ring->cmd_buf_arr != NULL) {
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ }
+ }
+ if (adapter->tx_ring != NULL)
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, vector, index;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+
+ tx_ring = kcalloc(adapter->max_drv_tx_rings,
+ sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
- dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
- return -ENOMEM;
+ adapter->tx_ring = tx_ring;
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ qlcnic_free_tx_rings(adapter);
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->adapter = adapter;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ index = adapter->max_sds_rings + ring;
+ vector = adapter->msix_entries[index].vector;
+ tx_ring->irq = vector;
+ }
+ }
+ }
+ return 0;
}
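
qlcnic_alloc_tx_rings() above uses a plain allocate-then-unwind pattern: the ring array is allocated first, then a command-buffer array per ring, and qlcnic_free_tx_rings() releases whatever was already allocated if any step fails (note that the memset after vzalloc above is redundant, since vzalloc already returns zeroed memory). A generic standalone sketch of that pattern; names and sizes are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>

struct ring {
	void *buf;
};

static void free_rings(struct ring *rings, int n)
{
	int i;

	for (i = 0; i < n; i++)
		free(rings[i].buf);	/* free(NULL) is a no-op */
	free(rings);
}

static struct ring *alloc_rings(int n, size_t buf_size)
{
	struct ring *rings = calloc(n, sizeof(*rings));
	int i;

	if (!rings)
		return NULL;

	for (i = 0; i < n; i++) {
		rings[i].buf = calloc(1, buf_size);
		if (!rings[i].buf) {
			free_rings(rings, n);	/* unwind everything so far */
			return NULL;
		}
	}
	return rings;
}

int main(void)
{
	struct ring *rings = alloc_rings(4, 4096);

	printf("%s\n", rings ? "allocated 4 rings" : "allocation failed");
	if (rings)
		free_rings(rings, 4);
	return 0;
}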
static int
@@ -1498,9 +1815,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct qlcnic_adapter *adapter = NULL;
+ struct qlcnic_hardware_context *ahw;
int err, pci_using_dac = -1;
- uint8_t revision_id;
- char board_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ u32 capab2;
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
err = pci_enable_device(pdev);
if (err)
@@ -1522,10 +1840,27 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_enable_pcie_error_reporting(pdev);
+ ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+ if (!ahw)
+ goto err_out_free_res;
+
+ if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) {
+ ahw->hw_ops = &qlcnic_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_reg_tbl;
+ } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ qlcnic_83xx_register_map(ahw);
+ } else {
+ goto err_out_free_hw_res;
+ }
+
+ err = qlcnic_setup_pci_map(pdev, ahw);
+ if (err)
+ goto err_out_free_hw_res;
+
netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
if (!netdev) {
err = -ENOMEM;
- goto err_out_free_res;
+ goto err_out_iounmap;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -1533,15 +1868,25 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
+ adapter->ahw = ahw;
+
+ adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (adapter->qlcnic_wq == NULL) {
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ goto err_out_free_netdev;
+ }
err = qlcnic_alloc_adapter_resources(adapter);
if (err)
goto err_out_free_netdev;
adapter->dev_rst_time = jiffies;
- revision_id = pdev->revision;
- adapter->ahw->revision_id = revision_id;
- adapter->mac_learn = qlcnic_mac_learn;
+ adapter->ahw->revision_id = pdev->revision;
+ if (qlcnic_mac_learn == FDB_MAC_LEARN)
+ adapter->fdb_mac_learn = true;
+ else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+ adapter->drv_mac_learn = true;
+ adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
@@ -1549,31 +1894,32 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- err = qlcnic_setup_pci_map(pdev, adapter->ahw);
- if (err)
- goto err_out_free_hw;
- qlcnic_check_vf(adapter);
-
- /* This will be reset for mezz cards */
- adapter->portnum = adapter->ahw->pci_func;
-
- err = qlcnic_get_board_info(adapter);
- if (err) {
- dev_err(&pdev->dev, "Error getting board config info.\n");
- goto err_out_iounmap;
- }
-
- err = qlcnic_setup_idc_param(adapter);
- if (err)
- goto err_out_iounmap;
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
+ goto err_out_free_hw;
+ }
- adapter->flags |= QLCNIC_NEED_FLR;
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_free_hw;
- err = adapter->nic_ops->start_firmware(adapter);
- if (err) {
- dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
- "\t\tIf reboot doesn't help, try flashing the card\n");
- goto err_out_maintenance_mode;
+ adapter->flags |= QLCNIC_NEED_FLR;
+ } else if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_83xx_init(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "%s: failed\n", __func__);
+ goto err_out_free_hw;
+ }
+ } else {
+ dev_err(&pdev->dev,
+ "%s: failed. Please Reboot\n", __func__);
+ goto err_out_free_hw;
}
if (qlcnic_read_mac_addr(adapter))
@@ -1581,22 +1927,34 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
+
pr_info("%s: %s Board Chip rev 0x%x\n",
module_name(THIS_MODULE),
board_name, adapter->ahw->revision_id);
}
+ err = qlcnic_setup_intr(adapter, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
- qlcnic_clear_stats(adapter);
-
- err = qlcnic_alloc_msix_entries(adapter, adapter->ahw->max_rx_ques);
- if (err)
- goto err_out_decr_ref;
-
- qlcnic_setup_intr(adapter);
+ if (qlcnic_83xx_check(adapter)) {
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+ }
err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
if (err)
- goto err_out_disable_msi;
+ goto err_out_disable_mbx_intr;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_OCBB)
+ qlcnic_fw_cmd_set_drv_version(adapter);
+ }
+ }
pci_set_drvdata(pdev, adapter);
@@ -1615,29 +1973,37 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- if (adapter->mac_learn)
+ if (qlcnic_get_act_pci_func(adapter))
+ goto err_out_disable_mbx_intr;
+
+ if (adapter->drv_mac_learn)
qlcnic_alloc_lb_filters_mem(adapter);
- qlcnic_create_diag_entries(adapter);
+ qlcnic_add_sysfs(adapter);
return 0;
+err_out_disable_mbx_intr:
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
err_out_disable_msi:
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
-
-err_out_decr_ref:
+ qlcnic_cancel_idc_work(adapter);
qlcnic_clr_all_drv_state(adapter, 0);
-err_out_iounmap:
- qlcnic_cleanup_pci_map(adapter);
-
err_out_free_hw:
qlcnic_free_adapter_resources(adapter);
err_out_free_netdev:
free_netdev(netdev);
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_hw_res:
+ kfree(ahw);
+
err_out_free_res:
pci_release_regions(pdev);
@@ -1645,24 +2011,13 @@ err_out_disable_pdev:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
-
-err_out_maintenance_mode:
- netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
- err = register_netdev(netdev);
- if (err) {
- dev_err(&pdev->dev, "failed to register net device\n");
- goto err_out_decr_ref;
- }
- pci_set_drvdata(pdev, adapter);
- qlcnic_create_diag_entries(adapter);
- return 0;
}
static void qlcnic_remove(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter;
struct net_device *netdev;
+ struct qlcnic_hardware_context *ahw;
adapter = pci_get_drvdata(pdev);
if (adapter == NULL)
@@ -1670,10 +2025,17 @@ static void qlcnic_remove(struct pci_dev *pdev)
netdev = adapter->netdev;
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
+ ahw = adapter->ahw;
unregister_netdev(netdev);
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ }
+
qlcnic_detach(adapter);
if (adapter->npars != NULL)
@@ -1689,9 +2051,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_free_lb_filters_mem(adapter);
qlcnic_teardown_intr(adapter);
- kfree(adapter->msix_entries);
- qlcnic_remove_diag_entries(adapter);
+ qlcnic_remove_sysfs(adapter);
qlcnic_cleanup_pci_map(adapter);
@@ -1702,7 +2063,12 @@ static void qlcnic_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ if (adapter->qlcnic_wq) {
+ destroy_workqueue(adapter->qlcnic_wq);
+ adapter->qlcnic_wq = NULL;
+ }
qlcnic_free_adapter_resources(adapter);
+ kfree(ahw);
free_netdev(netdev);
}
static int __qlcnic_shutdown(struct pci_dev *pdev)
@@ -1713,7 +2079,7 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
netif_device_detach(netdev);
- qlcnic_cancel_fw_work(adapter);
+ qlcnic_cancel_idc_work(adapter);
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
@@ -1726,7 +2092,6 @@ static int __qlcnic_shutdown(struct pci_dev *pdev)
retval = pci_save_state(pdev);
if (retval)
return retval;
-
if (qlcnic_82xx_check(adapter)) {
if (qlcnic_wol_supported(adapter)) {
pci_enable_wake(pdev, PCI_D3cold, 1);
@@ -1774,7 +2139,7 @@ qlcnic_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err) {
dev_err(&pdev->dev, "failed to start firmware\n");
return err;
@@ -1797,14 +2162,8 @@ done:
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
int err;
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(netdev, "Device in FAILED state\n");
- return -EIO;
- }
-
netif_carrier_off(netdev);
err = qlcnic_attach(adapter);
@@ -1832,6 +2191,7 @@ static int qlcnic_close(struct net_device *netdev)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
__qlcnic_down(adapter, netdev);
+
return 0;
}
@@ -1839,22 +2199,53 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
int i;
+ struct net_device *netdev = adapter->netdev;
+ u32 filter_size = 0;
+ u16 act_pci_func = 0;
if (adapter->fhash.fmax && adapter->fhash.fhead)
return;
+ act_pci_func = adapter->ahw->act_pci_func;
spin_lock_init(&adapter->mac_learn_lock);
+ spin_lock_init(&adapter->rx_mac_learn_lock);
+
+ if (qlcnic_82xx_check(adapter)) {
+ filter_size = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+ } else {
+ filter_size = QLC_83XX_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+ }
+
+ head = kcalloc(adapter->fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
- head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
- GFP_KERNEL);
if (!head)
return;
- adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fmax = (filter_size / act_pci_func);
adapter->fhash.fhead = head;
- for (i = 0; i < adapter->fhash.fmax; i++)
+ netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+ act_pci_func, adapter->fhash.fmax);
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++)
INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+ adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+ head = kcalloc(adapter->rx_fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+ adapter->rx_fhash.fhead = head;
+
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
}
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
@@ -1864,16 +2255,25 @@ static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
adapter->fhash.fhead = NULL;
adapter->fhash.fmax = 0;
+
+ if (adapter->rx_fhash.fmax && adapter->rx_fhash.fhead)
+ kfree(adapter->rx_fhash.fhead);
+
+ adapter->rx_fhash.fmax = 0;
+ adapter->rx_fhash.fhead = NULL;
}
-static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 temp_state, temp_val, temp = 0;
int rv = 0;
+ if (qlcnic_83xx_check(adapter))
+ temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
if (qlcnic_82xx_check(adapter))
- temp = QLCRD32(adapter, CRB_TEMP_STATE);
+ temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
temp_state = qlcnic_get_temp_state(temp);
temp_val = qlcnic_get_temp_val(temp);
@@ -1933,7 +2333,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
return stats;
}
-static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
{
u32 status;
@@ -2009,6 +2409,14 @@ static irqreturn_t qlcnic_msix_intr(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+ struct qlcnic_host_tx_ring *tx_ring = data;
+
+ napi_schedule(&tx_ring->napi);
+ return IRQ_HANDLED;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev)
{
@@ -2035,7 +2443,7 @@ qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
val |= encoding << 7;
val |= (jiffies - adapter->dev_rst_time) << 8;
- QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
adapter->dev_rst_time = jiffies;
}
@@ -2050,14 +2458,14 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
if (qlcnic_api_lock(adapter))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
if (state == QLCNIC_DEV_NEED_RESET)
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
else if (state == QLCNIC_DEV_NEED_QUISCENT)
QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2072,9 +2480,9 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -EBUSY;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
@@ -2089,20 +2497,22 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
if (qlcnic_api_lock(adapter))
goto err;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
if (failed) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_FAILED);
dev_info(&adapter->pdev->dev,
"Device state set to Failed. Please Reboot\n");
} else if (!(val & 0x11111111))
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_COLD);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
qlcnic_api_unlock(adapter);
err:
@@ -2117,12 +2527,13 @@ static int
qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
{
int act, state, active_mask;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
- act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
- active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
+ active_mask = (~(1 << (ahw->pci_func * 4)));
act = act & active_mask;
}
@@ -2135,7 +2546,7 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
{
- u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+ u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
if (val != QLCNIC_DRV_IDC_VER) {
dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
@@ -2159,19 +2570,21 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
if (!(val & (1 << (portnum * 4)))) {
QLC_DEV_SET_REF_CNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
}
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Device state = %u\n", prev_state);
switch (prev_state) {
case QLCNIC_DEV_COLD:
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
- QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_DRV_IDC_VER);
qlcnic_idc_debug_info(adapter, 0);
qlcnic_api_unlock(adapter);
return 1;
@@ -2182,15 +2595,15 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
return ret;
case QLCNIC_DEV_NEED_RESET:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_NEED_QUISCENT:
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_QSCNT_RDY(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
break;
case QLCNIC_DEV_FAILED:
@@ -2207,7 +2620,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
do {
msleep(1000);
- prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (prev_state == QLCNIC_DEV_QUISCENT)
continue;
@@ -2222,9 +2635,9 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return -1;
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
QLC_DEV_CLR_RST_QSCNT(val, portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
ret = qlcnic_check_idc_ver(adapter);
qlcnic_api_unlock(adapter);
@@ -2243,7 +2656,7 @@ qlcnic_fwinit_work(struct work_struct *work)
if (qlcnic_api_lock(adapter))
goto err_ret;
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_QUISCENT ||
dev_state == QLCNIC_DEV_NEED_QUISCENT) {
qlcnic_api_unlock(adapter);
@@ -2272,17 +2685,19 @@ qlcnic_fwinit_work(struct work_struct *work)
if (!qlcnic_check_drv_state(adapter)) {
skip_ack_check:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (dev_state == QLCNIC_DEV_NEED_RESET) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
- QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
- val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DRV_STATE);
QLC_DEV_SET_RST_RDY(val, adapter->portnum);
- QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ QLC_SHARED_REG_WR32(adapter,
+ QLCNIC_CRB_DRV_STATE, val);
}
qlcnic_api_unlock(adapter);
@@ -2308,12 +2723,12 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
wait_npar:
- dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
switch (dev_state) {
case QLCNIC_DEV_READY:
- if (!adapter->nic_ops->start_firmware(adapter)) {
+ if (!qlcnic_start_firmware(adapter)) {
qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
adapter->fw_wait_cnt = 0;
return;
@@ -2350,7 +2765,7 @@ qlcnic_detach_work(struct work_struct *work)
} else
qlcnic_down(adapter, netdev);
- status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (status & QLCNIC_RCODE_FATAL_ERROR) {
dev_err(&adapter->pdev->dev,
@@ -2401,19 +2816,18 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
{
u32 state;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
if (state == QLCNIC_DEV_NPAR_NON_OPER)
return;
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
-/*Transit to RESET state from READY state only */
-void
-qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
{
u32 state, xg_val = 0, gb_val = 0;
@@ -2428,25 +2842,22 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "Pause control frames disabled"
" on all ports\n");
adapter->need_fw_reset = 1;
+
if (qlcnic_api_lock(adapter))
return;
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
- netdev_err(adapter->netdev,
- "Device is in FAILED state, Please Reboot\n");
- qlcnic_api_unlock(adapter);
- return;
- }
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY) {
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_NEED_RESET);
adapter->flags |= QLCNIC_FW_RESET_OWNER;
QLCDB(adapter, DRV, "NEED_RESET state set\n");
qlcnic_idc_debug_info(adapter, 0);
}
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
qlcnic_api_unlock(adapter);
}
@@ -2457,34 +2868,22 @@ qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
if (qlcnic_api_lock(adapter))
return;
- QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_OPER);
QLCDB(adapter, DRV, "NPAR operational state set\n");
qlcnic_api_unlock(adapter);
}
-static void
-qlcnic_schedule_work(struct qlcnic_adapter *adapter,
- work_func_t func, int delay)
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
{
if (test_bit(__QLCNIC_AER, &adapter->state))
return;
INIT_DELAYED_WORK(&adapter->fw_work, func);
- queue_delayed_work(qlcnic_wq, &adapter->fw_work,
- round_jiffies_relative(delay));
-}
-
-static void
-qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
-{
- while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- msleep(10);
-
- if (!adapter->fw_work.work.func)
- return;
-
- cancel_delayed_work_sync(&adapter->fw_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
}
static void
@@ -2496,7 +2895,8 @@ qlcnic_attach_work(struct work_struct *work)
u32 npar_state;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
- npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
qlcnic_clr_all_drv_state(adapter, 0);
else if (npar_state != QLCNIC_DEV_NPAR_OPER)
@@ -2536,16 +2936,16 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
goto detach;
if (adapter->need_fw_reset)
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_NEED_RESET) {
qlcnic_set_npar_non_operational(adapter);
adapter->need_fw_reset = 1;
} else if (state == QLCNIC_DEV_NEED_QUISCENT)
goto detach;
- heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
if (heartbeat != adapter->heartbeat) {
adapter->heartbeat = heartbeat;
adapter->fw_fail_cnt = 0;
@@ -2565,25 +2965,25 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
adapter->flags |= QLCNIC_FW_HANG;
- qlcnic_dev_request_reset(adapter);
+ qlcnic_dev_request_reset(adapter, 0);
if (qlcnic_auto_fw_reset)
clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
"PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
"PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n",
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1),
- QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ peg_status,
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
- peg_status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
@@ -2667,17 +3067,39 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
adapter->need_fw_reset = 1;
set_bit(__QLCNIC_START_FW, &adapter->state);
- QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
QLCDB(adapter, DRV, "Restarting fw\n");
}
qlcnic_api_unlock(adapter);
- err = adapter->nic_ops->start_firmware(adapter);
+ err = qlcnic_start_firmware(adapter);
if (err)
return err;
qlcnic_clr_drv_state(adapter);
- qlcnic_setup_intr(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = qlcnic_setup_intr(adapter, 0);
+
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ qlcnic_clr_all_drv_state(adapter, 1);
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ goto done;
+ }
+ }
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
@@ -2719,6 +3141,12 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ }
+
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
@@ -2738,12 +3166,13 @@ static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
static void qlcnic_io_resume(struct pci_dev *pdev)
{
+ u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
pci_cleanup_aer_uncorrect_error_status(pdev);
-
- if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
- test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+ &adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
FW_POLL_DELAY);
}
@@ -2776,39 +3205,59 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
+int qlcnic_validate_max_rss(u8 max_hw, u8 val)
{
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
- netdev_info(netdev, "no msix or msi support, hence no rss\n");
- return -EINVAL;
+ u32 max_allowed;
+
+ if (max_hw > QLC_MAX_SDS_RINGS) {
+ max_hw = QLC_MAX_SDS_RINGS;
+ pr_info("max rss reset to %d\n", QLC_MAX_SDS_RINGS);
}
- if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
- netdev_info(netdev, "rss_ring valid range [2 - %x] in "
- " powers of 2\n", max_hw);
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((val > max_allowed) || (val < 2) || !is_power_of_2(val)) {
+ pr_info("rss_ring valid range [2 - %x] in powers of 2\n",
+ max_allowed);
return -EINVAL;
}
return 0;
-
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
{
+ int err;
struct net_device *netdev = adapter->netdev;
- int err = 0;
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
+
qlcnic_detach(adapter);
+
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
qlcnic_teardown_intr(adapter);
+ err = qlcnic_setup_intr(adapter, data);
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
- if (qlcnic_enable_msix(adapter, data)) {
- netdev_info(netdev, "failed setting max_rss; rss disabled\n");
- qlcnic_enable_msi_legacy(adapter);
+ if (qlcnic_83xx_check(adapter)) {
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ goto done;
+ }
}
if (netif_running(netdev)) {
@@ -2820,6 +3269,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
+ err = len;
done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
@@ -2858,8 +3308,7 @@ qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
in_dev_put(indev);
}
-static void
-qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct net_device *dev;
@@ -2867,12 +3316,14 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
qlcnic_config_indev_addr(adapter, netdev, event);
+ rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
dev = __vlan_find_dev_deep(netdev, vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
}
+ rcu_read_unlock();
}
static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2940,9 +3391,11 @@ recheck:
switch (event) {
case NETDEV_UP:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
break;
case NETDEV_DOWN:
qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
break;
default:
break;
@@ -2960,11 +3413,10 @@ static struct notifier_block qlcnic_inetaddr_cb = {
.notifier_call = qlcnic_inetaddr_event,
};
#else
-static void
-qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
-static struct pci_error_handlers qlcnic_err_handler = {
+static const struct pci_error_handlers qlcnic_err_handler = {
.error_detected = qlcnic_io_error_detected,
.slot_reset = qlcnic_io_slot_reset,
.resume = qlcnic_io_resume,
@@ -2990,12 +3442,6 @@ static int __init qlcnic_init_module(void)
printk(KERN_INFO "%s\n", qlcnic_driver_string);
- qlcnic_wq = create_singlethread_workqueue("qlcnic");
- if (qlcnic_wq == NULL) {
- printk(KERN_ERR "qlcnic: cannot create workqueue\n");
- return -ENOMEM;
- }
-
#ifdef CONFIG_INET
register_netdevice_notifier(&qlcnic_netdev_cb);
register_inetaddr_notifier(&qlcnic_inetaddr_cb);
@@ -3007,7 +3453,6 @@ static int __init qlcnic_init_module(void)
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
return ret;
@@ -3017,14 +3462,12 @@ module_init(qlcnic_init_module);
static void __exit qlcnic_exit_module(void)
{
-
pci_unregister_driver(&qlcnic_driver);
#ifdef CONFIG_INET
unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
- destroy_workqueue(qlcnic_wq);
}
module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 0b8d8625834..abbd22c814a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1,8 +1,25 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include "qlcnic.h"
#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
#include <net/ip.h>
+#define QLC_83XX_MINIDUMP_FLASH 0x520000
+#define QLC_83XX_OCM_INDEX 3
+#define QLC_83XX_PCI_INDEX 0
+
+static const u32 qlcnic_ms_read_data[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
#define QLCNIC_DUMP_WCRB BIT_0
#define QLCNIC_DUMP_RWCRB BIT_1
#define QLCNIC_DUMP_ANDCRB BIT_2
@@ -102,16 +119,55 @@ struct __queue {
u8 rsvd3[2];
} __packed;
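+/*
+ * Dump template entry layouts added for 83xx minidump capture:
+ * __pollrd - select, poll and read; __mux2 - dual-selector mux read;
+ * __pollrdmwr - poll, read-modify-write, poll again. Each corresponds to
+ * one of the new opcodes dispatched from qlcnic_83xx_fw_dump_ops[].
+ */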
+struct __pollrd {
+ u32 sel_addr;
+ u32 read_addr;
+ u32 sel_val;
+ u16 sel_val_stride;
+ u16 no_ops;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 data_size;
+ u8 rsvd[4];
+} __packed;
+
+struct __mux2 {
+ u32 sel_addr1;
+ u32 sel_addr2;
+ u32 sel_val1;
+ u32 sel_val2;
+ u32 no_ops;
+ u32 sel_val_mask;
+ u32 read_addr;
+ u8 sel_val_stride;
+ u8 data_size;
+ u8 rsvd[2];
+} __packed;
+
+struct __pollrdmwr {
+ u32 addr1;
+ u32 addr2;
+ u32 val1;
+ u32 val2;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 mod_mask;
+ u32 data_size;
+} __packed;
+
struct qlcnic_dump_entry {
struct qlcnic_common_entry_hdr hdr;
union {
- struct __crb crb;
- struct __cache cache;
- struct __ocm ocm;
- struct __mem mem;
- struct __mux mux;
- struct __queue que;
- struct __ctrl ctrl;
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ struct __pollrdmwr pollrdmwr;
+ struct __mux2 mux2;
+ struct __pollrd pollrd;
} region;
} __packed;
@@ -131,6 +187,9 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_L2_ITAG = 22,
QLCNIC_DUMP_L2_DATA = 23,
QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_POLL_RD = 35,
+ QLCNIC_READ_MUX2 = 36,
+ QLCNIC_READ_POLLRDMWR = 37,
QLCNIC_DUMP_READ_ROM = 71,
QLCNIC_DUMP_READ_MEM = 72,
QLCNIC_DUMP_READ_CTRL = 98,
@@ -144,46 +203,17 @@ struct qlcnic_dump_operations {
__le32 *);
};
-static void qlcnic_read_dump_reg(u32 addr, void __iomem *bar0, u32 *data)
-{
- u32 dest;
- void __iomem *window_reg;
-
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- *data = readl(window_reg);
-}
-
-static void qlcnic_write_dump_reg(u32 addr, void __iomem *bar0, u32 data)
-{
- u32 dest;
- void __iomem *window_reg;
-
- dest = addr & 0xFFFF0000;
- window_reg = bar0 + QLCNIC_FW_DUMP_REG1;
- writel(dest, window_reg);
- readl(window_reg);
- window_reg = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
- writel(data, window_reg);
- readl(window_reg);
-}
-
-/* FW dump related functions */
static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i;
u32 addr, data;
struct __crb *crb = &entry->region.crb;
- void __iomem *base = adapter->ahw->pci_base0;
addr = crb->addr;
for (i = 0; i < crb->no_ops; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(addr);
*buffer++ = cpu_to_le32(data);
addr += crb->stride;
@@ -195,7 +225,6 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
int i, k, timeout = 0;
- void __iomem *base = adapter->ahw->pci_base0;
u32 addr, data;
u8 no_ops;
struct __ctrl *ctr = &entry->region.ctrl;
@@ -211,28 +240,28 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
continue;
switch (1 << k) {
case QLCNIC_DUMP_WCRB:
- qlcnic_write_dump_reg(addr, base, ctr->val1);
+ qlcnic_ind_wr(adapter, addr, ctr->val1);
break;
case QLCNIC_DUMP_RWCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base, data);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_ANDCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data & ctr->val2);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data & ctr->val2));
break;
case QLCNIC_DUMP_ORCRB:
- qlcnic_read_dump_reg(addr, base, &data);
- qlcnic_write_dump_reg(addr, base,
- data | ctr->val3);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data | ctr->val3));
break;
case QLCNIC_DUMP_POLLCRB:
while (timeout <= ctr->timeout) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
if ((data & ctr->val2) == ctr->val1)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout++;
}
if (timeout > ctr->timeout) {
@@ -244,7 +273,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
case QLCNIC_DUMP_RD_SAVE:
if (ctr->index_a)
addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
t_hdr->saved_state[ctr->index_v] = data;
break;
case QLCNIC_DUMP_WRT_SAVED:
@@ -254,7 +283,7 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
data = ctr->val1;
if (ctr->index_a)
addr = t_hdr->saved_state[ctr->index_a];
- qlcnic_write_dump_reg(addr, base, data);
+ qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
data = t_hdr->saved_state[ctr->index_v];
@@ -283,12 +312,11 @@ static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
int loop;
u32 val, data = 0;
struct __mux *mux = &entry->region.mux;
- void __iomem *base = adapter->ahw->pci_base0;
val = mux->val;
for (loop = 0; loop < mux->no_ops; loop++) {
- qlcnic_write_dump_reg(mux->addr, base, val);
- qlcnic_read_dump_reg(mux->read_addr, base, &data);
+ qlcnic_ind_wr(adapter, mux->addr, val);
+ data = qlcnic_ind_rd(adapter, mux->read_addr);
*buffer++ = cpu_to_le32(val);
*buffer++ = cpu_to_le32(data);
val += mux->val_stride;
@@ -301,17 +329,16 @@ static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
{
int i, loop;
u32 cnt, addr, data, que_id = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __queue *que = &entry->region.que;
addr = que->read_addr;
cnt = que->read_addr_cnt;
for (loop = 0; loop < que->no_ops; loop++) {
- qlcnic_write_dump_reg(que->sel_addr, base, que_id);
+ qlcnic_ind_wr(adapter, que->sel_addr, que_id);
addr = que->read_addr;
for (i = 0; i < cnt; i++) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += que->read_addr_stride;
}
@@ -343,27 +370,27 @@ static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
int i, count = 0;
u32 fl_addr, size, val, lck_val, addr;
struct __mem *rom = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
fl_addr = rom->addr;
- size = rom->size/4;
+ size = rom->size / 4;
lock_try:
- lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
if (!lck_val && count < MAX_CTL_CHECK) {
- msleep(10);
+ usleep_range(10000, 11000);
count++;
goto lock_try;
}
- writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ adapter->ahw->pci_func);
for (i = 0; i < size; i++) {
addr = fl_addr & 0xFFFF0000;
- qlcnic_write_dump_reg(FLASH_ROM_WINDOW, base, addr);
+ qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
addr = LSW(fl_addr) + FLASH_ROM_DATA;
- qlcnic_read_dump_reg(addr, base, &val);
+ val = qlcnic_ind_rd(adapter, addr);
fl_addr += 4;
*buffer++ = cpu_to_le32(val);
}
- readl(base + QLCNIC_FLASH_SEM2_ULK);
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
return rom->size;
}
@@ -372,18 +399,17 @@ static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
{
int i;
u32 cnt, val, data, addr;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l1 = &entry->region.cache;
val = l1->init_tag_val;
for (i = 0; i < l1->no_ops; i++) {
- qlcnic_write_dump_reg(l1->addr, base, val);
- qlcnic_write_dump_reg(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ qlcnic_ind_wr(adapter, l1->addr, val);
+ qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
addr = l1->read_addr;
cnt = l1->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l1->read_addr_stride;
cnt--;
@@ -399,7 +425,6 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
int i;
u32 cnt, val, data, addr;
u8 poll_mask, poll_to, time_out = 0;
- void __iomem *base = adapter->ahw->pci_base0;
struct __cache *l2 = &entry->region.cache;
val = l2->init_tag_val;
@@ -407,17 +432,17 @@ static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
poll_to = MSB(MSW(l2->ctrl_val));
for (i = 0; i < l2->no_ops; i++) {
- qlcnic_write_dump_reg(l2->addr, base, val);
+ qlcnic_ind_wr(adapter, l2->addr, val);
if (LSW(l2->ctrl_val))
- qlcnic_write_dump_reg(l2->ctrl_addr, base,
- LSW(l2->ctrl_val));
+ qlcnic_ind_wr(adapter, l2->ctrl_addr,
+ LSW(l2->ctrl_val));
if (!poll_mask)
goto skip_poll;
do {
- qlcnic_read_dump_reg(l2->ctrl_addr, base, &data);
+ data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
if (!(data & poll_mask))
break;
- msleep(1);
+ usleep_range(1000, 2000);
time_out++;
} while (time_out <= poll_to);
@@ -431,7 +456,7 @@ skip_poll:
addr = l2->read_addr;
cnt = l2->read_addr_num;
while (cnt) {
- qlcnic_read_dump_reg(addr, base, &data);
+ data = qlcnic_ind_rd(adapter, addr);
*buffer++ = cpu_to_le32(data);
addr += l2->read_addr_stride;
cnt--;
@@ -447,7 +472,6 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
u32 addr, data, test, ret = 0;
int i, reg_read;
struct __mem *mem = &entry->region.mem;
- void __iomem *base = adapter->ahw->pci_base0;
reg_read = mem->size;
addr = mem->addr;
@@ -462,13 +486,12 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
mutex_lock(&adapter->ahw->mem_lock);
while (reg_read != 0) {
- qlcnic_write_dump_reg(MIU_TEST_ADDR_LO, base, addr);
- qlcnic_write_dump_reg(MIU_TEST_ADDR_HI, base, 0);
- qlcnic_write_dump_reg(MIU_TEST_CTR, base,
- TA_CTL_ENABLE | TA_CTL_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
for (i = 0; i < MAX_CTL_CHECK; i++) {
- qlcnic_read_dump_reg(MIU_TEST_CTR, base, &test);
+ test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if (!(test & TA_CTL_BUSY))
break;
}
@@ -481,8 +504,7 @@ static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
}
}
for (i = 0; i < 4; i++) {
- qlcnic_read_dump_reg(MIU_TEST_READ_DATA[i], base,
- &data);
+ data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
*buffer++ = cpu_to_le32(data);
}
addr += 16;
@@ -501,48 +523,388 @@ static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
return 0;
}
-static const struct qlcnic_dump_operations fw_dump_ops[] = {
- { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
- { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
- { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
- { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
- { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
- { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
- { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
- { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
- { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
- { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
- { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
- { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
-};
-
-/* Walk the template and collect dump for each entry in the dump template */
-static int
-qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
- u32 size)
+static int qlcnic_valid_dump_entry(struct device *dev,
+ struct qlcnic_dump_entry *entry, u32 size)
{
int ret = 1;
if (size != entry->hdr.cap_size) {
- dev_info(dev,
- "Invalid dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
- entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
- dev_info(dev, "Aborting further dump capture\n");
+ dev_err(dev,
+ "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size,
+ entry->hdr.cap_size);
ret = 0;
}
return ret;
}
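+/*
+ * QLCNIC_READ_POLLRDMWR handler: arm addr1 with val1 and poll it until the
+ * poll_mask bits are set, read and mask the value at addr2, write it back,
+ * then re-arm addr1 with val2 and poll again. The addr2/data pair (two
+ * u32 words) is captured into the dump buffer.
+ */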
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry,
+ __le32 *buffer)
+{
+ struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+ u32 data, wait_count, poll_wait, temp;
+
+ poll_wait = poll->poll_wait;
+
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((data & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+ qlcnic_ind_wr(adapter, poll->addr2, data);
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ temp = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((temp & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ *buffer++ = cpu_to_le32(poll->addr2);
+ *buffer++ = cpu_to_le32(data);
+
+ return 2 * sizeof(u32);
+}
+
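+/*
+ * QLCNIC_DUMP_POLL_RD handler: for each op, program sel_addr with the
+ * current selector value, poll it until the poll_mask bits are set, then
+ * record the selector and the value read from read_addr (two u32 words
+ * per op).
+ */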
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __pollrd *pollrd = &entry->region.pollrd;
+ u32 data, wait_count, poll_wait, sel_val;
+ int i;
+
+ poll_wait = pollrd->poll_wait;
+ sel_val = pollrd->sel_val;
+
+ for (i = 0; i < pollrd->no_ops; i++) {
+ qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+ wait_count = 0;
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+ if ((data & pollrd->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+ *buffer++ = cpu_to_le32(sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val += pollrd->sel_val_stride;
+ }
+ return pollrd->no_ops * (2 * sizeof(u32));
+}
+
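+/*
+ * QLCNIC_READ_MUX2 handler: each op drives sel_addr1 with two selector
+ * values in turn, mirrors the masked selector onto sel_addr2, and records
+ * the selector/data pairs read back from read_addr (four u32 words per op).
+ */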
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __mux2 *mux2 = &entry->region.mux2;
+ u32 data;
+ u32 t_sel_val, sel_val1, sel_val2;
+ int i;
+
+ sel_val1 = mux2->sel_val1;
+ sel_val2 = mux2->sel_val2;
+
+ for (i = 0; i < mux2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val1 += mux2->sel_val_stride;
+ sel_val2 += mux2->sel_val_stride;
+ }
+
+ return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 fl_addr, size;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+
+ if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+ (u8 *)buffer, size))
+ return rom->size;
+
+ return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
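+/*
+ * 83xx dump dispatch table: reuses the generic handlers above, swaps in
+ * the flash-based ROM reader and adds the pollrd, mux2 and pollrdmwr
+ * opcodes.
+ */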
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+ {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+ {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
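+/*
+ * Validate the dump template with a ones'-complement style checksum: add
+ * the template up as 32-bit words in a 64-bit accumulator, fold the carry
+ * bits back in and return the complement. A template whose embedded
+ * checksum word is correct sums to zero here.
+ */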
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u8 *buffer, u32 size)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ buffer, size / sizeof(u32));
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ (u8 *)&tmp_hdr, size);
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ cmd->rsp.arg[2] = tmp_hdr.size;
+ cmd->rsp.arg[3] = tmp_hdr.version;
+
+ return ret;
+}
+
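+/*
+ * Query the firmware (QLCNIC_CMD_TEMP_SIZE) for the dump template size and
+ * version. If the mailbox command fails, fall back to reading the template
+ * header from flash and set use_flash_temp so the caller fetches the full
+ * template from flash as well.
+ */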
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ u32 *version, u32 *temp_size,
+ u8 *use_flash_temp)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+ return -ENOMEM;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+ qlcnic_free_mbx_args(&cmd);
+ return -EIO;
+ }
+ *use_flash_temp = 1;
+ }
+
+ *temp_size = cmd.rsp.arg[2];
+ *version = cmd.rsp.arg[3];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (!(*temp_size))
+ return -EIO;
+
+ return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u32 *buffer, u32 temp_size)
+{
+ int err = 0, i;
+ void *tmp_addr;
+ __le32 *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t tmp_addr_t = 0;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get memory for FW dump template\n");
+ return -ENOMEM;
+ }
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ cmd.req.arg[1] = LSD(tmp_addr_t);
+ cmd.req.arg[2] = MSD(tmp_addr_t);
+ cmd.req.arg[3] = temp_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ tmp_buf = tmp_addr;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < temp_size / sizeof(u32); i++)
+ *buffer++ = __le32_to_cpu(*tmp_buf++);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+ return err;
+}
+
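+/*
+ * Fetch and install the minidump template: get its size and version,
+ * allocate tmpl_hdr, try the mailbox path and fall back to flash, then
+ * verify the checksum before enabling FW dump with the default capture
+ * mask.
+ */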
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp_size = 0;
+ u32 version, csum, *tmp_buf;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
+ u8 use_flash_temp = 0;
+
+ ahw = adapter->ahw;
+
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+ &use_flash_temp);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
+ return -EIO;
+ }
+
+ ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
+ if (!ahw->fw_dump.tmpl_hdr)
+ return -ENOMEM;
+
+ tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+ if (use_flash_temp)
+ goto flash_temp;
+
+ err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+ if (err) {
+flash_temp:
+ err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+ temp_size);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get minidump template header %d\n",
+ err);
+ vfree(ahw->fw_dump.tmpl_hdr);
+ ahw->fw_dump.tmpl_hdr = NULL;
+ return -EIO;
+ }
+ }
+
+ csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
+
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ vfree(ahw->fw_dump.tmpl_hdr);
+ ahw->fw_dump.tmpl_hdr = NULL;
+ return -EIO;
+ }
+
+ tmpl_hdr = ahw->fw_dump.tmpl_hdr;
+ tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ ahw->fw_dump.enable = 1;
+
+ return 0;
+}
+
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
__le32 *buffer;
+ u32 ocm_window;
char mesg[64];
char *msg[] = {mesg, NULL};
int i, k, ops_cnt, ops_index, dump_size = 0;
@@ -550,12 +912,23 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
struct qlcnic_dump_entry *entry;
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+ static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_hardware_context *ahw;
+
+ ahw = adapter->ahw;
+
+ if (!fw_dump->enable) {
+ dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+ return -EIO;
+ }
if (fw_dump->clr) {
dev_info(&adapter->pdev->dev,
"Previous dump not cleared, not capturing dump\n");
return -EIO;
}
+
+ netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
if (i & tmpl_hdr->drv_cap_mask)
@@ -564,20 +937,27 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
return -EIO;
fw_dump->data = vzalloc(dump_size);
- if (!fw_dump->data) {
- dev_info(&adapter->pdev->dev,
- "Unable to allocate (%d KB) for fw dump\n",
- dump_size / 1024);
+ if (!fw_dump->data)
return -ENOMEM;
- }
+
buffer = fw_dump->data;
fw_dump->size = dump_size;
no_entries = tmpl_hdr->num_entries;
- ops_cnt = ARRAY_SIZE(fw_dump_ops);
entry_offset = tmpl_hdr->offset;
tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
tmpl_hdr->sys_info[1] = adapter->fw_version;
+ if (qlcnic_82xx_check(adapter)) {
+ ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+ fw_dump_ops = qlcnic_fw_dump_ops;
+ } else {
+ ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+ fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+ ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
+ tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ }
+
for (i = 0; i < no_entries; i++) {
entry = (void *)tmpl_hdr + entry_offset;
if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
@@ -585,6 +965,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
entry_offset += entry->hdr.offset;
continue;
}
+
/* Find the handler for this entry */
ops_index = 0;
while (ops_index < ops_cnt) {
@@ -592,16 +973,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
break;
ops_index++;
}
+
if (ops_index == ops_cnt) {
dev_info(&adapter->pdev->dev,
"Invalid entry type %d, exiting dump\n",
entry->hdr.type);
goto error;
}
+
/* Collect dump for this entry */
dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
- if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
- dump))
+ if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
buf_offset += entry->hdr.cap_size;
entry_offset += entry->hdr.offset;
@@ -616,8 +998,8 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
adapter->netdev->name);
- dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
- fw_dump->size);
+ dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
+ adapter->netdev->name, fw_dump->size);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
return 0;
@@ -626,3 +1008,21 @@ error:
vfree(fw_dump->data);
return -EINVAL;
}
+
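+/*
+ * Refresh the cached minidump template when none is loaded yet or the
+ * running firmware is newer than the version the template was fetched for.
+ */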
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+ u32 prev_version, current_version;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ struct pci_dev *pdev = adapter->pdev;
+
+ prev_version = adapter->fw_version;
+ current_version = qlcnic_83xx_get_fw_version(adapter);
+
+ if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+ if (fw_dump->tmpl_hdr)
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
+ }
+}
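
The qlcnic_dump_fw() hunk above stops assuming a single fw_dump_ops table and instead picks the 82xx or 83xx table once, before walking the template entries. A minimal, self-contained sketch of that dispatch pattern follows; the types, table contents and handler names are simplified stand-ins, not the driver's real definitions.

/* Sketch of per-family ops-table dispatch, loosely modeled on the hunk above.
 * All structures and tables here are simplified stand-ins. */
#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct dump_entry { int type; };

struct dump_ops {
	int type;
	/* handler returns the number of bytes it wrote for this entry */
	size_t (*handler)(const struct dump_entry *entry, void *buffer);
};

static size_t dump_crb(const struct dump_entry *e, void *buf) { (void)e; (void)buf; return 4; }
static size_t dump_ocm(const struct dump_entry *e, void *buf) { (void)e; (void)buf; return 8; }

/* One table per adapter family, selected once before the entry loop. */
static const struct dump_ops ops_82xx[] = { { 1, dump_crb } };
static const struct dump_ops ops_83xx[] = { { 1, dump_crb }, { 2, dump_ocm } };

static size_t dump_one(int is_83xx, const struct dump_entry *entry, void *buf)
{
	const struct dump_ops *ops = is_83xx ? ops_83xx : ops_82xx;
	size_t cnt = is_83xx ? ARRAY_SIZE(ops_83xx) : ARRAY_SIZE(ops_82xx);
	size_t i;

	for (i = 0; i < cnt; i++)
		if (ops[i].type == entry->type)
			return ops[i].handler(entry, buf);

	fprintf(stderr, "Invalid entry type %d, skipping\n", entry->type);
	return 0;
}

int main(void)
{
	struct dump_entry e = { .type = 2 };
	char buf[16];

	printf("captured %zu bytes\n", dump_one(1, &e, buf));
	return 0;
}
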
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 341d37c867f..987fb6f8adc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1,8 +1,16 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_hw.h"
#include <linux/swab.h>
#include <linux/dma-mapping.h>
@@ -13,6 +21,10 @@
#include <linux/aer.h>
#include <linux/log2.h>
+#include <linux/sysfs.h>
+
+#define QLC_STATUS_UNSUPPORTED_CMD -2
+
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -40,7 +52,7 @@ static ssize_t qlcnic_store_bridged_mode(struct device *dev,
if (strict_strtoul(buf, 2, &new))
goto err_out;
- if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
ret = len;
err_out:
@@ -80,9 +92,7 @@ static ssize_t qlcnic_show_diag_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n",
- !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+ return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
}
static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
@@ -111,10 +121,11 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
const char *buf, size_t len)
{
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- int max_sds_rings = adapter->max_sds_rings;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, max_sds_rings = adapter->max_sds_rings;
u16 beacon;
u8 b_state, b_rate;
- int err;
+ unsigned long h_beacon;
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
dev_warn(dev,
@@ -122,6 +133,41 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
return -EOPNOTSUPP;
}
+ if (qlcnic_83xx_check(adapter) &&
+ !test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ if (kstrtoul(buf, 2, &h_beacon))
+ return -EINVAL;
+
+ if (ahw->beacon_state == h_beacon)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE,
+ &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+ if (h_beacon) {
+ err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+ if (err)
+ goto beacon_err;
+ } else {
+ err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+ if (err)
+ goto beacon_err;
+ }
+ /* set the current beacon state */
+ ahw->beacon_state = h_beacon;
+beacon_err:
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ rtnl_unlock();
+ return len;
+ }
+
if (len != sizeof(u16))
return QL_STATUS_INVALID_PARAM;
@@ -154,11 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
-
- if (!err) {
+ if (!err) {
 err = len;
- adapter->ahw->beacon_state = b_state;
- }
+ ahw->beacon_state = b_state;
+ }
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
@@ -207,21 +252,13 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
+ qlcnic_read_crb(adapter, buf, offset, size);
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
- memcpy(buf, &qmdata, size);
- } else {
- data = QLCRD32(adapter, offset);
- memcpy(buf, &data, size);
- }
return size;
}
@@ -231,21 +268,13 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 data;
- u64 qmdata;
int ret;
ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
if (ret != 0)
return ret;
- if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
- memcpy(&qmdata, buf, size);
- qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
- } else {
- memcpy(&data, buf, size);
- QLCWR32(adapter, offset, data);
- }
+ qlcnic_write_crb(adapter, buf, offset, size);
return size;
}
@@ -303,33 +332,44 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
+static int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -1;
+}
+
static int validate_pm_config(struct qlcnic_adapter *adapter,
struct qlcnic_pm_func_cfg *pm_cfg, int count)
{
- u8 src_pci_func, s_esw_id, d_esw_id, dest_pci_func;
- int i;
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i, src_index, dest_index;
for (i = 0; i < count; i++) {
src_pci_func = pm_cfg[i].pci_func;
dest_pci_func = pm_cfg[i].dest_npar;
- if (src_pci_func >= QLCNIC_MAX_PCI_FUNC ||
- dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
+ src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
- if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
+ if (src_index < 0)
return QL_STATUS_INVALID_PARAM;
- if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
+ dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+ if (dest_index < 0)
return QL_STATUS_INVALID_PARAM;
- s_esw_id = adapter->npars[src_pci_func].phy_port;
- d_esw_id = adapter->npars[dest_pci_func].phy_port;
+ s_esw_id = adapter->npars[src_index].phy_port;
+ d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
return QL_STATUS_INVALID_PARAM;
}
- return 0;
+ return 0;
}
static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
@@ -342,7 +382,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg *pm_cfg;
u32 id, action, pci_func;
- int count, rem, i, ret;
+ int count, rem, i, ret, index;
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
@@ -350,26 +390,32 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
return QL_STATUS_INVALID_PARAM;
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
ret = validate_pm_config(adapter, pm_cfg, count);
+
if (ret)
return ret;
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
action = !!pm_cfg[i].action;
- id = adapter->npars[pci_func].phy_port;
- ret = qlcnic_config_port_mirroring(adapter, id, action,
- pci_func);
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return QL_STATUS_INVALID_PARAM;
+
+ id = adapter->npars[index].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
if (ret)
return ret;
}
for (i = 0; i < count; i++) {
pci_func = pm_cfg[i].pci_func;
- id = adapter->npars[pci_func].phy_port;
- adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
- adapter->npars[pci_func].dest_npar = id;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ id = adapter->npars[index].phy_port;
+ adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[index].dest_npar = id;
}
+
return size;
}
@@ -383,16 +429,19 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
int i;
+ u8 pci_func;
if (size != sizeof(pm_cfg))
return QL_STATUS_INVALID_PARAM;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- pm_cfg[i].action = adapter->npars[i].enable_pm;
- pm_cfg[i].dest_npar = 0;
- pm_cfg[i].pci_func = i;
+ memset(&pm_cfg, 0,
+ sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+ pm_cfg[pci_func].dest_npar = 0;
+ pm_cfg[pci_func].pci_func = i;
}
memcpy(buf, &pm_cfg, size);
@@ -404,24 +453,33 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
{
u32 op_mode;
u8 pci_func;
- int i;
+ int i, ret;
- op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ if (qlcnic_82xx_check(adapter))
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ else
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
- }
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
- if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
- QLCNIC_NON_PRIV_FUNC) {
+ if (qlcnic_82xx_check(adapter)) {
+ ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+ } else {
+ ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ pci_func);
+ esw_cfg[i].offload_flags = 0;
+ }
+
+ if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
return QL_STATUS_INVALID_PARAM;
if (esw_cfg[i].mac_override != 1)
@@ -444,6 +502,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
return QL_STATUS_INVALID_PARAM;
}
}
+
return 0;
}
@@ -458,7 +517,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
struct qlcnic_esw_func_cfg *esw_cfg;
struct qlcnic_npar_info *npar;
int count, rem, i, ret;
- u8 pci_func, op_mode = 0;
+ int index;
+ u8 op_mode = 0, pci_func;
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
@@ -471,10 +531,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
return ret;
for (i = 0; i < count; i++) {
- if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
- }
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -503,7 +562,8 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- npar = &adapter->npars[pci_func];
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
npar->promisc_mode = esw_cfg[i].promisc_mode;
@@ -533,18 +593,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
- u8 i;
+ u8 i, pci_func;
if (size != sizeof(esw_cfg))
return QL_STATUS_INVALID_PARAM;
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
- continue;
- esw_cfg[i].pci_func = i;
- if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
+ memset(&esw_cfg, 0,
+ sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ esw_cfg[pci_func].pci_func = pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
}
+
memcpy(buf, &esw_cfg, size);
return size;
@@ -558,10 +621,7 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
- if (pci_func >= QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
-
- if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
return QL_STATUS_INVALID_PARAM;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
@@ -581,7 +641,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
struct qlcnic_info nic_info;
struct qlcnic_npar_func_cfg *np_cfg;
- int i, count, rem, ret;
+ int i, count, rem, ret, index;
u8 pci_func;
count = size / sizeof(struct qlcnic_npar_func_cfg);
@@ -594,8 +654,10 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
if (ret)
return ret;
- for (i = 0; i < count ; i++) {
+ for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (ret)
return ret;
@@ -605,12 +667,12 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
ret = qlcnic_set_nic_info(adapter, &nic_info);
if (ret)
return ret;
- adapter->npars[i].min_bw = nic_info.min_tx_bw;
- adapter->npars[i].max_bw = nic_info.max_tx_bw;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ adapter->npars[index].min_bw = nic_info.min_tx_bw;
+ adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
return size;
-
}
static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
@@ -628,8 +690,12 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
if (size != sizeof(np_cfg))
return QL_STATUS_INVALID_PARAM;
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ memset(&np_cfg, 0,
+ sizeof(struct qlcnic_npar_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
- if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
+ if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
@@ -644,6 +710,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
}
+
memcpy(buf, &np_cfg, size);
return size;
}
@@ -659,6 +726,9 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
struct qlcnic_esw_statistics port_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -691,6 +761,9 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
struct qlcnic_esw_statistics esw_stats;
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
@@ -722,6 +795,9 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
return QL_STATUS_INVALID_PARAM;
@@ -744,10 +820,14 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
char *buf, loff_t offset,
size_t size)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
int ret;
+ if (qlcnic_83xx_check(adapter))
+ return QLC_STATUS_UNSUPPORTED_CMD;
+
if (offset >= QLCNIC_MAX_PCI_FUNC)
return QL_STATUS_INVALID_PARAM;
@@ -789,7 +869,10 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return ret;
}
- for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
+ memset(&pci_cfg, 0,
+ sizeof(struct qlcnic_pci_func_cfg) * QLCNIC_MAX_PCI_FUNC);
+
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].port_num = pci_info[i].default_port;
@@ -797,6 +880,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
}
+
memcpy(buf, &pci_cfg, size);
kfree(pci_info);
return size;
@@ -897,7 +981,6 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -911,9 +994,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
-
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_file(dev, &dev_attr_beacon))
@@ -936,7 +1016,6 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
- u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -945,8 +1024,6 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
- if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
- return;
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -958,3 +1035,23 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
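
Most of the sysfs hunks above replace direct npars[pci_func] indexing with a lookup through qlcnic_is_valid_nic_func(), which maps a PCI function number to its slot in the compacted npars[] array, or fails. A small self-contained sketch of that lookup, with simplified stand-in structures, is below.

/* Sketch of the pci_func -> npar index lookup used in the sysfs hunks.
 * The structures are stand-ins, not the driver's real definitions. */
#include <stdio.h>

#define MAX_NPARS 4

struct npar_info {
	unsigned char pci_func;
	unsigned char phy_port;
};

struct adapter {
	int act_pci_func;                 /* number of valid entries in npars[] */
	struct npar_info npars[MAX_NPARS];
};

/* Return the npars[] index owning pci_func, or -1 if it is not a NIC function. */
static int is_valid_nic_func(const struct adapter *a, unsigned char pci_func)
{
	int i;

	for (i = 0; i < a->act_pci_func; i++)
		if (a->npars[i].pci_func == pci_func)
			return i;

	return -1;
}

int main(void)
{
	struct adapter a = {
		.act_pci_func = 2,
		.npars = { { .pci_func = 0, .phy_port = 0 },
			   { .pci_func = 2, .phy_port = 1 } },
	};
	int idx = is_valid_nic_func(&a, 2);

	if (idx < 0)
		printf("pci_func 2 is not a NIC function\n");
	else
		printf("pci_func 2 -> npars[%d], phy_port %u\n",
		       idx, a.npars[idx].phy_port);
	return 0;
}
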
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 3e73742024b..b13ab544a7e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2920,14 +2920,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate small buffer queue control blocks.
*/
- rx_ring->sbq =
- kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->sbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Small buffer queue control block allocation failed.\n");
+ rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->sbq == NULL)
goto err_mem;
- }
ql_init_sbq_ring(qdev, rx_ring);
}
@@ -2948,14 +2945,11 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
/*
* Allocate large buffer queue control blocks.
*/
- rx_ring->lbq =
- kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
- GFP_KERNEL);
- if (rx_ring->lbq == NULL) {
- netif_err(qdev, ifup, qdev->ndev,
- "Large buffer queue control block allocation failed.\n");
+ rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
+ sizeof(struct bq_desc),
+ GFP_KERNEL);
+ if (rx_ring->lbq == NULL)
goto err_mem;
- }
ql_init_lbq_ring(qdev, rx_ring);
}
@@ -4572,7 +4566,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
qdev->mpi_coredump =
vmalloc(sizeof(struct ql_mpi_coredump));
if (qdev->mpi_coredump == NULL) {
- dev_err(&pdev->dev, "Coredump alloc failed.\n");
err = -ENOMEM;
goto err_out2;
}
@@ -4586,7 +4579,6 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
goto err_out2;
}
- memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
/* Keep local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
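
The ql_alloc_rx_resources() hunks swap open-coded length-times-size kmalloc() calls for kmalloc_array(), whose point is that the multiplication is overflow-checked, and they drop the redundant failure printout. Below is a rough userspace analogue of that allocation pattern, offered only as a sketch of the idea, not the kernel implementation.

/* Userspace sketch of an overflow-checked array allocation in the spirit
 * of kmalloc_array(); the descriptor type is a stand-in. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct bq_desc { void *addr; uint32_t len; };

static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;                  /* n * size would overflow */
	return malloc(n * size);
}

int main(void)
{
	size_t sbq_len = 1024;
	struct bq_desc *sbq = alloc_array(sbq_len, sizeof(*sbq));

	if (!sbq)                             /* message-less failure path, as in the hunk */
		return 1;

	printf("allocated %zu descriptors\n", sbq_len);
	free(sbq);
	return 0;
}
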
diff --git a/drivers/net/ethernet/racal/Kconfig b/drivers/net/ethernet/racal/Kconfig
deleted file mode 100644
index 01969e0a9c6..00000000000
--- a/drivers/net/ethernet/racal/Kconfig
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Racal-Interlan device configuration
-#
-
-config NET_VENDOR_RACAL
- bool "Racal-Interlan (Micom) NI devices"
- default y
- depends on ISA
- ---help---
- If you have a network (Ethernet) card belonging to this class, such
- as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about NI cards. If you say Y, you will be asked for
- your specific card in the following questions.
-
-if NET_VENDOR_RACAL
-
-config NI5010
- tristate "NI5010 support (EXPERIMENTAL)"
- depends on ISA && EXPERIMENTAL && BROKEN_ON_SMP
- ---help---
- If you have a network (Ethernet) card of this type, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Note that this is still
- experimental code.
-
- To compile this driver as a module, choose M here. The module
- will be called ni5010.
-
-endif # NET_VENDOR_RACAL
diff --git a/drivers/net/ethernet/racal/Makefile b/drivers/net/ethernet/racal/Makefile
deleted file mode 100644
index 1e210ca1d78..00000000000
--- a/drivers/net/ethernet/racal/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Racal-Interlan network device drivers.
-#
-
-obj-$(CONFIG_NI5010) += ni5010.o
diff --git a/drivers/net/ethernet/racal/ni5010.c b/drivers/net/ethernet/racal/ni5010.c
deleted file mode 100644
index 80798222005..00000000000
--- a/drivers/net/ethernet/racal/ni5010.c
+++ /dev/null
@@ -1,771 +0,0 @@
-/* ni5010.c: A network driver for the MiCom-Interlan NI5010 ethercard.
- *
- * Copyright 1996,1997,2006 Jan-Pascal van Best and Andreas Mohr.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * The authors may be reached as:
- * janpascal@vanbest.org andi@lisas.de
- *
- * Sources:
- * Donald Becker's "skeleton.c"
- * Crynwr ni5010 packet driver
- *
- * Changes:
- * v0.0: First test version
- * v0.1: First working version
- * v0.2:
- * v0.3->v0.90: Now demand setting io and irq when loading as module
- * 970430 v0.91: modified for Linux 2.1.14
- * v0.92: Implemented Andreas' (better) NI5010 probe
- * 970503 v0.93: Fixed auto-irq failure on warm reboot (JB)
- * 970623 v1.00: First kernel version (AM)
- * 970814 v1.01: Added detection of onboard receive buffer size (AM)
- * 060611 v1.02: slight cleanup: email addresses, driver modernization.
- * Bugs:
- * - not SMP-safe (no locking of I/O accesses)
- * - Note that you have to patch ifconfig for the new /proc/net/dev
- * format. It gives incorrect stats otherwise.
- *
- * To do:
- * Fix all bugs :-)
- * Move some stuff to chipset_init()
- * Handle xmt errors other than collisions
- * Complete merge with Andreas' driver
- * Implement ring buffers (Is this useful? You can't squeeze
- * too many packet in a 2k buffer!)
- * Implement DMA (Again, is this useful? Some docs say DMA is
- * slower than programmed I/O)
- *
- * Compile with:
- * gcc -O2 -fomit-frame-pointer -m486 -D__KERNEL__ \
- * -DMODULE -c ni5010.c
- *
- * Insert with e.g.:
- * insmod ni5010.ko io=0x300 irq=5
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
-#include "ni5010.h"
-
-static const char boardname[] = "NI5010";
-static char version[] __initdata =
- "ni5010.c: v1.02 20060611 Jan-Pascal van Best and Andreas Mohr\n";
-
-/* bufsize_rcv == 0 means autoprobing */
-static unsigned int bufsize_rcv;
-
-#define JUMPERED_INTERRUPTS /* IRQ line jumpered on board */
-#undef JUMPERED_DMA /* No DMA used */
-#undef FULL_IODETECT /* Only detect in portlist */
-
-#ifndef FULL_IODETECT
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int ports[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0x380, 0x3a0, 0 };
-#endif
-
-/* Use 0 for production, 1 for verification, >2 for debug */
-#ifndef NI5010_DEBUG
-#define NI5010_DEBUG 0
-#endif
-
-/* Information that needs to be kept for each board. */
-struct ni5010_local {
- int o_pkt_size;
- spinlock_t lock;
-};
-
-/* Index to functions, as function prototypes. */
-
-static int ni5010_probe1(struct net_device *dev, int ioaddr);
-static int ni5010_open(struct net_device *dev);
-static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t ni5010_interrupt(int irq, void *dev_id);
-static void ni5010_rx(struct net_device *dev);
-static void ni5010_timeout(struct net_device *dev);
-static int ni5010_close(struct net_device *dev);
-static void ni5010_set_multicast_list(struct net_device *dev);
-static void reset_receiver(struct net_device *dev);
-
-static int process_xmt_interrupt(struct net_device *dev);
-#define tx_done(dev) 1
-static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad);
-static void chipset_init(struct net_device *dev, int startp);
-static void dump_packet(void *buf, int len);
-static void ni5010_show_registers(struct net_device *dev);
-
-static int io;
-static int irq;
-
-struct net_device * __init ni5010_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct ni5010_local));
- int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- }
-
- PRINTK2((KERN_DEBUG "%s: Entering ni5010_probe\n", dev->name));
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = ni5010_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
-#ifdef FULL_IODETECT
- for (io=0x200; io<0x400 && ni5010_probe1(dev, io) ; io+=0x20)
- ;
- if (io == 0x400)
- err = -ENODEV;
-
-#else
- for (port = ports; *port && ni5010_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
-#endif /* FULL_IODETECT */
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, NI5010_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static inline int rd_port(int ioaddr)
-{
- inb(IE_RBUF);
- return inb(IE_SAPROM);
-}
-
-static void __init trigger_irq(int ioaddr)
-{
- outb(0x00, EDLC_RESET); /* Clear EDLC hold RESET state */
- outb(0x00, IE_RESET); /* Board reset */
- outb(0x00, EDLC_XMASK); /* Disable all Xmt interrupts */
- outb(0x00, EDLC_RMASK); /* Disable all Rcv interrupt */
- outb(0xff, EDLC_XCLR); /* Clear all pending Xmt interrupts */
- outb(0xff, EDLC_RCLR); /* Clear all pending Rcv interrupts */
- /*
- * Transmit packet mode: Ignore parity, Power xcvr,
- * Enable loopback
- */
- outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
- outb(RMD_BROADCAST, EDLC_RMODE); /* Receive normal&broadcast */
- outb(XM_ALL, EDLC_XMASK); /* Enable all Xmt interrupts */
- udelay(50); /* FIXME: Necessary? */
- outb(MM_EN_XMT|MM_MUX, IE_MMODE); /* Start transmission */
-}
-
-static const struct net_device_ops ni5010_netdev_ops = {
- .ndo_open = ni5010_open,
- .ndo_stop = ni5010_close,
- .ndo_start_xmit = ni5010_send_packet,
- .ndo_set_rx_mode = ni5010_set_multicast_list,
- .ndo_tx_timeout = ni5010_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-
-static int __init ni5010_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned version_printed;
- struct ni5010_local *lp;
- int i;
- unsigned int data = 0;
- int boguscount = 40;
- int err = -ENODEV;
-
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- if (!request_region(ioaddr, NI5010_IO_EXTENT, boardname))
- return -EBUSY;
-
- /*
- * This is no "official" probe method, I've rather tested which
- * probe works best with my seven NI5010 cards
- * (they have very different serial numbers)
- * Suggestions or failure reports are very, very welcome !
- * But I think it is a relatively good probe method
- * since it doesn't use any "outb"
- * It should be nearly 100% reliable !
- * well-known WARNING: this probe method (like many others)
- * will hang the system if a NE2000 card region is probed !
- *
- * - Andreas
- */
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_probe1(%#3x)\n",
- dev->name, ioaddr));
-
- if (inb(ioaddr+0) == 0xff)
- goto out;
-
- while ( (rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr) &
- rd_port(ioaddr) & rd_port(ioaddr) & rd_port(ioaddr)) != 0xff)
- {
- if (boguscount-- == 0)
- goto out;
- }
-
- PRINTK2((KERN_DEBUG "%s: I/O #1 passed!\n", dev->name));
-
- for (i=0; i<32; i++)
- if ( (data = rd_port(ioaddr)) != 0xff) break;
- if (data==0xff)
- goto out;
-
- PRINTK2((KERN_DEBUG "%s: I/O #2 passed!\n", dev->name));
-
- if ((data != SA_ADDR0) || (rd_port(ioaddr) != SA_ADDR1) ||
- (rd_port(ioaddr) != SA_ADDR2))
- goto out;
-
- for (i=0; i<4; i++)
- rd_port(ioaddr);
-
- if ( (rd_port(ioaddr) != NI5010_MAGICVAL1) ||
- (rd_port(ioaddr) != NI5010_MAGICVAL2) )
- goto out;
-
- PRINTK2((KERN_DEBUG "%s: I/O #3 passed!\n", dev->name));
-
- if (NI5010_DEBUG && version_printed++ == 0)
- printk(KERN_INFO "%s", version);
-
- printk("NI5010 ethercard probe at 0x%x: ", ioaddr);
-
- dev->base_addr = ioaddr;
-
- for (i=0; i<6; i++) {
- outw(i, IE_GP);
- dev->dev_addr[i] = inb(IE_SAPROM);
- }
- printk("%pM ", dev->dev_addr);
-
- PRINTK2((KERN_DEBUG "%s: I/O #4 passed!\n", dev->name));
-
-#ifdef JUMPERED_INTERRUPTS
- if (dev->irq == 0xff)
- ;
- else if (dev->irq < 2) {
- unsigned long irq_mask;
-
- PRINTK2((KERN_DEBUG "%s: I/O #5 passed!\n", dev->name));
-
- irq_mask = probe_irq_on();
- trigger_irq(ioaddr);
- mdelay(20);
- dev->irq = probe_irq_off(irq_mask);
-
- PRINTK2((KERN_DEBUG "%s: I/O #6 passed!\n", dev->name));
-
- if (dev->irq == 0) {
- err = -EAGAIN;
- printk(KERN_WARNING "%s: no IRQ found!\n", dev->name);
- goto out;
- }
- PRINTK2((KERN_DEBUG "%s: I/O #7 passed!\n", dev->name));
- } else if (dev->irq == 2) {
- dev->irq = 9;
- }
-#endif /* JUMPERED_INTERRUPTS */
- PRINTK2((KERN_DEBUG "%s: I/O #9 passed!\n", dev->name));
-
- /* DMA is not supported (yet?), so no use detecting it */
- lp = netdev_priv(dev);
-
- spin_lock_init(&lp->lock);
-
- PRINTK2((KERN_DEBUG "%s: I/O #10 passed!\n", dev->name));
-
-/* get the size of the onboard receive buffer
- * higher addresses than bufsize are wrapped into real buffer
- * i.e. data for offs. 0x801 is written to 0x1 with a 2K onboard buffer
- */
- if (!bufsize_rcv) {
- outb(1, IE_MMODE); /* Put Rcv buffer on system bus */
- outw(0, IE_GP); /* Point GP at start of packet */
- outb(0, IE_RBUF); /* set buffer byte 0 to 0 */
- for (i = 1; i < 0xff; i++) {
- outw(i << 8, IE_GP); /* Point GP at packet size to be tested */
- outb(i, IE_RBUF);
- outw(0x0, IE_GP); /* Point GP at start of packet */
- data = inb(IE_RBUF);
- if (data == i) break;
- }
- bufsize_rcv = i << 8;
- outw(0, IE_GP); /* Point GP at start of packet */
- outb(0, IE_RBUF); /* set buffer byte 0 to 0 again */
- }
- printk("-> bufsize rcv/xmt=%d/%d\n", bufsize_rcv, NI5010_BUFSIZE);
-
- dev->netdev_ops = &ni5010_netdev_ops;
- dev->watchdog_timeo = HZ/20;
-
- dev->flags &= ~IFF_MULTICAST; /* Multicast doesn't work */
-
- /* Shut up the ni5010 */
- outb(0, EDLC_RMASK); /* Mask all receive interrupts */
- outb(0, EDLC_XMASK); /* Mask all xmit interrupts */
- outb(0xff, EDLC_RCLR); /* Kill all pending rcv interrupts */
- outb(0xff, EDLC_XCLR); /* Kill all pending xmt interrupts */
-
- printk(KERN_INFO "%s: NI5010 found at 0x%x, using IRQ %d", dev->name, ioaddr, dev->irq);
- if (dev->dma)
- printk(" & DMA %d", dev->dma);
- printk(".\n");
- return 0;
-out:
- release_region(dev->base_addr, NI5010_IO_EXTENT);
- return err;
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is a non-reboot way to recover if something goes wrong.
- */
-
-static int ni5010_open(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- int i;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
-
- if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
- printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
- return -EAGAIN;
- }
- PRINTK3((KERN_DEBUG "%s: passed open() #1\n", dev->name));
- /*
- * Always allocate the DMA channel after the IRQ,
- * and clean up on failure.
- */
-#ifdef JUMPERED_DMA
- if (request_dma(dev->dma, cardname)) {
- printk(KERN_WARNING "%s: Cannot get dma %#2x\n", dev->name, dev->dma);
- free_irq(dev->irq, NULL);
- return -EAGAIN;
- }
-#endif /* JUMPERED_DMA */
-
- PRINTK3((KERN_DEBUG "%s: passed open() #2\n", dev->name));
- /* Reset the hardware here. Don't forget to set the station address. */
-
- outb(RS_RESET, EDLC_RESET); /* Hold up EDLC_RESET while configing board */
- outb(0, IE_RESET); /* Hardware reset of ni5010 board */
- outb(XMD_LBC, EDLC_XMODE); /* Only loopback xmits */
-
- PRINTK3((KERN_DEBUG "%s: passed open() #3\n", dev->name));
- /* Set the station address */
- for(i = 0;i < 6; i++) {
- outb(dev->dev_addr[i], EDLC_ADDR + i);
- }
-
- PRINTK3((KERN_DEBUG "%s: Initialising ni5010\n", dev->name));
- outb(0, EDLC_XMASK); /* No xmit interrupts for now */
- outb(XMD_IG_PAR | XMD_T_MODE | XMD_LBC, EDLC_XMODE);
- /* Normal packet xmit mode */
- outb(0xff, EDLC_XCLR); /* Clear all pending xmit interrupts */
- outb(RMD_BROADCAST, EDLC_RMODE);
- /* Receive broadcast and normal packets */
- reset_receiver(dev); /* Ready ni5010 for receiving packets */
-
- outb(0, EDLC_RESET); /* Un-reset the ni5010 */
-
- netif_start_queue(dev);
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-
- PRINTK((KERN_DEBUG "%s: open successful\n", dev->name));
- return 0;
-}
-
-static void reset_receiver(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK3((KERN_DEBUG "%s: resetting receiver\n", dev->name));
- outw(0, IE_GP); /* Receive packet at start of buffer */
- outb(0xff, EDLC_RCLR); /* Clear all pending rcv interrupts */
- outb(0, IE_MMODE); /* Put EDLC to rcv buffer */
- outb(MM_EN_RCV, IE_MMODE); /* Enable rcv */
- outb(0xff, EDLC_RMASK); /* Enable all rcv interrupts */
-}
-
-static void ni5010_timeout(struct net_device *dev)
-{
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
- /* Try to restart the adaptor. */
- /* FIXME: Give it a real kick here */
- chipset_init(dev, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_send_packet\n", dev->name));
-
- /*
- * Block sending
- */
-
- netif_stop_queue(dev);
- hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
- dev_kfree_skb (skb);
- return NETDEV_TX_OK;
-}
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t ni5010_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct ni5010_local *lp;
- int ioaddr, status;
- int xmit_was_error = 0;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_interrupt\n", dev->name));
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- spin_lock(&lp->lock);
- status = inb(IE_ISTAT);
- PRINTK3((KERN_DEBUG "%s: IE_ISTAT = %#02x\n", dev->name, status));
-
- if ((status & IS_R_INT) == 0) ni5010_rx(dev);
-
- if ((status & IS_X_INT) == 0) {
- xmit_was_error = process_xmt_interrupt(dev);
- }
-
- if ((status & IS_DMA_INT) == 0) {
- PRINTK((KERN_DEBUG "%s: DMA complete (?)\n", dev->name));
- outb(0, IE_DMA_RST); /* Reset DMA int */
- }
-
- if (!xmit_was_error)
- reset_receiver(dev);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
-}
-
-
-static void dump_packet(void *buf, int len)
-{
- int i;
-
- printk(KERN_DEBUG "Packet length = %#4x\n", len);
- for (i = 0; i < len; i++){
- if (i % 16 == 0) printk(KERN_DEBUG "%#4.4x", i);
- if (i % 2 == 0) printk(" ");
- printk("%2.2x", ((unsigned char *)buf)[i]);
- if (i % 16 == 15) printk("\n");
- }
- printk("\n");
-}
-
-/* We have a good packet, get it out of the buffer. */
-static void ni5010_rx(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- unsigned char rcv_stat;
- struct sk_buff *skb;
- int i_pkt_size;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_rx()\n", dev->name));
-
- rcv_stat = inb(EDLC_RSTAT);
- PRINTK3((KERN_DEBUG "%s: EDLC_RSTAT = %#2x\n", dev->name, rcv_stat));
-
- if ( (rcv_stat & RS_VALID_BITS) != RS_PKT_OK) {
- PRINTK((KERN_INFO "%s: receive error.\n", dev->name));
- dev->stats.rx_errors++;
- if (rcv_stat & RS_RUNT) dev->stats.rx_length_errors++;
- if (rcv_stat & RS_ALIGN) dev->stats.rx_frame_errors++;
- if (rcv_stat & RS_CRC_ERR) dev->stats.rx_crc_errors++;
- if (rcv_stat & RS_OFLW) dev->stats.rx_fifo_errors++;
- outb(0xff, EDLC_RCLR); /* Clear the interrupt */
- return;
- }
-
- outb(0xff, EDLC_RCLR); /* Clear the interrupt */
-
- i_pkt_size = inw(IE_RCNT);
- if (i_pkt_size > ETH_FRAME_LEN || i_pkt_size < 10 ) {
- PRINTK((KERN_DEBUG "%s: Packet size error, packet size = %#4.4x\n",
- dev->name, i_pkt_size));
- dev->stats.rx_errors++;
- dev->stats.rx_length_errors++;
- return;
- }
-
- /* Malloc up new buffer. */
- skb = netdev_alloc_skb(dev, i_pkt_size + 3);
- if (skb == NULL) {
- printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- return;
- }
-
- skb_reserve(skb, 2);
-
- /* Read packet into buffer */
- outb(MM_MUX, IE_MMODE); /* Rcv buffer to system bus */
- outw(0, IE_GP); /* Seek to beginning of packet */
- insb(IE_RBUF, skb_put(skb, i_pkt_size), i_pkt_size);
-
- if (NI5010_DEBUG >= 4)
- dump_packet(skb->data, skb->len);
-
- skb->protocol = eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += i_pkt_size;
-
- PRINTK2((KERN_DEBUG "%s: Received packet, size=%#4.4x\n",
- dev->name, i_pkt_size));
-}
-
-static int process_xmt_interrupt(struct net_device *dev)
-{
- struct ni5010_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int xmit_stat;
-
- PRINTK2((KERN_DEBUG "%s: entering process_xmt_interrupt\n", dev->name));
-
- xmit_stat = inb(EDLC_XSTAT);
- PRINTK3((KERN_DEBUG "%s: EDLC_XSTAT = %2.2x\n", dev->name, xmit_stat));
-
- outb(0, EDLC_XMASK); /* Disable xmit IRQ's */
- outb(0xff, EDLC_XCLR); /* Clear all pending xmit IRQ's */
-
- if (xmit_stat & XS_COLL){
- PRINTK((KERN_DEBUG "%s: collision detected, retransmitting\n",
- dev->name));
- outw(NI5010_BUFSIZE - lp->o_pkt_size, IE_GP);
- /* outb(0, IE_MMODE); */ /* xmt buf on sysbus FIXME: needed ? */
- outb(MM_EN_XMT | MM_MUX, IE_MMODE);
- outb(XM_ALL, EDLC_XMASK); /* Enable xmt IRQ's */
- dev->stats.collisions++;
- return 1;
- }
-
- /* FIXME: handle other xmt error conditions */
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += lp->o_pkt_size;
- netif_wake_queue(dev);
-
- PRINTK2((KERN_DEBUG "%s: sent packet, size=%#4.4x\n",
- dev->name, lp->o_pkt_size));
-
- return 0;
-}
-
-/* The inverse routine to ni5010_open(). */
-static int ni5010_close(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK2((KERN_DEBUG "%s: entering ni5010_close\n", dev->name));
-#ifdef JUMPERED_INTERRUPTS
- free_irq(dev->irq, NULL);
-#endif
- /* Put card in held-RESET state */
- outb(0, IE_MMODE);
- outb(RS_RESET, EDLC_RESET);
-
- netif_stop_queue(dev);
-
- PRINTK((KERN_DEBUG "%s: %s closed down\n", dev->name, boardname));
- return 0;
-
-}
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
-*/
-static void ni5010_set_multicast_list(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
-
- PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
-
- if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
- !netdev_mc_empty(dev)) {
- outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
- PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
- } else {
- PRINTK((KERN_DEBUG "%s: Entering broadcast mode\n", dev->name));
- outb(RMD_BROADCAST, EDLC_RMODE); /* Disable promiscuous mode, use normal mode */
- }
-}
-
-static void hardware_send_packet(struct net_device *dev, char *buf, int length, int pad)
-{
- struct ni5010_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- unsigned long flags;
- unsigned int buf_offs;
-
- PRINTK2((KERN_DEBUG "%s: entering hardware_send_packet\n", dev->name));
-
- if (length > ETH_FRAME_LEN) {
- PRINTK((KERN_WARNING "%s: packet too large, not possible\n",
- dev->name));
- return;
- }
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-
- if (inb(IE_ISTAT) & IS_EN_XMT) {
- PRINTK((KERN_WARNING "%s: sending packet while already transmitting, not possible\n",
- dev->name));
- return;
- }
-
- if (NI5010_DEBUG > 3) dump_packet(buf, length);
-
- buf_offs = NI5010_BUFSIZE - length - pad;
-
- spin_lock_irqsave(&lp->lock, flags);
- lp->o_pkt_size = length + pad;
-
- outb(0, EDLC_RMASK); /* Mask all receive interrupts */
- outb(0, IE_MMODE); /* Put Xmit buffer on system bus */
- outb(0xff, EDLC_RCLR); /* Clear out pending rcv interrupts */
-
- outw(buf_offs, IE_GP); /* Point GP at start of packet */
- outsb(IE_XBUF, buf, length); /* Put data in buffer */
- while(pad--)
- outb(0, IE_XBUF);
-
- outw(buf_offs, IE_GP); /* Rewrite where packet starts */
-
- /* should work without that outb() (Crynwr used it) */
- /*outb(MM_MUX, IE_MMODE);*/ /* Xmt buffer to EDLC bus */
- outb(MM_EN_XMT | MM_MUX, IE_MMODE); /* Begin transmission */
- outb(XM_ALL, EDLC_XMASK); /* Cause interrupt after completion or fail */
-
- spin_unlock_irqrestore(&lp->lock, flags);
-
- netif_wake_queue(dev);
-
- if (NI5010_DEBUG) ni5010_show_registers(dev);
-}
-
-static void chipset_init(struct net_device *dev, int startp)
-{
- /* FIXME: Move some stuff here */
- PRINTK3((KERN_DEBUG "%s: doing NOTHING in chipset_init\n", dev->name));
-}
-
-static void ni5010_show_registers(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
-
- PRINTK3((KERN_DEBUG "%s: XSTAT %#2.2x\n", dev->name, inb(EDLC_XSTAT)));
- PRINTK3((KERN_DEBUG "%s: XMASK %#2.2x\n", dev->name, inb(EDLC_XMASK)));
- PRINTK3((KERN_DEBUG "%s: RSTAT %#2.2x\n", dev->name, inb(EDLC_RSTAT)));
- PRINTK3((KERN_DEBUG "%s: RMASK %#2.2x\n", dev->name, inb(EDLC_RMASK)));
- PRINTK3((KERN_DEBUG "%s: RMODE %#2.2x\n", dev->name, inb(EDLC_RMODE)));
- PRINTK3((KERN_DEBUG "%s: XMODE %#2.2x\n", dev->name, inb(EDLC_XMODE)));
- PRINTK3((KERN_DEBUG "%s: ISTAT %#2.2x\n", dev->name, inb(IE_ISTAT)));
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni5010;
-
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "ni5010 I/O base address");
-MODULE_PARM_DESC(irq, "ni5010 IRQ number");
-
-static int __init ni5010_init_module(void)
-{
- PRINTK2((KERN_DEBUG "%s: entering init_module\n", boardname));
- /*
- if(io <= 0 || irq == 0){
- printk(KERN_WARNING "%s: Autoprobing not allowed for modules.\n", boardname);
- printk(KERN_WARNING "%s: Set symbols 'io' and 'irq'\n", boardname);
- return -EINVAL;
- }
- */
- if (io <= 0){
- printk(KERN_WARNING "%s: Autoprobing for modules is hazardous, trying anyway..\n", boardname);
- }
-
- PRINTK2((KERN_DEBUG "%s: init_module irq=%#2x, io=%#3x\n", boardname, irq, io));
- dev_ni5010 = ni5010_probe(-1);
- if (IS_ERR(dev_ni5010))
- return PTR_ERR(dev_ni5010);
- return 0;
-}
-
-static void __exit ni5010_cleanup_module(void)
-{
- PRINTK2((KERN_DEBUG "%s: entering cleanup_module\n", boardname));
- unregister_netdev(dev_ni5010);
- release_region(dev_ni5010->base_addr, NI5010_IO_EXTENT);
- free_netdev(dev_ni5010);
-}
-module_init(ni5010_init_module);
-module_exit(ni5010_cleanup_module);
-#endif /* MODULE */
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/racal/ni5010.h b/drivers/net/ethernet/racal/ni5010.h
deleted file mode 100644
index e10e717fcd7..00000000000
--- a/drivers/net/ethernet/racal/ni5010.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Racal-Interlan ni5010 Ethernet definitions
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers that work.
- *
- * copyrights (c) 1996 by Jan-Pascal van Best (jvbest@wi.leidenuniv.nl)
- *
- * I have done a look in the following sources:
- * crynwr-packet-driver by Russ Nelson
- */
-
-#define NI5010_BUFSIZE 2048 /* number of bytes in a buffer */
-
-#define NI5010_MAGICVAL0 0x00 /* magic-values for ni5010 card */
-#define NI5010_MAGICVAL1 0x55
-#define NI5010_MAGICVAL2 0xAA
-
-#define SA_ADDR0 0x02
-#define SA_ADDR1 0x07
-#define SA_ADDR2 0x01
-
-/* The number of low I/O ports used by the ni5010 ethercard. */
-#define NI5010_IO_EXTENT 32
-
-#define PRINTK(x) if (NI5010_DEBUG) printk x
-#define PRINTK2(x) if (NI5010_DEBUG>=2) printk x
-#define PRINTK3(x) if (NI5010_DEBUG>=3) printk x
-
-/* The various IE command registers */
-#define EDLC_XSTAT (ioaddr + 0x00) /* EDLC transmit csr */
-#define EDLC_XCLR (ioaddr + 0x00) /* EDLC transmit "Clear IRQ" */
-#define EDLC_XMASK (ioaddr + 0x01) /* EDLC transmit "IRQ Masks" */
-#define EDLC_RSTAT (ioaddr + 0x02) /* EDLC receive csr */
-#define EDLC_RCLR (ioaddr + 0x02) /* EDLC receive "Clear IRQ" */
-#define EDLC_RMASK (ioaddr + 0x03) /* EDLC receive "IRQ Masks" */
-#define EDLC_XMODE (ioaddr + 0x04) /* EDLC transmit Mode */
-#define EDLC_RMODE (ioaddr + 0x05) /* EDLC receive Mode */
-#define EDLC_RESET (ioaddr + 0x06) /* EDLC RESET register */
-#define EDLC_TDR1 (ioaddr + 0x07) /* "Time Domain Reflectometry" reg1 */
-#define EDLC_ADDR (ioaddr + 0x08) /* EDLC station address, 6 bytes */
- /* 0x0E doesn't exist for r/w */
-#define EDLC_TDR2 (ioaddr + 0x0f) /* "Time Domain Reflectometry" reg2 */
-#define IE_GP (ioaddr + 0x10) /* GP pointer (word register) */
- /* 0x11 is 2nd byte of GP Pointer */
-#define IE_RCNT (ioaddr + 0x10) /* Count of bytes in rcv'd packet */
- /* 0x11 is 2nd byte of "Byte Count" */
-#define IE_MMODE (ioaddr + 0x12) /* Memory Mode register */
-#define IE_DMA_RST (ioaddr + 0x13) /* IE DMA Reset. write only */
-#define IE_ISTAT (ioaddr + 0x13) /* IE Interrupt Status. read only */
-#define IE_RBUF (ioaddr + 0x14) /* IE Receive Buffer port */
-#define IE_XBUF (ioaddr + 0x15) /* IE Transmit Buffer port */
-#define IE_SAPROM (ioaddr + 0x16) /* window on station addr prom */
-#define IE_RESET (ioaddr + 0x17) /* any write causes Board Reset */
-
-/* bits in EDLC_XSTAT, interrupt clear on write, status when read */
-#define XS_TPOK 0x80 /* transmit packet successful */
-#define XS_CS 0x40 /* carrier sense */
-#define XS_RCVD 0x20 /* transmitted packet received */
-#define XS_SHORT 0x10 /* transmission media is shorted */
-#define XS_UFLW 0x08 /* underflow. iff failed board */
-#define XS_COLL 0x04 /* collision occurred */
-#define XS_16COLL 0x02 /* 16th collision occurred */
-#define XS_PERR 0x01 /* parity error */
-
-#define XS_CLR_UFLW 0x08 /* clear underflow */
-#define XS_CLR_COLL 0x04 /* clear collision */
-#define XS_CLR_16COLL 0x02 /* clear 16th collision */
-#define XS_CLR_PERR 0x01 /* clear parity error */
-
-/* bits in EDLC_XMASK, mask/enable transmit interrupts. register is r/w */
-#define XM_TPOK 0x80 /* =1 to enable Xmt Pkt OK interrupts */
-#define XM_RCVD 0x20 /* =1 to enable Xmt Pkt Rcvd ints */
-#define XM_UFLW 0x08 /* =1 to enable Xmt Underflow ints */
-#define XM_COLL 0x04 /* =1 to enable Xmt Collision ints */
-#define XM_COLL16 0x02 /* =1 to enable Xmt 16th Coll ints */
-#define XM_PERR 0x01 /* =1 to enable Xmt Parity Error ints */
- /* note: always clear this bit */
-#define XM_ALL (XM_TPOK | XM_RCVD | XM_UFLW | XM_COLL | XM_COLL16)
-
-/* bits in EDLC_RSTAT, interrupt clear on write, status when read */
-#define RS_PKT_OK 0x80 /* received good packet */
-#define RS_RST_PKT 0x10 /* RESET packet received */
-#define RS_RUNT 0x08 /* Runt Pkt rcvd. Len < 64 Bytes */
-#define RS_ALIGN 0x04 /* Alignment error. not 8 bit aligned */
-#define RS_CRC_ERR 0x02 /* Bad CRC on rcvd pkt */
-#define RS_OFLW 0x01 /* overflow for rcv FIFO */
-#define RS_VALID_BITS ( RS_PKT_OK | RS_RST_PKT | RS_RUNT | RS_ALIGN | RS_CRC_ERR | RS_OFLW )
- /* all valid RSTAT bits */
-
-#define RS_CLR_PKT_OK 0x80 /* clear rcvd packet interrupt */
-#define RS_CLR_RST_PKT 0x10 /* clear RESET packet received */
-#define RS_CLR_RUNT 0x08 /* clear Runt Pckt received */
-#define RS_CLR_ALIGN 0x04 /* clear Alignment error */
-#define RS_CLR_CRC_ERR 0x02 /* clear CRC error */
-#define RS_CLR_OFLW 0x01 /* clear rcv FIFO Overflow */
-
-/* bits in EDLC_RMASK, mask/enable receive interrupts. register is r/w */
-#define RM_PKT_OK 0x80 /* =1 to enable rcvd good packet ints */
-#define RM_RST_PKT 0x10 /* =1 to enable RESET packet ints */
-#define RM_RUNT 0x08 /* =1 to enable Runt Pkt rcvd ints */
-#define RM_ALIGN 0x04 /* =1 to enable Alignment error ints */
-#define RM_CRC_ERR 0x02 /* =1 to enable Bad CRC error ints */
-#define RM_OFLW 0x01 /* =1 to enable overflow error ints */
-
-/* bits in EDLC_RMODE, set Receive Packet mode. register is r/w */
-#define RMD_TEST 0x80 /* =1 for Chip testing. normally 0 */
-#define RMD_ADD_SIZ 0x10 /* =1 5-byte addr match. normally 0 */
-#define RMD_EN_RUNT 0x08 /* =1 enable runt rcv. normally 0 */
-#define RMD_EN_RST 0x04 /* =1 to rcv RESET pkt. normally 0 */
-
-#define RMD_PROMISC 0x03 /* receive *all* packets. unusual */
-#define RMD_MULTICAST 0x02 /* receive multicasts too. unusual */
-#define RMD_BROADCAST 0x01 /* receive broadcasts & normal. usual */
-#define RMD_NO_PACKETS 0x00 /* don't receive any packets. unusual */
-
-/* bits in EDLC_XMODE, set Transmit Packet mode. register is r/w */
-#define XMD_COLL_CNT 0xf0 /* coll's since success. read-only */
-#define XMD_IG_PAR 0x08 /* =1 to ignore parity. ALWAYS set */
-#define XMD_T_MODE 0x04 /* =1 to power xcvr. ALWAYS set this */
-#define XMD_LBC 0x02 /* =1 for loopbakc. normally set */
-#define XMD_DIS_C 0x01 /* =1 disables contention. normally 0 */
-
-/* bits in EDLC_RESET, write only */
-#define RS_RESET 0x80 /* =1 to hold EDLC in reset state */
-
-/* bits in IE_MMODE, write only */
-#define MM_EN_DMA 0x80 /* =1 begin DMA xfer, Cplt clrs it */
-#define MM_EN_RCV 0x40 /* =1 allows Pkt rcv. clr'd by rcv */
-#define MM_EN_XMT 0x20 /* =1 begin Xmt pkt. Cplt clrs it */
-#define MM_BUS_PAGE 0x18 /* =00 ALWAYS. Used when MUX=1 */
-#define MM_NET_PAGE 0x06 /* =00 ALWAYS. Used when MUX=0 */
-#define MM_MUX 0x01 /* =1 means Rcv Buff on system bus */
- /* =0 means Xmt Buff on system bus */
-
-/* bits in IE_ISTAT, read only */
-#define IS_TDIAG 0x80 /* =1 if Diagnostic problem */
-#define IS_EN_RCV 0x20 /* =1 until frame is rcv'd cplt */
-#define IS_EN_XMT 0x10 /* =1 until frame is xmt'd cplt */
-#define IS_EN_DMA 0x08 /* =1 until DMA is cplt or aborted */
-#define IS_DMA_INT 0x04 /* =0 iff DMA done interrupt. */
-#define IS_R_INT 0x02 /* =0 iff unmasked Rcv interrupt */
-#define IS_X_INT 0x01 /* =0 iff unmasked Xmt interrupt */
-
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 63c13125db6..5b4103db70f 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -755,9 +755,6 @@ static void r6040_mac_address(struct net_device *dev)
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
-
- /* Store MAC Address in perm_addr */
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
}
static int r6040_open(struct net_device *dev)
@@ -957,9 +954,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct r6040_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1045,7 +1042,7 @@ static int r6040_mii_probe(struct net_device *dev)
}
phydev = phy_connect(dev, dev_name(&phydev->dev), &r6040_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
dev_err(&lp->pdev->dev, "could not attach to PHY\n");
@@ -1195,9 +1192,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
lp->mii_bus->name = "r6040_eth_mii";
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
dev_name(&pdev->dev), card_idx);
- lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ lp->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
if (!lp->mii_bus->irq) {
- dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
err = -ENOMEM;
goto err_out_mdio;
}
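
The netdev_get_drvinfo() hunk above moves from strcpy() to strlcpy() because the ethtool drvinfo fields are fixed-size character arrays. The sketch below shows the bounded truncate-and-terminate behaviour with a local stand-in for strlcpy(), since that helper is a kernel/BSD routine rather than ISO C.

/* Sketch of bounded copies into fixed-size drvinfo-style fields.
 * my_strlcpy() is a stand-in with the usual strlcpy semantics. */
#include <stdio.h>
#include <string.h>

struct drvinfo {
	char driver[8];
	char version[8];
};

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;                 /* length it tried to create */
}

int main(void)
{
	struct drvinfo info;

	my_strlcpy(info.driver, "r6040", sizeof(info.driver));
	my_strlcpy(info.version, "0.28-very-long-tag", sizeof(info.version));
	/* version is truncated, not overflowed */
	printf("%s %s\n", info.driver, info.version);
	return 0;
}
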
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5ac93323a40..b62a32484f6 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1949,7 +1949,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 5dc16163012..1276ac71353 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -991,7 +991,6 @@ static int rtl8139_init_one(struct pci_dev *pdev,
for (i = 0; i < 3; i++)
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* The Rtl8139-specific entries in the device structure. */
dev->netdev_ops = &rtl8139_netdev_ops;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 998974f7874..8900398ba10 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -83,7 +83,7 @@ static const int multicast_filter_limit = 32;
#define R8169_REGS_SIZE 256
#define R8169_NAPI_WEIGHT 64
#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
-#define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
+#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
@@ -723,7 +723,6 @@ struct rtl8169_private {
u16 mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
- u32 dirty_rx;
u32 dirty_tx;
struct rtl8169_stats rx_stats;
struct rtl8169_stats tx_stats;
@@ -4136,7 +4135,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
- tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+ tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
}
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -5870,7 +5869,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
/* The infamous DAC f*ckup only happens at boot time */
- if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
+ if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
void __iomem *ioaddr = tp->mmio_addr;
netif_info(tp, intr, dev, "disabling PCI DAC\n");
@@ -5985,10 +5984,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
unsigned int count;
cur_rx = tp->cur_rx;
- rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
- rx_left = min(rx_left, budget);
- for (; rx_left > 0; rx_left--, cur_rx++) {
+ for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
unsigned int entry = cur_rx % NUM_RX_DESC;
struct RxDesc *desc = tp->RxDescArray + entry;
u32 status;
@@ -6066,8 +6063,6 @@ release_descriptor:
count = cur_rx - tp->cur_rx;
tp->cur_rx = cur_rx;
- tp->dirty_rx += count;
-
return count;
}
@@ -6891,7 +6886,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
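
Two details of the r8169 hunks above are connected: once rx_left is clamped directly with min(budget, NUM_RX_DESC), the dirty_rx counter has no remaining reader, and NUM_RX_DESC has to become 256U because budget is a u32 and the kernel's min() macro warns when its two operands have different types. A small illustration of that type rule (the helper name is made up):

	#include <linux/kernel.h>

	static u32 example_clamp_budget(u32 budget)
	{
		/* min() requires both operands to have the same type;
		 * 256U matches the u32 budget, a plain signed 256 would
		 * trigger the distinct-types warning.  min_t(u32, budget, 256)
		 * is the other common spelling.
		 */
		return min(budget, 256U);
	}
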
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 3d705862bd7..33e96176e4d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -891,18 +891,16 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
- GFP_KERNEL);
+ mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
+ sizeof(*mdp->rx_skbuff), GFP_KERNEL);
if (!mdp->rx_skbuff) {
- dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
ret = -ENOMEM;
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
- GFP_KERNEL);
+ mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
+ sizeof(*mdp->tx_skbuff), GFP_KERNEL);
if (!mdp->tx_skbuff) {
- dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
ret = -ENOMEM;
goto skb_ring_free;
}
@@ -1422,7 +1420,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
/* Try connect to PHY */
phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
- 0, mdp->phy_interface);
+ mdp->phy_interface);
if (IS_ERR(phydev)) {
dev_err(&ndev->dev, "phy_connect failed\n");
return PTR_ERR(phydev);
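
The phy_connect() change in sh_eth repeats in the s6gmac, smsc9420, stmmac, cpmac and cpsw hunks below: the phylib API dropped its unused flags argument, so the interface mode now follows the adjust-link callback directly. A hedged sketch of a caller against the new signature (the device, callback and function names are placeholders, not from the patch):

	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static void example_adjust_link(struct net_device *ndev)
	{
		/* react to link/speed/duplex changes reported by phylib */
	}

	static int example_connect_phy(struct net_device *ndev, const char *bus_id)
	{
		struct phy_device *phydev;

		/* old: phy_connect(ndev, bus_id, example_adjust_link, 0,
		 *                  PHY_INTERFACE_MODE_MII);
		 */
		phydev = phy_connect(ndev, bus_id, example_adjust_link,
				     PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		return 0;
	}
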
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 72fc57dd084..21683e2b1ff 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -795,7 +795,7 @@ static inline int s6gmac_phy_start(struct net_device *dev)
struct phy_device *p = NULL;
while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
PHY_INTERFACE_MODE_RGMII);
if (IS_ERR(p)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/ethernet/seeq/Kconfig b/drivers/net/ethernet/seeq/Kconfig
index 29f18533fdc..a71e1ec068e 100644
--- a/drivers/net/ethernet/seeq/Kconfig
+++ b/drivers/net/ethernet/seeq/Kconfig
@@ -26,17 +26,6 @@ config ARM_ETHER3
If you have an Acorn system with one of these network cards, you
should say Y to this option if you wish to use it with Linux.
-config SEEQ8005
- tristate "SEEQ8005 support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
- ---help---
- This is a driver for the SEEQ 8005 network (Ethernet) card. If this
- is for you, read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here. The module
- will be called seeq8005.
-
config SGISEEQ
tristate "SGI Seeq ethernet controller support"
depends on SGI_HAS_SEEQ
diff --git a/drivers/net/ethernet/seeq/Makefile b/drivers/net/ethernet/seeq/Makefile
index 3e258a580c0..0488e99b831 100644
--- a/drivers/net/ethernet/seeq/Makefile
+++ b/drivers/net/ethernet/seeq/Makefile
@@ -3,5 +3,4 @@
#
obj-$(CONFIG_ARM_ETHER3) += ether3.o
-obj-$(CONFIG_SEEQ8005) += seeq8005.o
obj-$(CONFIG_SGISEEQ) += sgiseeq.o
diff --git a/drivers/net/ethernet/seeq/seeq8005.c b/drivers/net/ethernet/seeq/seeq8005.c
deleted file mode 100644
index d6e50de7118..00000000000
--- a/drivers/net/ethernet/seeq/seeq8005.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/* seeq8005.c: A network driver for linux. */
-/*
- Based on skeleton.c,
- Written 1993-94 by Donald Becker.
- See the skeleton.c file for further copyright information.
-
- This software may be used and distributed according to the terms
- of the GNU General Public License, incorporated herein by reference.
-
- The author may be reached as hamish@zot.apana.org.au
-
- This file is a network device driver for the SEEQ 8005 chipset and
- the Linux operating system.
-
-*/
-
-static const char version[] =
- "seeq8005.c:v1.00 8/07/95 Hamish Coleman (hamish@zot.apana.org.au)\n";
-
-/*
- Sources:
- SEEQ 8005 databook
-
- Version history:
- 1.00 Public release. cosmetic changes (no warnings now)
- 0.68 Turning per- packet,interrupt debug messages off - testing for release.
- 0.67 timing problems/bad buffer reads seem to be fixed now
- 0.63 *!@$ protocol=eth_type_trans -- now packets flow
- 0.56 Send working
- 0.48 Receive working
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "seeq8005.h"
-
-/* First, a few definitions that the brave might change. */
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int seeq8005_portlist[] __initdata =
- { 0x300, 0x320, 0x340, 0x360, 0};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 1
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* Information that need to be kept for each board. */
-struct net_local {
- unsigned short receive_ptr; /* What address in packet memory do we expect a recv_pkt_header? */
- long open_time; /* Useless example local info. */
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00
-#define SA_ADDR1 0x80
-#define SA_ADDR2 0x4b
-
-/* Index to functions, as function prototypes. */
-
-static int seeq8005_probe1(struct net_device *dev, int ioaddr);
-static int seeq8005_open(struct net_device *dev);
-static void seeq8005_timeout(struct net_device *dev);
-static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t seeq8005_interrupt(int irq, void *dev_id);
-static void seeq8005_rx(struct net_device *dev);
-static int seeq8005_close(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-
-/* Example routines you must write ;->. */
-#define tx_done(dev) (inw(SEEQ_STATUS) & SEEQSTAT_TX_ON)
-static void hardware_send_packet(struct net_device *dev, char *buf, int length);
-extern void seeq8005_init(struct net_device *dev, int startp);
-static inline void wait_for_buffer(struct net_device *dev);
-
-
-/* Check for a network adaptor of this type, and return '0' iff one exists.
- If dev->base_addr == 0, probe all likely locations.
- If dev->base_addr == 1, always return failure.
- */
-
-static int io = 0x320;
-static int irq = 10;
-
-struct net_device * __init seeq8005_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- unsigned *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- io = dev->base_addr;
- irq = dev->irq;
- }
-
- if (io > 0x1ff) { /* Check a single specified location. */
- err = seeq8005_probe1(dev, io);
- } else if (io != 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = seeq8005_portlist; *port; port++) {
- if (seeq8005_probe1(dev, *port) == 0)
- break;
- }
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- release_region(dev->base_addr, SEEQ8005_IO_EXTENT);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops seeq8005_netdev_ops = {
- .ndo_open = seeq8005_open,
- .ndo_stop = seeq8005_close,
- .ndo_start_xmit = seeq8005_send_packet,
- .ndo_tx_timeout = seeq8005_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/* This is the real probe routine. Linux has a history of friendly device
- probes on the ISA bus. A good device probes avoids doing writes, and
- verifies that the correct device exists and functions. */
-
-static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
-{
- static unsigned version_printed;
- int i,j;
- unsigned char SA_prom[32];
- int old_cfg1;
- int old_cfg2;
- int old_stat;
- int old_dmaar;
- int old_rear;
- int retval;
-
- if (!request_region(ioaddr, SEEQ8005_IO_EXTENT, "seeq8005"))
- return -ENODEV;
-
- if (net_debug>1)
- printk("seeq8005: probing at 0x%x\n",ioaddr);
-
- old_stat = inw(SEEQ_STATUS); /* read status register */
- if (old_stat == 0xffff) {
- retval = -ENODEV;
- goto out; /* assume that 0xffff == no device */
- }
- if ( (old_stat & 0x1800) != 0x1800 ) { /* assume that unused bits are 1, as my manual says */
- if (net_debug>1) {
- printk("seeq8005: reserved stat bits != 0x1800\n");
- printk(" == 0x%04x\n",old_stat);
- }
- retval = -ENODEV;
- goto out;
- }
-
- old_rear = inw(SEEQ_REA);
- if (old_rear == 0xffff) {
- outw(0,SEEQ_REA);
- if (inw(SEEQ_REA) == 0xffff) { /* assume that 0xffff == no device */
- retval = -ENODEV;
- goto out;
- }
- } else if ((old_rear & 0xff00) != 0xff00) { /* assume that unused bits are 1 */
- if (net_debug>1) {
- printk("seeq8005: unused rear bits != 0xff00\n");
- printk(" == 0x%04x\n",old_rear);
- }
- retval = -ENODEV;
- goto out;
- }
-
- old_cfg2 = inw(SEEQ_CFG2); /* read CFG2 register */
- old_cfg1 = inw(SEEQ_CFG1);
- old_dmaar = inw(SEEQ_DMAAR);
-
- if (net_debug>4) {
- printk("seeq8005: stat = 0x%04x\n",old_stat);
- printk("seeq8005: cfg1 = 0x%04x\n",old_cfg1);
- printk("seeq8005: cfg2 = 0x%04x\n",old_cfg2);
- printk("seeq8005: raer = 0x%04x\n",old_rear);
- printk("seeq8005: dmaar= 0x%04x\n",old_dmaar);
- }
-
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD); /* setup for reading PROM */
- outw( 0, SEEQ_DMAAR); /* set starting PROM address */
- outw( SEEQCFG1_BUFFER_PROM, SEEQ_CFG1); /* set buffer to look at PROM */
-
-
- j=0;
- for(i=0; i <32; i++) {
- j+= SA_prom[i] = inw(SEEQ_BUFFER) & 0xff;
- }
-
-#if 0
- /* untested because I only have the one card */
- if ( (j&0xff) != 0 ) { /* checksum appears to be 8bit = 0 */
- if (net_debug>1) { /* check this before deciding that we have a card */
- printk("seeq8005: prom sum error\n");
- }
- outw( old_stat, SEEQ_STATUS);
- outw( old_dmaar, SEEQ_DMAAR);
- outw( old_cfg1, SEEQ_CFG1);
- retval = -ENODEV;
- goto out;
- }
-#endif
-
- outw( SEEQCFG2_RESET, SEEQ_CFG2); /* reset the card */
- udelay(5);
- outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
-
- if (net_debug) {
- printk("seeq8005: prom sum = 0x%08x\n",j);
- for(j=0; j<32; j+=16) {
- printk("seeq8005: prom %02x: ",j);
- for(i=0;i<16;i++) {
- printk("%02x ",SA_prom[j|i]);
- }
- printk(" ");
- for(i=0;i<16;i++) {
- if ((SA_prom[j|i]>31)&&(SA_prom[j|i]<127)) {
- printk("%c", SA_prom[j|i]);
- } else {
- printk(" ");
- }
- }
- printk("\n");
- }
- }
-
-#if 0
- /*
- * testing the packet buffer memory doesn't work yet
- * but all other buffer accesses do
- * - fixing is not a priority
- */
- if (net_debug>1) { /* test packet buffer memory */
- printk("seeq8005: testing packet buffer ... ");
- outw( SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0 , SEEQ_DMAAR);
- for(i=0;i<32768;i++) {
- outw(0x5a5a, SEEQ_BUFFER);
- }
- j=jiffies+HZ;
- while ( ((inw(SEEQ_STATUS) & SEEQSTAT_FIFO_EMPTY) != SEEQSTAT_FIFO_EMPTY) && time_before(jiffies, j) )
- mb();
- outw( 0 , SEEQ_DMAAR);
- while ( ((inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, j+HZ))
- mb();
- if ( (inw(SEEQ_STATUS) & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (inw(SEEQ_STATUS)& SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- j=0;
- for(i=0;i<32768;i++) {
- if (inw(SEEQ_BUFFER) != 0x5a5a)
- j++;
- }
- if (j) {
- printk("%i\n",j);
- } else {
- printk("ok.\n");
- }
- }
-#endif
-
- if (net_debug && version_printed++ == 0)
- printk(version);
-
- printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
-
- /* Fill in the 'dev' fields. */
- dev->base_addr = ioaddr;
- dev->irq = irq;
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = SA_prom[i+6];
- printk("%pM", dev->dev_addr);
-
- if (dev->irq == 0xff)
- ; /* Do nothing: a user-level program will set it. */
- else if (dev->irq < 2) { /* "Auto-IRQ" */
- unsigned long cookie = probe_irq_on();
-
- outw( SEEQCMD_RX_INT_EN | SEEQCMD_SET_RX_ON | SEEQCMD_SET_RX_OFF, SEEQ_CMD );
-
- dev->irq = probe_irq_off(cookie);
-
- if (net_debug >= 2)
- printk(" autoirq is %d\n", dev->irq);
- } else if (dev->irq == 2)
- /* Fixup for users that don't know that IRQ 2 is really IRQ 9,
- * or don't know which one to set.
- */
- dev->irq = 9;
-
-#if 0
- {
- int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
- if (irqval) {
- printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
- dev->irq, irqval);
- retval = -EAGAIN;
- goto out;
- }
- }
-#endif
- dev->netdev_ops = &seeq8005_netdev_ops;
- dev->watchdog_timeo = HZ/20;
- dev->flags &= ~IFF_MULTICAST;
-
- return 0;
-out:
- release_region(ioaddr, SEEQ8005_IO_EXTENT);
- return retval;
-}
-
-
-/* Open/initialize the board. This is called (in the current kernel)
- sometime after booting when the 'ifconfig' program is run.
-
- This routine should set everything up anew at each open, even
- registers that "should" only need to be set once at boot, so that
- there is non-reboot way to recover if something goes wrong.
- */
-static int seeq8005_open(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
-
- {
- int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
- if (irqval) {
- printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
- dev->irq, irqval);
- return -EAGAIN;
- }
- }
-
- /* Reset the hardware here. Don't forget to set the station address. */
- seeq8005_init(dev, 1);
-
- lp->open_time = jiffies;
-
- netif_start_queue(dev);
- return 0;
-}
-
-static void seeq8005_timeout(struct net_device *dev)
-{
- int ioaddr = dev->base_addr;
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
- /* Try to restart the adaptor. */
- seeq8005_init(dev, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- short length = skb->len;
- unsigned char *buf;
-
- if (length < ETH_ZLEN) {
- if (skb_padto(skb, ETH_ZLEN))
- return NETDEV_TX_OK;
- length = ETH_ZLEN;
- }
- buf = skb->data;
-
- /* Block a timer-based transmit from overlapping */
- netif_stop_queue(dev);
-
- hardware_send_packet(dev, buf, length);
- dev->stats.tx_bytes += length;
- dev_kfree_skb (skb);
- /* You might need to clean up and record Tx statistics here. */
-
- return NETDEV_TX_OK;
-}
-
-/*
- * wait_for_buffer
- *
- * This routine waits for the SEEQ chip to assert that the FIFO is ready
- * by checking for a window interrupt, and then clearing it. This has to
- * occur in the interrupt handler!
- */
-inline void wait_for_buffer(struct net_device * dev)
-{
- int ioaddr = dev->base_addr;
- unsigned long tmp;
- int status;
-
- tmp = jiffies + HZ;
- while ( ( ((status=inw(SEEQ_STATUS)) & SEEQSTAT_WINDOW_INT) != SEEQSTAT_WINDOW_INT) && time_before(jiffies, tmp))
- cpu_relax();
-
- if ( (status & SEEQSTAT_WINDOW_INT) == SEEQSTAT_WINDOW_INT)
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-}
-
-/* The typical workload of the driver:
- Handle the network interface interrupts. */
-static irqreturn_t seeq8005_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *lp;
- int ioaddr, status, boguscount = 0;
- int handled = 0;
-
- ioaddr = dev->base_addr;
- lp = netdev_priv(dev);
-
- status = inw(SEEQ_STATUS);
- do {
- if (net_debug >2) {
- printk("%s: int, status=0x%04x\n",dev->name,status);
- }
-
- if (status & SEEQSTAT_WINDOW_INT) {
- handled = 1;
- outw( SEEQCMD_WINDOW_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- if (net_debug) {
- printk("%s: window int!\n",dev->name);
- }
- }
- if (status & SEEQSTAT_TX_INT) {
- handled = 1;
- outw( SEEQCMD_TX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- dev->stats.tx_packets++;
- netif_wake_queue(dev); /* Inform upper layers. */
- }
- if (status & SEEQSTAT_RX_INT) {
- handled = 1;
- /* Got a packet(s). */
- seeq8005_rx(dev);
- }
- status = inw(SEEQ_STATUS);
- } while ( (++boguscount < 10) && (status & SEEQSTAT_ANY_INT)) ;
-
- if(net_debug>2) {
- printk("%s: eoi\n",dev->name);
- }
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void seeq8005_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int boguscount = 10;
- int pkt_hdr;
- int ioaddr = dev->base_addr;
-
- do {
- int next_packet;
- int pkt_len;
- int i;
- int status;
-
- status = inw(SEEQ_STATUS);
- outw( lp->receive_ptr, SEEQ_DMAAR);
- outw(SEEQCMD_FIFO_READ | SEEQCMD_RX_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- wait_for_buffer(dev);
- next_packet = ntohs(inw(SEEQ_BUFFER));
- pkt_hdr = inw(SEEQ_BUFFER);
-
- if (net_debug>2) {
- printk("%s: 0x%04x recv next=0x%04x, hdr=0x%04x\n",dev->name,lp->receive_ptr,next_packet,pkt_hdr);
- }
-
- if ((next_packet == 0) || ((pkt_hdr & SEEQPKTH_CHAIN)==0)) { /* Read all the frames? */
- return; /* Done for now */
- }
-
- if ((pkt_hdr & SEEQPKTS_DONE)==0)
- break;
-
- if (next_packet < lp->receive_ptr) {
- pkt_len = (next_packet + 0x10000 - ((DEFAULT_TEA+1)<<8)) - lp->receive_ptr - 4;
- } else {
- pkt_len = next_packet - lp->receive_ptr - 4;
- }
-
- if (next_packet < ((DEFAULT_TEA+1)<<8)) { /* is the next_packet address sane? */
- printk("%s: recv packet ring corrupt, resetting board\n",dev->name);
- seeq8005_init(dev,1);
- return;
- }
-
- lp->receive_ptr = next_packet;
-
- if (net_debug>2) {
- printk("%s: recv len=0x%04x\n",dev->name,pkt_len);
- }
-
- if (pkt_hdr & SEEQPKTS_ANY_ERROR) { /* There was an error. */
- dev->stats.rx_errors++;
- if (pkt_hdr & SEEQPKTS_SHORT) dev->stats.rx_frame_errors++;
- if (pkt_hdr & SEEQPKTS_DRIB) dev->stats.rx_frame_errors++;
- if (pkt_hdr & SEEQPKTS_OVERSIZE) dev->stats.rx_over_errors++;
- if (pkt_hdr & SEEQPKTS_CRC_ERR) dev->stats.rx_crc_errors++;
- /* skip over this packet */
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_DMA_INT_ACK | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( (lp->receive_ptr & 0xff00)>>8, SEEQ_REA);
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
- unsigned char *buf;
-
- skb = netdev_alloc_skb(dev, pkt_len);
- if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2); /* align data on 16 byte */
- buf = skb_put(skb,pkt_len);
-
- insw(SEEQ_BUFFER, buf, (pkt_len + 1) >> 1);
-
- if (net_debug>2) {
- char * p = buf;
- printk("%s: recv ",dev->name);
- for(i=0;i<14;i++) {
- printk("%02x ",*(p++)&0xff);
- }
- printk("\n");
- }
-
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- } while ((--boguscount) && (pkt_hdr & SEEQPKTH_CHAIN));
-
- /* If any worth-while packets have been received, netif_rx()
- has done a mark_bh(NET_BH) for us and will work on them
- when we get to the bottom-half routine. */
-}
-
-/* The inverse routine to net_open(). */
-static int seeq8005_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- lp->open_time = 0;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx here. */
- outw( SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
-
- free_irq(dev->irq, dev);
-
- /* Update the statistics here. */
-
- return 0;
-
-}
-
-/* Set or clear the multicast filter for this adaptor.
- num_addrs == -1 Promiscuous mode, receive all packets
- num_addrs == 0 Normal mode, clear multicast list
- num_addrs > 0 Multicast mode, receive normal and MC packets, and do
- best-effort filtering.
- */
-static void set_multicast_list(struct net_device *dev)
-{
-/*
- * I _could_ do up to 6 addresses here, but won't (yet?)
- */
-
-#if 0
- int ioaddr = dev->base_addr;
-/*
- * hmm, not even sure if my matching works _anyway_ - seem to be receiving
- * _everything_ . . .
- */
-
- if (num_addrs) { /* Enable promiscuous mode */
- outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_ALL, SEEQ_CFG1);
- dev->flags|=IFF_PROMISC;
- } else { /* Disable promiscuous mode, use normal mode */
- outw( (inw(SEEQ_CFG1) & ~SEEQCFG1_MATCH_MASK)| SEEQCFG1_MATCH_BROAD, SEEQ_CFG1);
- }
-#endif
-}
-
-void seeq8005_init(struct net_device *dev, int startp)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int i;
-
- outw(SEEQCFG2_RESET, SEEQ_CFG2); /* reset device */
- udelay(5);
-
- outw( SEEQCMD_FIFO_WRITE | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0, SEEQ_DMAAR); /* load start address into both low and high byte */
-/* wait_for_buffer(dev); */ /* I think that you only need a wait for memory buffer */
- outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
-
- for(i=0;i<6;i++) { /* set Station address */
- outb(dev->dev_addr[i], SEEQ_BUFFER);
- udelay(2);
- }
-
- outw( SEEQCFG1_BUFFER_TEA, SEEQ_CFG1); /* set xmit end area pointer to 16K */
- outb( DEFAULT_TEA, SEEQ_BUFFER); /* this gives us 16K of send buffer and 48K of recv buffer */
-
- lp->receive_ptr = (DEFAULT_TEA+1)<<8; /* so we can find our packet_header */
- outw( lp->receive_ptr, SEEQ_RPR); /* Receive Pointer Register is set to recv buffer memory */
-
- outw( 0x00ff, SEEQ_REA); /* Receive Area End */
-
- if (net_debug>4) {
- printk("%s: SA0 = ",dev->name);
-
- outw( SEEQCMD_FIFO_READ | SEEQCMD_SET_ALL_OFF, SEEQ_CMD);
- outw( 0, SEEQ_DMAAR);
- outw( SEEQCFG1_BUFFER_MAC0, SEEQ_CFG1);
-
- for(i=0;i<6;i++) {
- printk("%02x ",inb(SEEQ_BUFFER));
- }
- printk("\n");
- }
-
- outw( SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD | SEEQCFG1_BUFFER_BUFFER, SEEQ_CFG1);
- outw( SEEQCFG2_AUTO_REA | SEEQCFG2_CTRLO, SEEQ_CFG2);
- outw( SEEQCMD_SET_RX_ON | SEEQCMD_TX_INT_EN | SEEQCMD_RX_INT_EN, SEEQ_CMD);
-
- if (net_debug>4) {
- int old_cfg1;
- old_cfg1 = inw(SEEQ_CFG1);
- printk("%s: stat = 0x%04x\n",dev->name,inw(SEEQ_STATUS));
- printk("%s: cfg1 = 0x%04x\n",dev->name,old_cfg1);
- printk("%s: cfg2 = 0x%04x\n",dev->name,inw(SEEQ_CFG2));
- printk("%s: raer = 0x%04x\n",dev->name,inw(SEEQ_REA));
- printk("%s: dmaar= 0x%04x\n",dev->name,inw(SEEQ_DMAAR));
-
- }
-}
-
-
-static void hardware_send_packet(struct net_device * dev, char *buf, int length)
-{
- int ioaddr = dev->base_addr;
- int status = inw(SEEQ_STATUS);
- int transmit_ptr = 0;
- unsigned long tmp;
-
- if (net_debug>4) {
- printk("%s: send 0x%04x\n",dev->name,length);
- }
-
- /* Set FIFO to writemode and set packet-buffer address */
- outw( SEEQCMD_FIFO_WRITE | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
- outw( transmit_ptr, SEEQ_DMAAR);
-
- /* output SEEQ Packet header barfage */
- outw( htons(length + 4), SEEQ_BUFFER);
- outw( SEEQPKTH_XMIT | SEEQPKTH_DATA_FOLLOWS | SEEQPKTH_XMIT_INT_EN, SEEQ_BUFFER );
-
- /* blat the buffer */
- outsw( SEEQ_BUFFER, buf, (length +1) >> 1);
- /* paranoia !! */
- outw( 0, SEEQ_BUFFER);
- outw( 0, SEEQ_BUFFER);
-
- /* set address of start of transmit chain */
- outw( transmit_ptr, SEEQ_TPR);
-
- /* drain FIFO */
- tmp = jiffies;
- while ( (((status=inw(SEEQ_STATUS)) & SEEQSTAT_FIFO_EMPTY) == 0) && time_before(jiffies, tmp + HZ))
- mb();
-
- /* doit ! */
- outw( SEEQCMD_WINDOW_INT_ACK | SEEQCMD_SET_TX_ON | (status & SEEQCMD_INT_MASK), SEEQ_CMD);
-
-}
-
-
-#ifdef MODULE
-
-static struct net_device *dev_seeq;
-MODULE_LICENSE("GPL");
-module_param(io, int, 0);
-module_param(irq, int, 0);
-MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
-MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
-
-int __init init_module(void)
-{
- dev_seeq = seeq8005_probe(-1);
- return PTR_RET(dev_seeq);
-}
-
-void __exit cleanup_module(void)
-{
- unregister_netdev(dev_seeq);
- release_region(dev_seeq->base_addr, SEEQ8005_IO_EXTENT);
- free_netdev(dev_seeq);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/ethernet/seeq/seeq8005.h b/drivers/net/ethernet/seeq/seeq8005.h
deleted file mode 100644
index 5dfb0098c6c..00000000000
--- a/drivers/net/ethernet/seeq/seeq8005.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * defines, etc for the seeq8005
- */
-
-/*
- * This file is distributed under GPL.
- *
- * This style and layout of this file is also copied
- * from many of the other linux network device drivers.
- */
-
-/* The number of low I/O ports used by the ethercard. */
-#define SEEQ8005_IO_EXTENT 16
-
-#define SEEQ_B (ioaddr)
-
-#define SEEQ_CMD (SEEQ_B) /* Write only */
-#define SEEQ_STATUS (SEEQ_B) /* Read only */
-#define SEEQ_CFG1 (SEEQ_B + 2)
-#define SEEQ_CFG2 (SEEQ_B + 4)
-#define SEEQ_REA (SEEQ_B + 6) /* Receive End Area Register */
-#define SEEQ_RPR (SEEQ_B + 10) /* Receive Pointer Register */
-#define SEEQ_TPR (SEEQ_B + 12) /* Transmit Pointer Register */
-#define SEEQ_DMAAR (SEEQ_B + 14) /* DMA Address Register */
-#define SEEQ_BUFFER (SEEQ_B + 8) /* Buffer Window Register */
-
-#define DEFAULT_TEA (0x3f)
-
-#define SEEQCMD_DMA_INT_EN (0x0001) /* DMA Interrupt Enable */
-#define SEEQCMD_RX_INT_EN (0x0002) /* Receive Interrupt Enable */
-#define SEEQCMD_TX_INT_EN (0x0004) /* Transmit Interrupt Enable */
-#define SEEQCMD_WINDOW_INT_EN (0x0008) /* What the hell is this for?? */
-#define SEEQCMD_INT_MASK (0x000f)
-
-#define SEEQCMD_DMA_INT_ACK (0x0010) /* DMA ack */
-#define SEEQCMD_RX_INT_ACK (0x0020)
-#define SEEQCMD_TX_INT_ACK (0x0040)
-#define SEEQCMD_WINDOW_INT_ACK (0x0080)
-#define SEEQCMD_ACK_ALL (0x00f0)
-
-#define SEEQCMD_SET_DMA_ON (0x0100) /* Enables DMA Request logic */
-#define SEEQCMD_SET_RX_ON (0x0200) /* Enables Packet RX */
-#define SEEQCMD_SET_TX_ON (0x0400) /* Starts TX run */
-#define SEEQCMD_SET_DMA_OFF (0x0800)
-#define SEEQCMD_SET_RX_OFF (0x1000)
-#define SEEQCMD_SET_TX_OFF (0x2000)
-#define SEEQCMD_SET_ALL_OFF (0x3800) /* set all logic off */
-
-#define SEEQCMD_FIFO_READ (0x4000) /* Set FIFO to read mode (read from Buffer) */
-#define SEEQCMD_FIFO_WRITE (0x8000) /* Set FIFO to write mode */
-
-#define SEEQSTAT_DMA_INT_EN (0x0001) /* Status of interrupt enable */
-#define SEEQSTAT_RX_INT_EN (0x0002)
-#define SEEQSTAT_TX_INT_EN (0x0004)
-#define SEEQSTAT_WINDOW_INT_EN (0x0008)
-
-#define SEEQSTAT_DMA_INT (0x0010) /* Interrupt flagged */
-#define SEEQSTAT_RX_INT (0x0020)
-#define SEEQSTAT_TX_INT (0x0040)
-#define SEEQSTAT_WINDOW_INT (0x0080)
-#define SEEQSTAT_ANY_INT (0x00f0)
-
-#define SEEQSTAT_DMA_ON (0x0100) /* DMA logic on */
-#define SEEQSTAT_RX_ON (0x0200) /* Packet RX on */
-#define SEEQSTAT_TX_ON (0x0400) /* TX running */
-
-#define SEEQSTAT_FIFO_FULL (0x2000)
-#define SEEQSTAT_FIFO_EMPTY (0x4000)
-#define SEEQSTAT_FIFO_DIR (0x8000) /* 1=read, 0=write */
-
-#define SEEQCFG1_BUFFER_MASK (0x000f) /* define what maps into the BUFFER register */
-#define SEEQCFG1_BUFFER_MAC0 (0x0000) /* MAC station addresses 0-5 */
-#define SEEQCFG1_BUFFER_MAC1 (0x0001)
-#define SEEQCFG1_BUFFER_MAC2 (0x0002)
-#define SEEQCFG1_BUFFER_MAC3 (0x0003)
-#define SEEQCFG1_BUFFER_MAC4 (0x0004)
-#define SEEQCFG1_BUFFER_MAC5 (0x0005)
-#define SEEQCFG1_BUFFER_PROM (0x0006) /* The Address/CFG PROM */
-#define SEEQCFG1_BUFFER_TEA (0x0007) /* Transmit end area */
-#define SEEQCFG1_BUFFER_BUFFER (0x0008) /* Packet buffer memory */
-#define SEEQCFG1_BUFFER_INT_VEC (0x0009) /* Interrupt Vector */
-
-#define SEEQCFG1_DMA_INTVL_MASK (0x0030)
-#define SEEQCFG1_DMA_CONT (0x0000)
-#define SEEQCFG1_DMA_800ns (0x0010)
-#define SEEQCFG1_DMA_1600ns (0x0020)
-#define SEEQCFG1_DMA_3200ns (0x0030)
-
-#define SEEQCFG1_DMA_LEN_MASK (0x00c0)
-#define SEEQCFG1_DMA_LEN1 (0x0000)
-#define SEEQCFG1_DMA_LEN2 (0x0040)
-#define SEEQCFG1_DMA_LEN4 (0x0080)
-#define SEEQCFG1_DMA_LEN8 (0x00c0)
-
-#define SEEQCFG1_MAC_MASK (0x3f00) /* Dis/enable bits for MAC addresses */
-#define SEEQCFG1_MAC0_EN (0x0100)
-#define SEEQCFG1_MAC1_EN (0x0200)
-#define SEEQCFG1_MAC2_EN (0x0400)
-#define SEEQCFG1_MAC3_EN (0x0800)
-#define SEEQCFG1_MAC4_EN (0x1000)
-#define SEEQCFG1_MAC5_EN (0x2000)
-
-#define SEEQCFG1_MATCH_MASK (0xc000) /* Packet matching logic cfg bits */
-#define SEEQCFG1_MATCH_SPECIFIC (0x0000) /* only matching MAC addresses */
-#define SEEQCFG1_MATCH_BROAD (0x4000) /* matching and broadcast addresses */
-#define SEEQCFG1_MATCH_MULTI (0x8000) /* matching, broadcast and multicast */
-#define SEEQCFG1_MATCH_ALL (0xc000) /* Promiscuous mode */
-
-#define SEEQCFG1_DEFAULT (SEEQCFG1_BUFFER_BUFFER | SEEQCFG1_MAC0_EN | SEEQCFG1_MATCH_BROAD)
-
-#define SEEQCFG2_BYTE_SWAP (0x0001) /* 0=Intel byte-order */
-#define SEEQCFG2_AUTO_REA (0x0002) /* if set, Receive End Area will be updated when reading from Buffer */
-
-#define SEEQCFG2_CRC_ERR_EN (0x0008) /* enables receiving of packets with CRC errors */
-#define SEEQCFG2_DRIBBLE_EN (0x0010) /* enables receiving of non-aligned packets */
-#define SEEQCFG2_SHORT_EN (0x0020) /* enables receiving of short packets */
-
-#define SEEQCFG2_SLOTSEL (0x0040) /* 0= standard IEEE802.3, 1= smaller,faster, non-standard */
-#define SEEQCFG2_NO_PREAM (0x0080) /* 1= user supplies Xmit preamble bytes */
-#define SEEQCFG2_ADDR_LEN (0x0100) /* 1= 2byte addresses */
-#define SEEQCFG2_REC_CRC (0x0200) /* 0= received packets will have CRC stripped from them */
-#define SEEQCFG2_XMIT_NO_CRC (0x0400) /* don't xmit CRC with each packet (user supplies it) */
-#define SEEQCFG2_LOOPBACK (0x0800)
-#define SEEQCFG2_CTRLO (0x1000)
-#define SEEQCFG2_RESET (0x8000) /* software Hard-reset bit */
-
-struct seeq_pkt_hdr {
- unsigned short next; /* address of next packet header */
- unsigned char babble_int:1, /* enable int on >1514 byte packet */
- coll_int:1, /* enable int on collision */
- coll_16_int:1, /* enable int on >15 collision */
- xmit_int:1, /* enable int on success (or xmit with <15 collision) */
- unused:1,
- data_follows:1, /* if not set, process this as a header and pointer only */
- chain_cont:1, /* if set, more headers in chain only cmd bit valid in recv header */
- xmit_recv:1; /* if set, a xmit packet, else a receive packet.*/
- unsigned char status;
-};
-
-#define SEEQPKTH_BAB_INT_EN (0x01) /* xmit only */
-#define SEEQPKTH_COL_INT_EN (0x02) /* xmit only */
-#define SEEQPKTH_COL16_INT_EN (0x04) /* xmit only */
-#define SEEQPKTH_XMIT_INT_EN (0x08) /* xmit only */
-#define SEEQPKTH_DATA_FOLLOWS (0x20) /* supposedly in xmit only */
-#define SEEQPKTH_CHAIN (0x40) /* more headers follow */
-#define SEEQPKTH_XMIT (0x80)
-
-#define SEEQPKTS_BABBLE (0x0100) /* xmit only */
-#define SEEQPKTS_OVERSIZE (0x0100) /* recv only */
-#define SEEQPKTS_COLLISION (0x0200) /* xmit only */
-#define SEEQPKTS_CRC_ERR (0x0200) /* recv only */
-#define SEEQPKTS_COLL16 (0x0400) /* xmit only */
-#define SEEQPKTS_DRIB (0x0400) /* recv only */
-#define SEEQPKTS_SHORT (0x0800) /* recv only */
-#define SEEQPKTS_DONE (0x8000)
-#define SEEQPKTS_ANY_ERROR (0x0f00)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 0767043f44a..3f93624fc27 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1439,7 +1439,7 @@ static int efx_phc_settime(struct ptp_clock_info *ptp,
delta = timespec_sub(*e_ts, time_now);
- efx_phc_adjtime(ptp, timespec_to_ns(&delta));
+ rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta));
if (rc != 0)
return rc;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index dc171b4961e..7ed08c32a9c 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -1565,9 +1565,9 @@ static void ioc3_get_drvinfo (struct net_device *dev,
{
struct ioc3_private *ip = netdev_priv(dev);
- strcpy (info->driver, IOC3_NAME);
- strcpy (info->version, IOC3_VERSION);
- strcpy (info->bus_info, pci_name(ip->pdev));
+ strlcpy(info->driver, IOC3_NAME, sizeof(info->driver));
+ strlcpy(info->version, IOC3_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(ip->pdev), sizeof(info->bus_info));
}
static int ioc3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
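
The ioc3 hunk above and the later smc911x, smc91x, sunbmac, sunqe, sunvnet, tehuti and cpmac hunks all make the same get_drvinfo cleanup: strcpy()/strncpy()/strlcat() into the fixed-size fields of struct ethtool_drvinfo becomes strlcpy(), which bounds the copy and always NUL-terminates. A minimal sketch of the resulting handler shape (the driver name and version strings are invented for illustration):

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/string.h>

	#define EXAMPLE_DRV_NAME	"example"	/* illustrative */
	#define EXAMPLE_DRV_VERSION	"1.0"		/* illustrative */

	static void example_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
	{
		/* strlcpy() truncates to the destination size and always
		 * terminates, unlike strncpy() which can leave the field
		 * unterminated when the source string is too long.
		 */
		strlcpy(info->driver, EXAMPLE_DRV_NAME, sizeof(info->driver));
		strlcpy(info->version, EXAMPLE_DRV_VERSION,
			sizeof(info->version));
		strlcpy(info->bus_info, dev_name(dev->dev.parent),
			sizeof(info->bus_info));
	}
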
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index b2315324cc6..28f7268f1b8 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1458,12 +1458,12 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mac0 = ioread32(port_base + MAC0);
mac1 = ioread32(port_base + MAC0 + 4);
- dev->dev_addr[0] = dev->perm_addr[0] = mac0 >> 24;
- dev->dev_addr[1] = dev->perm_addr[1] = mac0 >> 16;
- dev->dev_addr[2] = dev->perm_addr[2] = mac0 >> 8;
- dev->dev_addr[3] = dev->perm_addr[3] = mac0;
- dev->dev_addr[4] = dev->perm_addr[4] = mac1 >> 8;
- dev->dev_addr[5] = dev->perm_addr[5] = mac1;
+ dev->dev_addr[0] = mac0 >> 24;
+ dev->dev_addr[1] = mac0 >> 16;
+ dev->dev_addr[2] = mac0 >> 8;
+ dev->dev_addr[3] = mac0;
+ dev->dev_addr[4] = mac1 >> 8;
+ dev->dev_addr[5] = mac1;
err = register_netdev(dev);
if (err < 0)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5bffd9749a5..efca14eaefa 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -247,8 +247,7 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr and
- * @net_dev->perm_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr.
*/
static int sis900_get_mac_addr(struct pci_dev *pci_dev,
@@ -271,9 +270,6 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
return 1;
}
@@ -284,8 +280,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr and
- * @net_dev->perm_addr.
+ * MAC address is read into @net_dev->dev_addr.
*/
static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
@@ -311,9 +306,6 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -328,7 +320,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr and @net_dev->perm_addr.
+ * @net_dev->dev_addr.
*/
static int sis635_get_mac_addr(struct pci_dev *pci_dev,
@@ -353,9 +345,6 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
}
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
/* enable packet filtering */
sw32(rfcr, rfcrSave | RFEN);
@@ -375,7 +364,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
+ * MAC address is read into @net_dev->dev_addr.
*/
static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
@@ -395,9 +384,6 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 3; i++)
mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
- /* Store MAC Address in perm_addr */
- memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
-
rc = 1;
break;
}
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 59a6f88da86..9dd842dbb85 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1522,9 +1522,10 @@ smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void
smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, CARDNAME, sizeof(info->driver));
- strncpy(info->version, version, sizeof(info->version));
- strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, version, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
}
static int smc911x_ethtool_nwayreset(struct net_device *dev)
@@ -2035,7 +2036,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
struct resource *res;
struct smc911x_local *lp;
- unsigned int *addr;
+ void __iomem *addr;
int ret;
DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__);
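
The last smc911x hunk retypes the mapped register base from unsigned int * to void __iomem *, the sparse annotation for memory-mapped I/O. Such pointers are opaque and should only be used through the MMIO accessors; a short sketch of that convention (the register offset is made up):

	#include <linux/io.h>

	#define EXAMPLE_STATUS_OFF	0x40	/* illustrative register offset */

	static u32 example_read_status(void __iomem *base)
	{
		/* never dereference an __iomem pointer directly; go through
		 * readl()/writel() (or ioread32()/iowrite32()) so sparse and
		 * the architecture's I/O ordering rules are respected
		 */
		return readl(base + EXAMPLE_STATUS_OFF);
	}
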
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index a670d23d934..591650a8de3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1597,9 +1597,10 @@ smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
static void
smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strncpy(info->driver, CARDNAME, sizeof(info->driver));
- strncpy(info->version, version, sizeof(info->version));
- strncpy(info->bus_info, dev_name(dev->dev.parent), sizeof(info->bus_info));
+ strlcpy(info->driver, CARDNAME, sizeof(info->driver));
+ strlcpy(info->version, version, sizeof(info->version));
+ strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ sizeof(info->bus_info));
}
static int smc_ethtool_nwayreset(struct net_device *dev)
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index e112877d15d..da5cc9a3b34 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -997,9 +997,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
phydev->addr, phydev->phy_id);
- ret = phy_connect_direct(dev, phydev,
- &smsc911x_phy_adjust_link, 0,
- pdata->config.phy_interface);
+ ret = phy_connect_direct(dev, phydev, &smsc911x_phy_adjust_link,
+ pdata->config.phy_interface);
if (ret) {
netdev_err(dev, "Could not attach to PHY\n");
@@ -1831,7 +1830,6 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
spin_lock_irq(&pdata->mac_lock);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3c586585e1b..d457fa2d750 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1179,7 +1179,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
phydev->phy_id);
phydev = phy_connect(dev, dev_name(&phydev->dev),
- smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ smsc9420_phy_adjust_link, PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -1250,12 +1250,11 @@ static int smsc9420_alloc_tx_ring(struct smsc9420_pdata *pd)
BUG_ON(!pd->tx_ring);
- pd->tx_buffers = kmalloc((sizeof(struct smsc9420_ring_info) *
- TX_RING_SIZE), GFP_KERNEL);
- if (!pd->tx_buffers) {
- smsc_warn(IFUP, "Failed to allocated tx_buffers");
+ pd->tx_buffers = kmalloc_array(TX_RING_SIZE,
+ sizeof(struct smsc9420_ring_info),
+ GFP_KERNEL);
+ if (!pd->tx_buffers)
return -ENOMEM;
- }
/* Initialize the TX Ring */
for (i = 0; i < TX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1372ce210b5..d1ac39c1b05 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -210,8 +210,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
- strcpy(info->version, DRV_MODULE_VERSION);
- info->fw_version[0] = '\0';
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int stmmac_ethtool_getsettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b75f4b28689..39c6c552463 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -428,8 +428,7 @@ static int stmmac_init_phy(struct net_device *dev)
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
- interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -531,17 +530,18 @@ static void init_dma_desc_rings(struct net_device *dev)
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
- priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL);
- priv->rx_skbuff =
- kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL);
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
priv->dma_rx =
(struct dma_desc *)dma_alloc_coherent(priv->device,
rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
- priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize,
- GFP_KERNEL);
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
priv->dma_tx =
(struct dma_desc *)dma_alloc_coherent(priv->device,
txsize *
@@ -2254,7 +2254,7 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "pause:", 6)) {
if (kstrtoint(opt + 6, 0, &pause))
goto err;
- } else if (!strncmp(opt, "eee_timer:", 6)) {
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 064eaac9616..19b3a2567a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -102,6 +102,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed", __func__);
+ ret = -ENODEV;
goto err_out;
}
priv->dev->irq = pdev->irq;
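
The stmmac_pci hunk plugs a common probe-path hole: the jump to err_out previously left the function's return value at whatever an earlier successful step had set (likely 0), so a failed probe could report success; setting ret = -ENODEV before the goto makes the failure propagate. The general shape of the fix, as a sketch rather than the driver's actual code (example_core_probe is a hypothetical helper):

	#include <linux/pci.h>

	static void *example_core_probe(struct pci_dev *pdev);	/* hypothetical */

	static int example_pci_probe(struct pci_dev *pdev)
	{
		void *core;
		int ret;

		ret = pci_enable_device(pdev);
		if (ret)
			return ret;

		core = example_core_probe(pdev);
		if (!core) {
			ret = -ENODEV;	/* without this, ret still holds 0 from
					 * pci_enable_device() and the error is lost */
			goto err_out;
		}

		return 0;

	err_out:
		pci_disable_device(pdev);
		return ret;
	}
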
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a0bdf077946..e4c1c88e4c2 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -4342,7 +4342,7 @@ static int niu_alloc_rx_ring_info(struct niu *np,
{
BUILD_BUG_ON(sizeof(struct rxdma_mailbox) != 64);
- rp->rxhash = kzalloc(MAX_RBR_RING_SIZE * sizeof(struct page *),
+ rp->rxhash = kcalloc(MAX_RBR_RING_SIZE, sizeof(struct page *),
GFP_KERNEL);
if (!rp->rxhash)
return -ENOMEM;
@@ -8366,14 +8366,12 @@ static void niu_pci_vpd_validate(struct niu *np)
return;
}
- memcpy(dev->perm_addr, vpd->local_mac, ETH_ALEN);
+ memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
- val8 = dev->perm_addr[5];
- dev->perm_addr[5] += np->port;
- if (dev->perm_addr[5] < val8)
- dev->perm_addr[4]++;
-
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ val8 = dev->dev_addr[5];
+ dev->dev_addr[5] += np->port;
+ if (dev->dev_addr[5] < val8)
+ dev->dev_addr[4]++;
}
static int niu_pci_probe_sprom(struct niu *np)
@@ -8470,29 +8468,27 @@ static int niu_pci_probe_sprom(struct niu *np)
val = nr64(ESPC_MAC_ADDR0);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
- dev->perm_addr[0] = (val >> 0) & 0xff;
- dev->perm_addr[1] = (val >> 8) & 0xff;
- dev->perm_addr[2] = (val >> 16) & 0xff;
- dev->perm_addr[3] = (val >> 24) & 0xff;
+ dev->dev_addr[0] = (val >> 0) & 0xff;
+ dev->dev_addr[1] = (val >> 8) & 0xff;
+ dev->dev_addr[2] = (val >> 16) & 0xff;
+ dev->dev_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
- dev->perm_addr[4] = (val >> 0) & 0xff;
- dev->perm_addr[5] = (val >> 8) & 0xff;
+ dev->dev_addr[4] = (val >> 0) & 0xff;
+ dev->dev_addr[5] = (val >> 8) & 0xff;
- if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
- dev->perm_addr);
+ dev->dev_addr);
return -EINVAL;
}
- val8 = dev->perm_addr[5];
- dev->perm_addr[5] += np->port;
- if (dev->perm_addr[5] < val8)
- dev->perm_addr[4]++;
-
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
+ val8 = dev->dev_addr[5];
+ dev->dev_addr[5] += np->port;
+ if (dev->dev_addr[5] < val8)
+ dev->dev_addr[4]++;
val = nr64(ESPC_MOD_STR_LEN);
netif_printk(np, probe, KERN_DEBUG, np->dev,
@@ -9267,16 +9263,14 @@ static int niu_get_of_props(struct niu *np)
netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
dp->full_name, prop_len);
}
- memcpy(dev->perm_addr, mac_addr, dev->addr_len);
- if (!is_valid_ether_addr(&dev->perm_addr[0])) {
+ memcpy(dev->dev_addr, mac_addr, dev->addr_len);
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
netdev_err(dev, "%s: OF MAC address is invalid\n",
dp->full_name);
- netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr);
return -EINVAL;
}
- memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
-
model = of_get_property(dp, "model", &prop_len);
if (model)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index be82f6d13c5..5fafca06530 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1042,8 +1042,8 @@ static void bigmac_set_multicast(struct net_device *dev)
/* Ethtool support... */
static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "sunbmac");
- strcpy(info->version, "2.0");
+ strlcpy(info->driver, "sunbmac", sizeof(info->driver));
+ strlcpy(info->version, "2.0", sizeof(info->version));
}
static u32 bigmac_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 1dcee691584..49bf3e2eb65 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -685,13 +685,14 @@ static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
- strcpy(info->driver, "sunqe");
- strcpy(info->version, "3.0");
+ strlcpy(info->driver, "sunqe", sizeof(info->driver));
+ strlcpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d", regs->which_io);
+ snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
+ regs->which_io);
}
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index e1b89553082..289b4eefb42 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -882,8 +882,8 @@ static int vnet_set_mac_addr(struct net_device *dev, void *p)
static void vnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static u32 vnet_get_msglevel(struct net_device *dev)
@@ -1032,8 +1032,6 @@ static struct vnet *vnet_new(const u64 *local_mac)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-
vp = netdev_priv(dev);
spin_lock_init(&vp->lock);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 1e4d743ff03..e15cc71b826 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2179,10 +2179,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct bdx_priv *priv = netdev_priv(netdev);
- strlcat(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
- strlcat(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
- strlcat(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcat(drvinfo->bus_info, pci_name(priv->pdev),
+ strlcpy(drvinfo->driver, BDX_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BDX_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index d9625f62b02..31bbbca341a 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -904,10 +904,9 @@ static int cpmac_set_ringparam(struct net_device *dev,
static void cpmac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "cpmac");
- strcpy(info->version, CPMAC_VERSION);
- info->fw_version[0] = '\0';
- sprintf(info->bus_info, "%s", "cpmac");
+ strlcpy(info->driver, "cpmac", sizeof(info->driver));
+ strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
info->regdump_len = 0;
}
@@ -1173,8 +1172,8 @@ static int cpmac_probe(struct platform_device *pdev)
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
if (netif_msg_drv(priv))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 40aff684aa2..7e93df6585e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -32,6 +32,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/if_vlan.h>
#include <linux/platform_data/cpsw.h>
@@ -118,6 +119,13 @@ do { \
#define TX_PRIORITY_MAPPING 0x33221100
#define CPDMA_TX_PRIORITY_MAP 0x76543210
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 15)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -250,7 +258,7 @@ struct cpsw_ss_regs {
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
- u32 flow_thresh;
+ u32 tx_in_ctl;
u32 port_vlan;
u32 tx_pri_map;
u32 cpdma_tx_pri_map;
@@ -277,6 +285,9 @@ struct cpsw_slave {
u32 mac_control;
struct cpsw_slave_data *data;
struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ u32 open_stat;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -315,17 +326,65 @@ struct cpsw_priv {
/* snapshot of IRQ numbers */
u32 irqs_table[4];
u32 num_irqs;
- struct cpts cpts;
+ struct cpts *cpts;
+ u32 emac_port;
};
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
-#define for_each_slave(priv, func, arg...) \
- do { \
- int idx; \
- for (idx = 0; idx < (priv)->data.slaves; idx++) \
- (func)((priv)->slaves + idx, ##arg); \
+#define for_each_slave(priv, func, arg...) \
+ do { \
+ int idx; \
+ if (priv->data.dual_emac) \
+ (func)((priv)->slaves + priv->emac_port, ##arg);\
+ else \
+ for (idx = 0; idx < (priv)->data.slaves; idx++) \
+ (func)((priv)->slaves + idx, ##arg); \
+ } while (0)
+#define cpsw_get_slave_ndev(priv, __slave_no__) \
+ (priv->slaves[__slave_no__].ndev)
+#define cpsw_get_slave_priv(priv, __slave_no__) \
+ ((priv->slaves[__slave_no__].ndev) ? \
+ netdev_priv(priv->slaves[__slave_no__].ndev) : NULL) \
+
+#define cpsw_dual_emac_src_port_detect(status, priv, ndev, skb) \
+ do { \
+ if (!priv->data.dual_emac) \
+ break; \
+ if (CPDMA_RX_SOURCE_PORT(status) == 1) { \
+ ndev = cpsw_get_slave_ndev(priv, 0); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } else if (CPDMA_RX_SOURCE_PORT(status) == 2) { \
+ ndev = cpsw_get_slave_ndev(priv, 1); \
+ priv = netdev_priv(ndev); \
+ skb->dev = ndev; \
+ } \
+ } while (0)
+#define cpsw_add_mcast(priv, addr) \
+ do { \
+ if (priv->data.dual_emac) { \
+ struct cpsw_slave *slave = priv->slaves + \
+ priv->emac_port; \
+ int slave_port = cpsw_get_slave_port(priv, \
+ slave->slave_num); \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ 1 << slave_port | 1 << priv->host_port, \
+ ALE_VLAN, slave->port_vlan, 0); \
+ } else { \
+ cpsw_ale_add_mcast(priv->ale, addr, \
+ ALE_ALL_PORTS << priv->host_port, \
+ 0, 0, 0); \
+ } \
} while (0)
+static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+ else
+ return slave_num;
+}
+
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -344,8 +403,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* program multicast address list into ALE register */
netdev_for_each_mc_addr(ha, ndev) {
- cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ cpsw_add_mcast(priv, (u8 *)ha->addr);
}
}
}
@@ -374,9 +432,12 @@ void cpsw_tx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
struct cpsw_priv *priv = netdev_priv(ndev);
+ /* Check whether the queue is stopped due to stalled tx dma, if the
+ * queue is stopped then start the queue as we have free desc for tx
+ */
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
- cpts_tx_timestamp(&priv->cpts, skb);
+ cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
@@ -389,6 +450,8 @@ void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_priv *priv = netdev_priv(ndev);
int ret = 0;
+ cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
+
/* free and bail if we are shutting down */
if (unlikely(!netif_running(ndev)) ||
unlikely(!netif_carrier_ok(ndev))) {
@@ -397,7 +460,7 @@ void cpsw_rx_handler(void *token, int len, int status)
}
if (likely(status >= 0)) {
skb_put(skb, len);
- cpts_rx_timestamp(&priv->cpts, skb);
+ cpts_rx_timestamp(priv->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
priv->stats.rx_bytes += len;
@@ -417,7 +480,7 @@ void cpsw_rx_handler(void *token, int len, int status)
return;
ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
}
WARN_ON(ret < 0);
}
@@ -430,37 +493,38 @@ static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
cpsw_intr_disable(priv);
cpsw_disable_irq(priv);
napi_schedule(&priv->napi);
+ } else {
+ priv = cpsw_get_slave_priv(priv, 1);
+ if (likely(priv) && likely(netif_running(priv->ndev))) {
+ cpsw_intr_disable(priv);
+ cpsw_disable_irq(priv);
+ napi_schedule(&priv->napi);
+ }
}
return IRQ_HANDLED;
}
-static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
-{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
-}
-
static int cpsw_poll(struct napi_struct *napi, int budget)
{
struct cpsw_priv *priv = napi_to_priv(napi);
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- num_rx = cpdma_chan_process(priv->rxch, budget);
-
- if (num_rx || num_tx)
- cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
- num_rx, num_tx);
+ if (num_tx)
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
cpsw_enable_irq(priv);
}
+ if (num_rx || num_tx)
+ cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
+ num_rx, num_tx);
+
return num_rx;
}
@@ -559,6 +623,54 @@ static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
leader + strlen(name), val);
}
+static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
+{
+ u32 i;
+ u32 usage_count = 0;
+
+ if (!priv->data.dual_emac)
+ return 0;
+
+ for (i = 0; i < priv->data.slaves; i++)
+ if (priv->slaves[i].open_stat)
+ usage_count++;
+
+ return usage_count;
+}
+
+static inline int cpsw_tx_packet_submit(struct net_device *ndev,
+ struct cpsw_priv *priv, struct sk_buff *skb)
+{
+ if (!priv->data.dual_emac)
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 0, GFP_KERNEL);
+
+ if (ndev == cpsw_get_slave_ndev(priv, 0))
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 1, GFP_KERNEL);
+ else
+ return cpdma_chan_submit(priv->txch, skb, skb->data,
+ skb->len, 2, GFP_KERNEL);
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+ struct cpsw_priv *priv, struct cpsw_slave *slave,
+ u32 slave_port)
+{
+ u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+
+ if (priv->version == CPSW_VERSION_1)
+ slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+ else
+ slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+ cpsw_ale_add_vlan(priv->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, 0);
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, slave->port_vlan);
+}
+
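cpsw_tx_packet_submit() and cpsw_add_dual_emac_def_ale_entries() carry the core of the dual EMAC scheme: each net_device's frames are directed to its own slave port, and a per-slave reserved VLAN keeps the ALE from forwarding traffic between the two slaves. A minimal sketch of a directed submit, assuming the driver-internal types from this patch (example_directed_xmit is a hypothetical helper):

/* Illustration: send a frame out of CPSW slave port 1 or 2;
 * a directed value of 0 leaves the egress decision to the ALE.
 */
static int example_directed_xmit(struct cpsw_priv *priv,
				 struct sk_buff *skb, int port)
{
	return cpdma_chan_submit(priv->txch, skb, skb->data,
				 skb->len, port, GFP_KERNEL);
}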
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
char name[32];
@@ -588,11 +700,14 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_port = cpsw_get_slave_port(priv, slave->slave_num);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << slave_port, 0, ALE_MCAST_FWD_2);
+ if (priv->data.dual_emac)
+ cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+ else
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
- &cpsw_adjust_link, 0, slave->data->phy_if);
+ &cpsw_adjust_link, slave->data->phy_if);
if (IS_ERR(slave->phy)) {
dev_err(priv->dev, "phy %s not found on slave %d\n",
slave->data->phy_id, slave->slave_num);
@@ -604,14 +719,44 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
}
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+ const int vlan = priv->data.default_vlan;
+ const int port = priv->host_port;
+ u32 reg;
+ int i;
+
+ reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+
+ writel(vlan, &priv->host_port_regs->port_vlan);
+
+ for (i = 0; i < 2; i++)
+ slave_write(priv->slaves + i, vlan, reg);
+
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
+ ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
+ (ALE_PORT_1 | ALE_PORT_2) << port);
+}
+
static void cpsw_init_host_port(struct cpsw_priv *priv)
{
+ u32 control_reg;
+ u32 fifo_mode;
+
/* soft reset the controller and initialize ale */
soft_reset("cpsw", &priv->regs->soft_reset);
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
+ cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&priv->regs->control);
+ control_reg |= CPSW_VLAN_AWARE;
+ writel(control_reg, &priv->regs->control);
+ fifo_mode = (priv->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ CPSW_FIFO_NORMAL_MODE;
+ writel(fifo_mode, &priv->host_port_regs->tx_in_ctl);
/* setup host port priority mapping */
__raw_writel(CPDMA_TX_PRIORITY_MAP,
@@ -621,9 +766,12 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_control_set(priv->ale, priv->host_port,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
- cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, ALE_MCAST_FWD_2);
+ if (!priv->data.dual_emac) {
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ 0, 0);
+ cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ }
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -632,7 +780,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- cpsw_intr_disable(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_intr_disable(priv);
netif_carrier_off(ndev);
pm_runtime_get_sync(&priv->pdev->dev);
@@ -644,43 +793,55 @@ static int cpsw_ndo_open(struct net_device *ndev)
CPSW_RTL_VERSION(reg));
/* initialize host and slave ports */
- cpsw_init_host_port(priv);
+ if (!cpsw_common_res_usage_state(priv))
+ cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
- /* setup tx dma to fixed prio and zero offset */
- cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
- cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
+ /* Add default VLAN */
+ if (!priv->data.dual_emac)
+ cpsw_add_default_vlan(priv);
- /* disable priority elevation and enable statistics on all ports */
- __raw_writel(0, &priv->regs->ptype);
+ if (!cpsw_common_res_usage_state(priv)) {
+ /* setup tx dma to fixed prio and zero offset */
+ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
+ cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
- /* enable statistics collection only on the host port */
- __raw_writel(0x7, &priv->regs->stat_port_en);
+ /* disable priority elevation */
+ __raw_writel(0, &priv->regs->ptype);
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
+ /* enable statistics collection on all ports */
+ __raw_writel(0x7, &priv->regs->stat_port_en);
- for (i = 0; i < priv->data.rx_descs; i++) {
- struct sk_buff *skb;
+ if (WARN_ON(!priv->data.rx_descs))
+ priv->data.rx_descs = 128;
- ret = -ENOMEM;
- skb = netdev_alloc_skb_ip_align(priv->ndev,
- priv->rx_packet_max);
- if (!skb)
- break;
- ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
- if (WARN_ON(ret < 0))
- break;
+ for (i = 0; i < priv->data.rx_descs; i++) {
+ struct sk_buff *skb;
+
+ ret = -ENOMEM;
+ skb = netdev_alloc_skb_ip_align(priv->ndev,
+ priv->rx_packet_max);
+ if (!skb)
+ break;
+ ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
+ skb_tailroom(skb), 0, GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+ /* continue even if we didn't manage to submit all
+ * receive descs
+ */
+ cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
- /* continue even if we didn't manage to submit all receive descs */
- cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = true;
return 0;
}
@@ -701,12 +862,17 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_stop_queue(priv->ndev);
napi_disable(&priv->napi);
netif_carrier_off(priv->ndev);
- cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
- cpdma_ctlr_stop(priv->dma);
- cpsw_ale_stop(priv->ale);
+
+ if (cpsw_common_res_usage_state(priv) <= 1) {
+ cpsw_intr_disable(priv);
+ cpdma_ctlr_int_ctrl(priv->dma, false);
+ cpdma_ctlr_stop(priv->dma);
+ cpsw_ale_stop(priv->ale);
+ }
for_each_slave(priv, cpsw_slave_stop, priv);
pm_runtime_put_sync(&priv->pdev->dev);
+ if (priv->data.dual_emac)
+ priv->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -724,18 +890,24 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->cpts->tx_enable)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_tx_timestamp(skb);
- ret = cpdma_chan_submit(priv->txch, skb, skb->data,
- skb->len, GFP_KERNEL);
+ ret = cpsw_tx_packet_submit(ndev, priv, skb);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
}
+ /* If there are no free tx descriptors left, tell the kernel to stop
+ * sending us tx frames.
+ */
+ if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail:
priv->stats.tx_dropped++;
@@ -773,7 +945,7 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
u32 ts_en, seq_id;
- if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+ if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -781,10 +953,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ts_en |= CPSW_V1_TS_TX_EN;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -793,16 +965,21 @@ static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave;
u32 ctrl, mtype;
+ if (priv->data.dual_emac)
+ slave = &priv->slaves[priv->emac_port];
+ else
+ slave = &priv->slaves[priv->data.cpts_active_slave];
+
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
- if (priv->cpts.tx_enable)
+ if (priv->cpts->tx_enable)
ctrl |= CTRL_TX_TS_BITS;
- if (priv->cpts.rx_enable)
+ if (priv->cpts->rx_enable)
ctrl |= CTRL_RX_TS_BITS;
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -815,7 +992,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct cpts *cpts = &priv->cpts;
+ struct cpts *cpts = priv->cpts;
struct hwtstamp_config cfg;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -901,7 +1078,9 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
@@ -920,10 +1099,79 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+ cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
+
}
#endif
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ int ret;
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid,
+ ALE_ALL_PORTS << priv->host_port,
+ 0, ALE_ALL_PORTS << priv->host_port,
+ (ALE_PORT_1 | ALE_PORT_2) << priv->host_port);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
+ ALE_ALL_PORTS << priv->host_port,
+ ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(priv->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ return cpsw_add_vlan_ale_entry(priv, vid);
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ unsigned short vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (vid == priv->data.default_vlan)
+ return 0;
+
+ dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
+ priv->host_port, ALE_VLAN, vid);
+ if (ret != 0)
+ return ret;
+
+ return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+}
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -938,15 +1186,18 @@ static const struct net_device_ops cpsw_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
};
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
struct cpsw_priv *priv = netdev_priv(ndev);
- strcpy(info->driver, "TI CPSW Driver v1.0");
- strcpy(info->version, "1.0");
- strcpy(info->bus_info, priv->pdev->name);
+
+ strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
+ strlcpy(info->version, "1.0", sizeof(info->version));
+ strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
}
static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -974,7 +1225,7 @@ static int cpsw_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = priv->cpts.phc_index;
+ info->phc_index = priv->cpts->phc_index;
info->tx_types =
(1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
@@ -1011,6 +1262,7 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->data = data;
slave->regs = regs + slave_reg_ofs;
slave->sliver = regs + sliver_reg_ofs;
+ slave->port_vlan = data->dual_emac_res_vlan;
}
static int cpsw_probe_dt(struct cpsw_platform_data *data,
@@ -1051,12 +1303,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->cpts_clock_shift = prop;
- data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
- data->slaves, GFP_KERNEL);
- if (!data->slave_data) {
- pr_err("Could not allocate slave memory.\n");
+ data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
return -EINVAL;
- }
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
pr_err("Missing cpdma_channels property in the DT.\n");
@@ -1093,6 +1343,9 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->mac_control = prop;
+ if (!of_property_read_u32(node, "dual_emac", &prop))
+ data->dual_emac = prop;
+
/*
* Populate all the child nodes here...
*/
@@ -1126,6 +1379,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
+ if (data->dual_emac) {
+ if (of_property_read_u32(node, "dual_emac_res_vlan",
+ &prop)) {
+ pr_err("Missing dual_emac_res_vlan in DT.\n");
+ slave_data->dual_emac_res_vlan = i+1;
+ pr_err("Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
i++;
}
@@ -1136,6 +1401,79 @@ error_ret:
return ret;
}
+static int cpsw_probe_dual_emac(struct platform_device *pdev,
+ struct cpsw_priv *priv)
+{
+ struct cpsw_platform_data *data = &priv->data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv_sl2;
+ int ret = 0, i;
+
+ ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+ if (!ndev) {
+ pr_err("cpsw: error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv_sl2 = netdev_priv(ndev);
+ spin_lock_init(&priv_sl2->lock);
+ priv_sl2->data = *data;
+ priv_sl2->pdev = pdev;
+ priv_sl2->ndev = ndev;
+ priv_sl2->dev = &ndev->dev;
+ priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv_sl2->rx_packet_max = max(rx_packet_max, 128);
+
+ if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+ memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+ ETH_ALEN);
+ pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ } else {
+ random_ether_addr(priv_sl2->mac_addr);
+ pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ }
+ memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+
+ priv_sl2->slaves = priv->slaves;
+ priv_sl2->clk = priv->clk;
+
+ priv_sl2->cpsw_res = priv->cpsw_res;
+ priv_sl2->regs = priv->regs;
+ priv_sl2->host_port = priv->host_port;
+ priv_sl2->host_port_regs = priv->host_port_regs;
+ priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->dma = priv->dma;
+ priv_sl2->txch = priv->txch;
+ priv_sl2->rxch = priv->rxch;
+ priv_sl2->ale = priv->ale;
+ priv_sl2->emac_port = 1;
+ priv->slaves[1].ndev = ndev;
+ priv_sl2->cpts = priv->cpts;
+ priv_sl2->version = priv->version;
+
+ for (i = 0; i < priv->num_irqs; i++) {
+ priv_sl2->irqs_table[i] = priv->irqs_table[i];
+ priv_sl2->num_irqs = priv->num_irqs;
+ }
+
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("cpsw: error registering net device\n");
+ free_netdev(ndev);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
static int cpsw_probe(struct platform_device *pdev)
{
struct cpsw_platform_data *data = pdev->dev.platform_data;
@@ -1162,6 +1500,11 @@ static int cpsw_probe(struct platform_device *pdev)
priv->dev = &ndev->dev;
priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
priv->rx_packet_max = max(rx_packet_max, 128);
+ priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
+ if (!priv->cpts) {
+ pr_err("error allocating cpts\n");
+ ret = -ENOMEM;
+ goto clean_ndev_ret;
+ }
/*
* This may be required here for child devices.
@@ -1194,6 +1537,9 @@ static int cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
+ priv->slaves[0].ndev = ndev;
+ priv->emac_port = 0;
+
priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "fck is not found\n");
@@ -1248,7 +1594,7 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1259,7 +1605,7 @@ static int cpsw_probe(struct platform_device *pdev)
break;
case CPSW_VERSION_2:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts.reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -1346,7 +1692,7 @@ static int cpsw_probe(struct platform_device *pdev)
k++;
}
- ndev->flags |= IFF_ALLMULTI; /* see cpsw_ndo_change_rx_flags() */
+ ndev->features |= NETIF_F_HW_VLAN_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
@@ -1361,13 +1707,21 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_irq_ret;
}
- if (cpts_register(&pdev->dev, &priv->cpts,
+ if (cpts_register(&pdev->dev, priv->cpts,
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
priv->cpsw_res->start, ndev->irq);
+ if (priv->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(pdev, priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+ goto clean_irq_ret;
+ }
+ }
+
return 0;
clean_irq_ret:
@@ -1406,7 +1760,7 @@ static int cpsw_remove(struct platform_device *pdev)
pr_info("removing device");
platform_set_drvdata(pdev, NULL);
- cpts_unregister(&priv->cpts);
+ cpts_unregister(priv->cpts);
free_irq(ndev->irq, priv);
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0e9ccc2cf91..7fa60d6092e 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -148,7 +148,7 @@ static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
return idx;
}
-static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
+int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int type, idx;
@@ -160,6 +160,8 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
cpsw_ale_get_addr(ale_entry, entry_addr);
if (memcmp(entry_addr, addr, 6) == 0)
return idx;
@@ -167,6 +169,22 @@ static int cpsw_ale_match_addr(struct cpsw_ale *ale, u8 *addr)
return -ENOENT;
}
+int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+ return idx;
+ }
+ return -ENOENT;
+}
+
static int cpsw_ale_match_free(struct cpsw_ale *ale)
{
u32 ale_entry[ALE_ENTRY_WORDS];
@@ -274,19 +292,32 @@ int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
return 0;
}
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+ int flags, u16 vid)
+{
+ if (flags & ALE_VLAN) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+ } else {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ }
+}
+
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
cpsw_ale_set_port_num(ale_entry, port);
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
if (idx < 0)
@@ -298,12 +329,13 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags)
return 0;
}
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -ENOENT;
@@ -313,18 +345,19 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port)
}
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state)
+ int flags, u16 vid, int mcast_state)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx, mask;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx >= 0)
cpsw_ale_read(ale, idx, ale_entry);
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
cpsw_ale_set_addr(ale_entry, addr);
- cpsw_ale_set_super(ale_entry, super);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry);
@@ -342,12 +375,13 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
return 0;
}
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid)
{
u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
int idx;
- idx = cpsw_ale_match_addr(ale, addr);
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -EINVAL;
@@ -362,6 +396,55 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask)
return 0;
}
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+
+ cpsw_ale_set_vlan_untag_force(ale_entry, untag);
+ cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
+ cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+ cpsw_ale_set_vlan_member_list(ale_entry, port);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ if (port_mask)
+ cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
struct ale_control_info {
const char *name;
int offset, port_offset;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 2bd09cbce52..30daa1265f0 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -64,8 +64,14 @@ enum cpsw_ale_port_state {
};
/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
-#define ALE_SECURE 1
-#define ALE_BLOCKED 2
+#define ALE_SECURE BIT(0)
+#define ALE_BLOCKED BIT(1)
+#define ALE_SUPER BIT(2)
+#define ALE_VLAN BIT(3)
+
+#define ALE_PORT_HOST BIT(0)
+#define ALE_PORT_1 BIT(1)
+#define ALE_PORT_2 BIT(2)
#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
@@ -81,11 +87,17 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
-int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
+ int flags, u16 vid);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
- int super, int mcast_state);
-int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask);
+ int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
+ int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
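Taken together, the extended prototypes let callers scope address entries to a VLAN, mirroring what cpsw_add_vlan_ale_entry() does in cpsw.c. A usage sketch against the declarations above, assuming the host port is 0 as in cpsw (example_add_vlan_100 and its arguments are illustrative):

static int example_add_vlan_100(struct cpsw_ale *ale, u8 *mac, u8 *bcast)
{
	int all = ALE_PORT_HOST | ALE_PORT_1 | ALE_PORT_2;
	int ret;

	/* Create the VLAN entry, then VLAN-scoped unicast and broadcast. */
	ret = cpsw_ale_add_vlan(ale, 100, all, 0, all,
				ALE_PORT_1 | ALE_PORT_2);
	if (ret)
		return ret;

	ret = cpsw_ale_add_ucast(ale, mac, 0 /* host port */, ALE_VLAN, 100);
	if (ret)
		return ret;

	return cpsw_ale_add_mcast(ale, bcast, all, ALE_VLAN, 100, 0);
}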
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49956730cd8..68c3418160b 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -60,6 +60,9 @@
#define CPDMA_DESC_EOQ BIT(28)
#define CPDMA_DESC_TD_COMPLETE BIT(27)
#define CPDMA_DESC_PASS_CRC BIT(26)
+#define CPDMA_DESC_TO_PORT_EN BIT(20)
+#define CPDMA_TO_PORT_SHIFT 16
+#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_TEARDOWN_VALUE 0xfffffffc
@@ -105,13 +108,13 @@ struct cpdma_ctlr {
};
struct cpdma_chan {
+ struct cpdma_desc __iomem *head, *tail;
+ void __iomem *hdp, *cp, *rxfree;
enum cpdma_state state;
struct cpdma_ctlr *ctlr;
int chan_num;
spinlock_t lock;
- struct cpdma_desc __iomem *head, *tail;
int count;
- void __iomem *hdp, *cp, *rxfree;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
@@ -132,6 +135,14 @@ struct cpdma_chan {
#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+#define cpdma_desc_to_port(chan, mode, directed) \
+ do { \
+ if (!is_rx_chan(chan) && ((directed == 1) || \
+ (directed == 2))) \
+ mode |= (CPDMA_DESC_TO_PORT_EN | \
+ (directed << CPDMA_TO_PORT_SHIFT)); \
+ } while (0)
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
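The new mode bits only matter on transmit channels; for a directed frame the macro ORs in the port-enable bit plus the destination port in bits 18:16. A small sketch of the resulting mode word (example_mode is illustrative; the flag values come from the defines above):

static u32 example_mode(int directed)
{
	u32 mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	/* directed == 1 -> mode |= BIT(20) | (1 << 16); likewise for 2 */
	if (directed == 1 || directed == 2)
		mode |= CPDMA_DESC_TO_PORT_EN |
			(directed << CPDMA_TO_PORT_SHIFT);
	return mode;
}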
@@ -217,17 +228,27 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
}
static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
{
unsigned long flags;
int index;
+ int desc_start;
+ int desc_end;
struct cpdma_desc __iomem *desc = NULL;
spin_lock_irqsave(&pool->lock, flags);
- index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
- num_desc, 0);
- if (index < pool->num_desc) {
+ if (is_rx) {
+ desc_start = 0;
+ desc_end = pool->num_desc/2;
+ } else {
+ desc_start = pool->num_desc/2;
+ desc_end = pool->num_desc;
+ }
+
+ index = bitmap_find_next_zero_area(pool->bitmap,
+ desc_end, desc_start, num_desc, 0);
+ if (index < desc_end) {
bitmap_set(pool->bitmap, index, num_desc);
desc = pool->iomap + pool->desc_size * index;
pool->used_desc++;
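Restricting the bitmap search to one half of the pool keeps a busy receive path from consuming every descriptor that transmit needs, and vice versa. A sketch of the same idea in isolation, assuming kernel context for <linux/bitmap.h> (example_alloc_from_half is hypothetical):

static int example_alloc_from_half(unsigned long *bitmap, int num_desc,
				   bool is_rx)
{
	int start = is_rx ? 0 : num_desc / 2;
	int end = is_rx ? num_desc / 2 : num_desc;
	int idx = bitmap_find_next_zero_area(bitmap, end, start, 1, 0);

	if (idx >= end)
		return -ENOMEM;
	bitmap_set(bitmap, idx, 1);
	return idx;
}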
@@ -439,10 +460,8 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
- if (ctlr->channels[i])
- cpdma_chan_destroy(ctlr->channels[i]);
- }
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
@@ -474,9 +493,9 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
return 0;
}
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
- dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
@@ -652,7 +671,7 @@ static void __cpdma_chan_submit(struct cpdma_chan *chan,
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask)
+ int len, int directed, gfp_t gfp_mask)
{
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc __iomem *desc;
@@ -668,7 +687,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret;
}
- desc = cpdma_desc_alloc(ctlr->pool, 1);
+ desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
@@ -682,6 +701,7 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+ cpdma_desc_to_port(chan, mode, directed);
desc_write(desc, hw_next, 0);
desc_write(desc, hw_buffer, buffer);
@@ -704,6 +724,29 @@ unlock_ret:
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ int index;
+ bool ret;
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap,
+ pool->num_desc, pool->num_desc/2, 1, 0);
+
+ if (index < pool->num_desc)
+ ret = true;
+ else
+ ret = false;
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
+
static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc,
int outlen, int status)
@@ -749,7 +792,8 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
status = -EBUSY;
goto unlock_ret;
}
- status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+ CPDMA_DESC_PORT_MASK);
chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
chan_write(chan, cp, desc_dma);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index afa19a0c0d8..d9bcc6032fd 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -24,6 +24,13 @@
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan) __chan_linear((chan)->chan_num)
+#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
+
+#define CPDMA_EOI_RX_THRESH 0x0
+#define CPDMA_EOI_RX 0x1
+#define CPDMA_EOI_TX 0x2
+#define CPDMA_EOI_MISC 0x3
+
struct cpdma_params {
struct device *dev;
void __iomem *dmaregs;
@@ -82,12 +89,13 @@ int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
- int len, gfp_t gfp_mask);
+ int len, int directed, gfp_t gfp_mask);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
-void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
enum cpdma_control {
CPDMA_CMD_IDLE, /* write-only */
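The header changes summarize the new driver-facing contract: submissions carry a destination port, EOI acknowledgements are per interrupt source, and a transmit path can ask whether free descriptors remain. A condensed usage sketch against the prototypes above (example_xmit and example_ack_tx are hypothetical helpers, not from the patch):

static netdev_tx_t example_xmit(struct cpdma_chan *txch,
				struct net_device *ndev, struct sk_buff *skb)
{
	if (cpdma_chan_submit(txch, skb, skb->data, skb->len,
			      0 /* not directed */, GFP_KERNEL))
		return NETDEV_TX_BUSY;	/* caller keeps the skb */

	/* Stop the queue once no free tx descriptors remain; the tx
	 * completion handler restarts it.
	 */
	if (!cpdma_check_free_tx_desc(txch))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static void example_ack_tx(struct cpdma_ctlr *dma)
{
	/* Acknowledge only the tx interrupt source after tx completions;
	 * the rx path issues CPDMA_EOI_RX from the NAPI poll instead.
	 */
	cpdma_ctlr_eoi(dma, CPDMA_EOI_TX);
}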
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2a3e2c56bc6..52c05366599 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -120,7 +120,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
-#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -342,7 +341,6 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
- atomic_t cur_tx;
const char *phy_id;
#ifdef CONFIG_OF
struct device_node *phy_node;
@@ -480,8 +478,8 @@ static void emac_dump_regs(struct emac_priv *priv)
static void emac_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, emac_version_string);
- strcpy(info->version, EMAC_MODULE_VERSION);
+ strlcpy(info->driver, emac_version_string, sizeof(info->driver));
+ strlcpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
}
/**
@@ -1039,7 +1037,7 @@ static void emac_rx_handler(void *token, int len, int status)
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
WARN_ON(ret == -ENOMEM);
if (unlikely(ret < 0))
@@ -1050,10 +1048,10 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
- struct emac_priv *priv = netdev_priv(ndev);
-
- atomic_dec(&priv->cur_tx);
+ /* If the queue was stopped because the tx dma ran out of descriptors,
+ * restart it now that a descriptor has been freed.
+ */
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
ndev->stats.tx_packets++;
@@ -1094,14 +1092,17 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
- GFP_KERNEL);
+ 0, GFP_KERNEL);
if (unlikely(ret_code != 0)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
goto fail_tx;
}
- if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ /* If there are no free tx descriptors left, tell the kernel to stop
+ * sending us tx frames.
+ */
+ if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
netif_stop_queue(ndev);
return NETDEV_TX_OK;
@@ -1264,7 +1265,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
@@ -1558,7 +1558,7 @@ static int emac_dev_open(struct net_device *ndev)
break;
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
- skb_tailroom(skb), GFP_KERNEL);
+ skb_tailroom(skb), 0, GFP_KERNEL);
if (WARN_ON(ret < 0))
break;
}
@@ -1600,7 +1600,7 @@ static int emac_dev_open(struct net_device *ndev)
if (priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
- &emac_adjust_link, 0,
+ &emac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cca25509b03..d04a622b08d 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -320,10 +320,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
int ret, addr;
data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- dev_err(dev, "failed to alloc device data\n");
+ if (!data)
return -ENOMEM;
- }
data->bus = mdiobus_alloc();
if (!data->bus) {
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 96070e9b50d..36435499814 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -2195,7 +2195,6 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
/* ISSUE: Note that "dev_addr" is now a pointer. */
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index e321d0b6fc8..445c0595c99 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1226,8 +1226,8 @@ int gelic_net_open(struct net_device *netdev)
void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1);
- strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int gelic_ether_get_settings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/toshiba/spider_net_ethtool.c b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
index 9c288cd7d17..ffe519382e1 100644
--- a/drivers/net/ethernet/toshiba/spider_net_ethtool.c
+++ b/drivers/net/ethernet/toshiba/spider_net_ethtool.c
@@ -72,11 +72,13 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
card = netdev_priv(netdev);
/* clear and fill out info */
- memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strncpy(drvinfo->driver, spider_net_driver_name, 32);
- strncpy(drvinfo->version, VERSION, 32);
- strcpy(drvinfo->fw_version, "no information");
- strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
+ strlcpy(drvinfo->driver, spider_net_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "no information",
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(card->pdev),
+ sizeof(drvinfo->bus_info));
}
static void
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 9819349eaa1..fe256094db3 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -633,9 +633,8 @@ static int tc_mii_probe(struct net_device *dev)
/* attach the mac to the phy */
phydev = phy_connect(dev, dev_name(&phydev->dev),
- &tc_handle_link_change, 0,
- lp->chiptype == TC35815_TX4939 ?
- PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
+ &tc_handle_link_change,
+ lp->chiptype == TC35815_TX4939 ? PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
return PTR_ERR(phydev);
@@ -856,7 +855,6 @@ static int tc35815_init_one(struct pci_dev *pdev,
if (rc)
goto err_out;
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
printk(KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
dev->name,
chip_info[ent->driver_data].name,
@@ -1976,9 +1974,10 @@ tc35815_set_multicast_list(struct net_device *dev)
static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tc35815_local *lp = netdev_priv(dev);
- strcpy(info->driver, MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+
+ strlcpy(info->driver, MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 78ace59efd2..185c721c52d 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -417,6 +417,12 @@ enum chip_cmd_bits {
Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};
+struct rhine_stats {
+ u64 packets;
+ u64 bytes;
+ struct u64_stats_sync syncp;
+};
+
struct rhine_private {
/* Bit mask for configured VLAN ids */
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -458,6 +464,8 @@ struct rhine_private {
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int cur_tx, dirty_tx;
unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ struct rhine_stats rx_stats;
+ struct rhine_stats tx_stats;
u8 wolopts;
u8 tx_thresh, rx_thresh;
@@ -495,7 +503,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_set_rx_mode(struct net_device *dev);
-static struct net_device_stats *rhine_get_stats(struct net_device *dev);
+static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
@@ -842,7 +851,7 @@ static const struct net_device_ops rhine_netdev_ops = {
.ndo_open = rhine_open,
.ndo_stop = rhine_close,
.ndo_start_xmit = rhine_start_tx,
- .ndo_get_stats = rhine_get_stats,
+ .ndo_get_stats64 = rhine_get_stats64,
.ndo_set_rx_mode = rhine_set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -990,7 +999,6 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev_info(dev, "Using random MAC address: %pM\n",
dev->dev_addr);
}
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
@@ -1791,8 +1799,11 @@ static void rhine_tx(struct net_device *dev)
dev->stats.collisions += txstatus & 0x0F;
netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
(txstatus >> 3) & 0xF, txstatus & 0xF);
- dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
- dev->stats.tx_packets++;
+
+ u64_stats_update_begin(&rp->tx_stats.syncp);
+ rp->tx_stats.bytes += rp->tx_skbuff[entry]->len;
+ rp->tx_stats.packets++;
+ u64_stats_update_end(&rp->tx_stats.syncp);
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
@@ -1924,8 +1935,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (unlikely(desc_length & DescTag))
__vlan_hwaccel_put_tag(skb, vlan_tci);
netif_receive_skb(skb);
- dev->stats.rx_bytes += pkt_len;
- dev->stats.rx_packets++;
+
+ u64_stats_update_begin(&rp->rx_stats.syncp);
+ rp->rx_stats.bytes += pkt_len;
+ rp->rx_stats.packets++;
+ u64_stats_update_end(&rp->rx_stats.syncp);
}
entry = (++rp->cur_rx) % RX_RING_SIZE;
rp->rx_head_desc = &rp->rx_ring[entry];
@@ -2016,15 +2030,31 @@ out_unlock:
mutex_unlock(&rp->task_lock);
}
-static struct net_device_stats *rhine_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rhine_private *rp = netdev_priv(dev);
+ unsigned int start;
spin_lock_bh(&rp->lock);
rhine_update_rx_crc_and_missed_errord(rp);
spin_unlock_bh(&rp->lock);
- return &dev->stats;
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp);
+ stats->rx_packets = rp->rx_stats.packets;
+ stats->rx_bytes = rp->rx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp);
+ stats->tx_packets = rp->tx_stats.packets;
+ stats->tx_bytes = rp->tx_stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start));
+
+ return stats;
}
static void rhine_set_rx_mode(struct net_device *dev)
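The via-rhine conversion follows the usual u64_stats_sync pattern: the hot path brackets its counter updates with u64_stats_update_begin/end, and a reader retries until it sees a consistent snapshot. A generic sketch of the reader side, not tied to this driver (the _bh fetch variants match the ones used above):

struct example_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read(struct example_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}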
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 35238389032..545043cc4c0 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -570,7 +570,6 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
w5100_write_macaddr(priv);
return 0;
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 9d1d986f8d4..7cbd0e6fc6f 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -490,7 +490,6 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
w5300_write_macaddr(priv);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index aad909d793d..9fc2ada4c3c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -238,11 +238,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
int i;
lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
- if (!lp->rx_skb) {
- dev_err(&ndev->dev,
- "can't allocate memory for DMA RX buffer\n");
+ if (!lp->rx_skb)
goto out;
- }
+
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
@@ -319,18 +317,10 @@ out:
* net_device_ops
*/
-static int temac_set_mac_address(struct net_device *ndev, void *address)
+static void temac_do_set_mac_address(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
-
- if (!is_valid_ether_addr(ndev->dev_addr))
- eth_hw_addr_random(ndev);
- else
- ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
-
/* set up unicast MAC address filter set its mac address */
mutex_lock(&lp->indirect_mutex);
temac_indirect_out32(lp, XTE_UAW0_OFFSET,
@@ -344,15 +334,26 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
mutex_unlock(&lp->indirect_mutex);
+}
+static int temac_init_mac_address(struct net_device *ndev, void *address)
+{
+ memcpy(ndev->dev_addr, address, ETH_ALEN);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_random(ndev);
+ temac_do_set_mac_address(ndev);
return 0;
}
-static int netdev_set_mac_address(struct net_device *ndev, void *p)
+static int temac_set_mac_address(struct net_device *ndev, void *p)
{
struct sockaddr *addr = p;
- return temac_set_mac_address(ndev, addr->sa_data);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ temac_do_set_mac_address(ndev);
+ return 0;
}
static void temac_set_multicast_list(struct net_device *ndev)
@@ -579,7 +580,7 @@ static void temac_device_reset(struct net_device *ndev)
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
- temac_set_mac_address(ndev, NULL);
+ temac_do_set_mac_address(ndev);
/* Set address filter table */
temac_set_multicast_list(ndev);
@@ -938,7 +939,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
- .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1106,7 +1107,7 @@ static int temac_of_probe(struct platform_device *op)
rc = -ENODEV;
goto err_iounmap_2;
}
- temac_set_mac_address(ndev, (void *)addr);
+ temac_init_mac_address(ndev, (void *)addr);
rc = temac_mdio_setup(lp, op->dev.of_node);
if (rc)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 6f47100e58d..278c9db3b5b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1124,9 +1124,8 @@ static int axienet_ethtools_set_settings(struct net_device *ndev,
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- memset(ed, 0, sizeof(struct ethtool_drvinfo));
- strcpy(ed->driver, DRIVER_NAME);
- strcpy(ed->version, DRIVER_VERSION);
+ strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 94a1f94f74b..98e09d0d3ce 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1412,7 +1412,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info), "PCMCIA 0x%lx",
+ dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index d3ebb73277b..6958a5e8770 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -977,11 +977,12 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct port *port = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
port->firmware[0], port->firmware[1],
port->firmware[2], port->firmware[3]);
- strcpy(info->bus_info, "internal");
+ strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1450,7 +1451,7 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
+ port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(port->phydev)) {
err = PTR_ERR(port->phydev);