Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 6
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 10
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 12
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 2
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 6
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 4
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 44
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 317
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 76
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 155
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 195
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 150
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 309
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 402
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 41
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 267
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 41
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 195
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.h | 69
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_defs.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/cnic_if.h | 12
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 152
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 13
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 10
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna.h | 4
-rw-r--r--  drivers/net/ethernet/cadence/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/cadence/at91_ether.c | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 53
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 195
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 293
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 23
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 55
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.c | 48
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.h | 30
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_dev.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 257
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 329
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_res.h | 9
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 10
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 176
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.c | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_rq.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.c | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_wq.h | 14
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 8
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 14
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 78
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 512
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 101
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 851
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 4
-rw-r--r--  drivers/net/ethernet/ethoc.c | 4
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 7
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 245
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 21
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 168
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 16
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 74
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 4
-rw-r--r--  drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.h | 4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 21
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 12
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.h | 19
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c | 2
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 18
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 15
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 115
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 153
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 132
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 558
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 982
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 112
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2076
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_alloc.h | 59
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 2041
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2076
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.c | 131
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_diag.h | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1449
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 366
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.h | 245
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 1006
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 169
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 7377
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 391
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_osdep.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 239
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 4688
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_status.h | 101
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1817
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 259
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1154
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 368
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2335
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 120
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 202
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 42
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 155
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 21
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 80
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 31
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 135
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 148
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 34
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 321
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 157
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 160
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 199
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 542
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 105
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 4
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 90
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 24
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 177
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 104
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 16
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 12
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_mll.c | 6
-rw-r--r--  drivers/net/ethernet/moxa/Kconfig | 30
-rw-r--r--  drivers/net/ethernet/moxa/Makefile | 5
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 559
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.h | 330
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 213
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 3
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 3
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 2
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 15
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 67
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c | 98
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h | 1
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 24
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 306
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 745
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 50
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 292
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 237
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1179
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 41
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 231
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 181
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 493
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 166
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 29
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 31
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 10
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 71
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 10
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/sfc/Makefile | 7
-rw-r--r--  drivers/net/ethernet/sfc/bitfield.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 3059
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h | 415
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 500
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 129
-rw-r--r--  drivers/net/ethernet/sfc/enum.h | 10
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 399
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 1171
-rw-r--r--  drivers/net/ethernet/sfc/falcon_boards.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/falcon_xmac.c | 362
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 2942
-rw-r--r--  drivers/net/ethernet/sfc/farch_regs.h (renamed from drivers/net/ethernet/sfc/regs.h) | 272
-rw-r--r--  drivers/net/ethernet/sfc/filter.c | 1274
-rw-r--r--  drivers/net/ethernet/sfc/filter.h | 238
-rw-r--r--  drivers/net/ethernet/sfc/io.h | 50
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 1268
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 313
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mac.c | 130
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_mon.c | 274
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 5540
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c (renamed from drivers/net/ethernet/sfc/mcdi_phy.c) | 347
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/mdio_10g.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mtd.c | 634
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 408
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 1902
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 542
-rw-r--r--  drivers/net/ethernet/sfc/phy.h | 19
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 95
-rw-r--r--  drivers/net/ethernet/sfc/qt202x_phy.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 176
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 15
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 711
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 102
-rw-r--r--  drivers/net/ethernet/sfc/spi.h | 99
-rw-r--r--  drivers/net/ethernet/sfc/tenxpress.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 35
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/vfdi.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/workarounds.h | 22
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 5
-rw-r--r--  drivers/net/ethernet/sis/sis190.c | 3
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 30
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 10
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 10
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c | 4
-rw-r--r--  drivers/net/ethernet/sun/sunhme.c | 12
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 263
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 42
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 1
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 7
-rw-r--r--  drivers/net/ethernet/tile/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 1120
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 241
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 10
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 11
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 20
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 18
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 14
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
312 files changed, 59888 insertions, 11301 deletions
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe..becef25fa19 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
config NE_H8300
tristate "NE2000 compatible support for H8/300"
- depends on H8300
+ depends on H8300H_AKI3068NET || H8300H_H8MAX
---help---
Say Y here if you want to use the NE2000 compatible
controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d26433d61..f92f001551d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
#ifdef CONFIG_AX88796_93CX6
if (ax->plat->flags & AXFLG_HAS_93CX6) {
- unsigned char mac_addr[6];
+ unsigned char mac_addr[ETH_ALEN];
struct eeprom_93cx6 eeprom;
eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev)
ei_local = netdev_priv(dev);
ax = to_ax_dev(dev);
- ax->plat = pdev->dev.platform_data;
+ ax->plat = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local->rxcr_base = ax->plat->rcr_val;
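
The ax88796 hunks are part of two tree-wide cleanups that recur throughout this series: open-coded 6-byte MAC buffers become ETH_ALEN, and direct pdev->dev.platform_data dereferences become dev_get_platdata(). A minimal sketch of the resulting probe idiom, with an invented driver name and platform-data struct:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/string.h>

struct demo_plat_data {				/* hypothetical platform data */
	u8 mac_addr[ETH_ALEN];
};

static int demo_probe(struct platform_device *pdev)
{
	/* accessor instead of poking pdev->dev.platform_data directly */
	struct demo_plat_data *pd = dev_get_platdata(&pdev->dev);
	u8 addr[ETH_ALEN];			/* ETH_ALEN rather than a bare 6 */

	if (!pd)
		return -ENODEV;
	memcpy(addr, pd->mac_addr, ETH_ALEN);
	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}
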
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2037080c504..506b0248c40 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 390bd0bfaa2..c0b8789952e 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e904b3838dc..75fb1d20d6f 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -530,7 +530,7 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
if (lp->wol && !lp->irq_wake_requested) {
/* register wake irq handler */
rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
- IRQF_DISABLED, "EMAC_WAKE", dev);
+ 0, "EMAC_WAKE", dev);
if (rc)
return rc;
lp->irq_wake_requested = true;
@@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev)
setup_mac_addr(ndev->dev_addr);
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
rc = -ENODEV;
goto out_err_probe_mac;
}
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
lp->mii_bus = platform_get_drvdata(pd);
if (!lp->mii_bus) {
dev_err(&pdev->dev, "Cannot get mii_bus!\n");
@@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
goto out_err_probe_mac;
}
lp->mii_bus->priv = ndev;
- mii_bus_data = pd->dev.platform_data;
+ mii_bus_data = dev_get_platdata(&pd->dev);
rc = mii_probe(ndev, mii_bus_data->phy_mode);
if (rc) {
@@ -1686,7 +1686,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
/* now, enable interrupts */
/* register irq handler */
rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
- IRQF_DISABLED, "EMAC_RX", ndev);
+ 0, "EMAC_RX", ndev);
if (rc) {
dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
rc = -EBUSY;
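
The request_irq() hunks above (and the similar ones in sun3lance and bcm63xx_enet below) drop IRQF_DISABLED. The flag has long been a no-op, since handlers run with local interrupts disabled regardless, so passing 0 is behaviourally identical. A minimal sketch with hypothetical names:

#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* runs with local interrupts disabled whatever flags were used */
	return IRQ_HANDLED;
}

static int demo_request(int irq, void *dev)
{
	/* 0 where IRQF_DISABLED used to be passed */
	return request_irq(irq, demo_isr, 0, "demo-irq", dev);
}
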
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7ff4b30d55e..e0669455514 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL | __GFP_ZERO);
+ greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->tx_bd_base) {
err = -ENOMEM;
goto error3;
}
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL | __GFP_ZERO);
+ greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->rx_bd_base) {
err = -ENOMEM;
goto error4;
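
greth (and later bcm63xx_enet and bnx2) switches from dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) to dma_zalloc_coherent(), the zeroing wrapper around the same allocator. A minimal sketch of the replacement call, with a hypothetical helper name:

#include <linux/dma-mapping.h>

/* Returns an already-zeroed coherent buffer, so __GFP_ZERO no longer has
 * to be passed by hand.
 */
static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *handle)
{
	return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}
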
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index e8d0ef508f4..10ceca523fc 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -1147,7 +1147,7 @@ static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
- return PTR_RET(atarilance_dev);
+ return PTR_ERR_OR_ZERO(atarilance_dev);
}
static void __exit atarilance_module_exit(void)
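
Several module_init() stubs in this series (atarilance above, mvme147, ni65 and sun3lance below) move from PTR_RET() to its clearer name PTR_ERR_OR_ZERO(); behaviour is unchanged. Roughly, the helper amounts to the following sketch:

#include <linux/err.h>

/* Return the encoded error if ptr holds one, otherwise 0 -- useful when a
 * probe routine returns a pointer but module_init() must return an int.
 */
static inline int demo_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}
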
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index ceb45bc963a..91d52b49584 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev)
writel(0, aup->enable);
aup->mac_enabled = 0;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_info(&pdev->dev, "no platform_data passed,"
" PHY search on MAC0\n");
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 3d86ffeb4e1..94edc9c6fbb 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -725,6 +725,7 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
+ clear_ioasic_dma_irq(irq);
printk(KERN_ERR "%s: DMA error\n", dev->name);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index a51497c9d2a..e108e911da0 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,7 +188,7 @@ static struct net_device *dev_mvme147_lance;
int __init init_module(void)
{
dev_mvme147_lance = mvme147lance_probe(-1);
- return PTR_RET(dev_mvme147_lance);
+ return PTR_ERR_OR_ZERO(dev_mvme147_lance);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 26fc0ce0faa..1cf33addd15 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
- return PTR_RET(dev_ni65);
+ return PTR_ERR_OR_ZERO(dev_ni65);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index ed213072764..2d8e2881977 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
char *chipname;
struct net_device *dev;
const struct pcnet32_access *a = NULL;
- u8 promaddr[6];
+ u8 promaddr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
/* read PROM address and compare with CSR address */
- for (i = 0; i < 6; i++)
+ for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, 6) ||
+ if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 4375abe61da..3d8c6b2cdea 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -358,7 +358,7 @@ static int __init lance_probe( struct net_device *dev)
REGA(CSR0) = CSR0_STOP;
- if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
+ if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -940,7 +940,7 @@ static struct net_device *sun3lance_dev;
int __init init_module(void)
{
sun3lance_dev = sun3lance_probe(-1);
- return PTR_RET(sun3lance_dev);
+ return PTR_ERR_OR_ZERO(sun3lance_dev);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 55d79cb53a7..9e160148726 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev)
struct sk_buff *skb = tx_buff->skb;
unsigned int info = le32_to_cpu(txbd->info);
- *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
if ((info & FOR_EMAC) || !txbd->data)
break;
@@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->data = 0;
txbd->info = 0;
+ *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
+
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
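
The emac_main.c reordering above matters because the dirty index must only advance after the hardware has handed a descriptor back (FOR_EMAC cleared); otherwise an in-flight descriptor is treated as already cleaned. A toy ring cleaner showing the corrected order; the types, names and constants below are invented, only the control flow mirrors the driver:

#include <linux/types.h>

#define DEMO_OWNED_BY_HW	0x80000000u	/* stand-in for FOR_EMAC */
#define DEMO_RING_SIZE		128		/* stand-in for TX_BD_NUM */

struct demo_bd {
	u32 info;
	u32 data;
};

static void demo_tx_clean(struct demo_bd *ring, unsigned int *dirty)
{
	for (;;) {
		struct demo_bd *bd = &ring[*dirty];

		/* stop before touching a descriptor the hardware still owns */
		if ((bd->info & DEMO_OWNED_BY_HW) || !bd->data)
			break;

		/* ... unmap the buffer and free the skb here ... */
		bd->data = 0;
		bd->info = 0;

		/* only now is it safe to advance the dirty index */
		*dirty = (*dirty + 1) % DEMO_RING_SIZE;
	}
}
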
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 027398ebbba..fc95b235e21 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1188,7 +1188,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct alx_priv *alx;
struct alx_hw *hw;
bool phy_configured;
- int bars, pm_cap, err;
+ int bars, err;
err = pci_enable_device_mem(pdev);
if (err)
@@ -1225,18 +1225,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
- pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&pdev->dev,
"Can't find power management capability, aborting\n");
err = -EIO;
goto out_pci_release;
}
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- goto out_pci_release;
-
netdev = alloc_etherdev(sizeof(*alx));
if (!netdev) {
err = -ENOMEM;
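
The alx hunks above lean on work the PCI core already does: pci_enable_device_mem() brings the device to D0, and the power-management capability offset is cached in pci_dev::pm_cap during enumeration, so the driver neither re-walks the capability list nor forces D0 itself. A minimal sketch of the check, with a hypothetical helper name:

#include <linux/pci.h>

static int demo_check_pm_cap(struct pci_dev *pdev)
{
	/* pm_cap is filled in by the PCI core; 0 means no PM capability */
	if (!pdev->pm_cap) {
		dev_err(&pdev->dev, "no power management capability\n");
		return -EIO;
	}
	return 0;
}
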
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 52c96036dcc..2fa5b86f139 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -130,7 +130,7 @@ config BNX2X_SRIOV
config BGMAC
tristate "BCMA bus GBit core support"
- depends on BCMA_HOST_SOC && HAS_DMA
+ depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
select PHYLIB
---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b1bcd4ba474..b9a5fb6400d 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -926,13 +926,13 @@ static int bcm_enet_open(struct net_device *dev)
if (ret)
goto out_phy_disconnect;
- ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+ ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 0,
dev->name, dev);
if (ret)
goto out_freeirq;
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq_rx;
@@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_freeirq_tx;
@@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_free_rx_ring;
@@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (!bcm_enet_shared_base[0])
return -ENODEV;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+ if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
ret = 0;
@@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
- if (priv->base == NULL) {
- ret = -ENOMEM;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto out;
}
@@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
priv->has_phy = pd->has_phy;
@@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
} else {
struct bcm63xx_enet_platform_data *pd;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd && pd->mii_config)
pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii);
@@ -2158,13 +2156,13 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq;
if (priv->irq_tx != -1) {
ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
- IRQF_DISABLED, dev->name, dev);
+ 0, dev->name, dev);
if (ret)
goto out_freeirq_rx;
}
@@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
memcpy(priv->used_ports, pd->used_ports,
@@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
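
bcm63xx_enet above replaces devm_request_and_ioremap() with devm_ioremap_resource(), which requests and maps the region and reports failure as an ERR_PTR() while printing its own diagnostic. A minimal sketch of the calling convention, with hypothetical names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *demo_map_regs(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* on failure this returns an ERR_PTR(), not NULL */
	return devm_ioremap_resource(&pdev->dev, res);
}

The caller then propagates the encoded error with IS_ERR()/PTR_ERR() instead of inventing -ENOMEM, which is exactly the shape of the hunk above.
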
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index eec0af45b85..249468f9536 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
if (++ring->end >= BGMAC_TX_RING_SLOTS)
ring->end = 0;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+ ring->index_base +
ring->end * sizeof(struct bgmac_dma_desc));
/* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
/* The last slot that hardware didn't consume yet */
empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
empty_slot &= BGMAC_DMA_TX_STATDPTR;
+ empty_slot -= ring->index_base;
+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
empty_slot /= sizeof(struct bgmac_dma_desc);
while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
end_slot &= BGMAC_DMA_RX_STATDPTR;
+ end_slot -= ring->index_base;
+ end_slot &= BGMAC_DMA_RX_STATDPTR;
end_slot /= sizeof(struct bgmac_dma_desc);
ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring = &bgmac->tx_ring[i];
ring->num_slots = BGMAC_TX_RING_SLOTS;
ring->mmio_base = ring_base[i];
- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
- bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
- ring->mmio_base);
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+ BGMAC_DMA_RING_TX);
+ if (ring->unaligned)
+ ring->index_base = lower_32_bits(ring->dma_base);
+ else
+ ring->index_base = 0;
+
/* No need to alloc TX slots yet */
}
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
ring = &bgmac->rx_ring[i];
ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
- bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
- ring->mmio_base);
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+ BGMAC_DMA_RING_RX);
+ if (ring->unaligned)
+ ring->index_base = lower_32_bits(ring->dma_base);
+ else
+ ring->index_base = 0;
+
/* Alloc RX slots */
for (j = 0; j < ring->num_slots; j++) {
err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
ring = &bgmac->tx_ring[i];
- /* We don't implement unaligned addressing, so enable first */
- bgmac_dma_tx_enable(bgmac, ring);
+ if (!ring->unaligned)
+ bgmac_dma_tx_enable(bgmac, ring);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
lower_32_bits(ring->dma_base));
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
upper_32_bits(ring->dma_base));
+ if (ring->unaligned)
+ bgmac_dma_tx_enable(bgmac, ring);
ring->start = 0;
ring->end = 0; /* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
ring = &bgmac->rx_ring[i];
- /* We don't implement unaligned addressing, so enable first */
- bgmac_dma_rx_enable(bgmac, ring);
+ if (!ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
lower_32_bits(ring->dma_base));
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
upper_32_bits(ring->dma_base));
+ if (ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
}
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->index_base +
ring->num_slots * sizeof(struct bgmac_dma_desc));
ring->start = 0;
@@ -908,10 +926,10 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
u8 et_swtype = 0;
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
- BGMAC_CHIPCTL_1_IF_TYPE_RMII;
- char buf[2];
+ BGMAC_CHIPCTL_1_IF_TYPE_MII;
+ char buf[4];
- if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 98d4b5fcc07..66c8afbdc8c 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -333,7 +333,7 @@
#define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
#define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
-#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
+#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
#define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
#define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
#define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
u16 mmio_base;
struct bgmac_dma_desc *cpu_base;
dma_addr_t dma_base;
+ u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+ bool unaligned;
struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
};
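
The bgmac changes introduce unaligned-ring support: when a ring's DMA base is not naturally aligned, the hardware's index and status pointers are relative to lower_32_bits(dma_base), so the driver adds index_base when writing an index register and subtracts it (masking back into the pointer field) when reading status. The unaligned flag also flips the enable order so that such rings are only enabled after their base address has been programmed. A toy version of the recovery arithmetic; the mask and descriptor size below are illustrative constants, not taken from bgmac.h:

#include <linux/types.h>

/* status: raw DMA TX/RX status value; index_base: lower_32_bits(dma_base)
 * for an unaligned ring, 0 otherwise. Returns the slot number.
 */
static unsigned int demo_slot_from_status(u32 status, u32 index_base)
{
	u32 ptr = status & 0x00001fff;	/* illustrative STATDPTR-style mask */

	ptr -= index_base;
	ptr &= 0x00001fff;		/* wrap back into the pointer field */
	return ptr / 16;		/* illustrative descriptor size in bytes */
}
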
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6a2de1d79ff..e838a3f74b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.3"
-#define DRV_MODULE_RELDATE "June 27, 2012"
+#define DRV_MODULE_VERSION "2.2.4"
+#define DRV_MODULE_RELDATE "Aug 05, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (status_blk == NULL)
goto alloc_mem_err;
@@ -3908,136 +3907,121 @@ init_cpu_err:
return rc;
}
-static int
-bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+static void
+bnx2_setup_wol(struct bnx2 *bp)
{
- u16 pmcsr;
+ int i;
+ u32 val, wol_msg;
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (bp->wol) {
+ u32 advertising;
+ u8 autoneg;
- switch (state) {
- case PCI_D0: {
- u32 val;
+ autoneg = bp->autoneg;
+ advertising = bp->advertising;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
- (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
- PCI_PM_CTRL_PME_STATUS);
+ if (bp->phy_port == PORT_TP) {
+ bp->autoneg = AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ }
- if (pmcsr & PCI_PM_CTRL_STATE_MASK)
- /* delay required during transition out of D3hot */
- msleep(20);
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_setup_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
- val = BNX2_RD(bp, BNX2_EMAC_MODE);
- val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
- val &= ~BNX2_EMAC_MODE_MPKT;
- BNX2_WR(bp, BNX2_EMAC_MODE, val);
+ bp->autoneg = autoneg;
+ bp->advertising = advertising;
- val = BNX2_RD(bp, BNX2_RPM_CONFIG);
- val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- BNX2_WR(bp, BNX2_RPM_CONFIG, val);
- break;
- }
- case PCI_D3hot: {
- int i;
- u32 val, wol_msg;
-
- if (bp->wol) {
- u32 advertising;
- u8 autoneg;
-
- autoneg = bp->autoneg;
- advertising = bp->advertising;
-
- if (bp->phy_port == PORT_TP) {
- bp->autoneg = AUTONEG_SPEED;
- bp->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_Autoneg;
- }
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
- spin_lock_bh(&bp->phy_lock);
- bnx2_setup_phy(bp, bp->phy_port);
- spin_unlock_bh(&bp->phy_lock);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
- bp->autoneg = autoneg;
- bp->advertising = advertising;
+ /* Enable port mode. */
+ val &= ~BNX2_EMAC_MODE_PORT;
+ val |= BNX2_EMAC_MODE_MPKT_RCVD |
+ BNX2_EMAC_MODE_ACPI_RCVD |
+ BNX2_EMAC_MODE_MPKT;
+ if (bp->phy_port == PORT_TP) {
+ val |= BNX2_EMAC_MODE_PORT_MII;
+ } else {
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ if (bp->line_speed == SPEED_2500)
+ val |= BNX2_EMAC_MODE_25G_MODE;
+ }
- bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- val = BNX2_RD(bp, BNX2_EMAC_MODE);
+ /* receive all multicast */
+ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+ BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
+ }
+ BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
- /* Enable port mode. */
- val &= ~BNX2_EMAC_MODE_PORT;
- val |= BNX2_EMAC_MODE_MPKT_RCVD |
- BNX2_EMAC_MODE_ACPI_RCVD |
- BNX2_EMAC_MODE_MPKT;
- if (bp->phy_port == PORT_TP)
- val |= BNX2_EMAC_MODE_PORT_MII;
- else {
- val |= BNX2_EMAC_MODE_PORT_GMII;
- if (bp->line_speed == SPEED_2500)
- val |= BNX2_EMAC_MODE_25G_MODE;
- }
+ val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
- BNX2_WR(bp, BNX2_EMAC_MODE, val);
+ /* Need to enable EMAC and RPM for WOL. */
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
- /* receive all multicast */
- for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
- 0xffffffff);
- }
- BNX2_WR(bp, BNX2_EMAC_RX_MODE,
- BNX2_EMAC_RX_MODE_SORT_MODE);
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
- val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
- BNX2_RPM_SORT_USER0_MC_EN;
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
- BNX2_RPM_SORT_USER0_ENA);
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+ } else {
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+ }
- /* Need to enable EMAC and RPM for WOL. */
- BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
- BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL))
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
- val = BNX2_RD(bp, BNX2_RPM_CONFIG);
- val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+}
- wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
- }
- else {
- wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
- }
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+ switch (state) {
+ case PCI_D0: {
+ u32 val;
+
+ pci_enable_wake(bp->pdev, PCI_D0, false);
+ pci_set_power_state(bp->pdev, PCI_D0);
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
- 1, 0);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
+ val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+ val &= ~BNX2_EMAC_MODE_MPKT;
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+ break;
+ }
+ case PCI_D3hot: {
+ bnx2_setup_wol(bp);
+ pci_wake_from_d3(bp->pdev, bp->wol);
if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
(BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
if (bp->wol)
- pmcsr |= 3;
- }
- else {
- pmcsr |= 3;
- }
- if (bp->wol) {
- pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+ pci_set_power_state(bp->pdev, PCI_D3hot);
+ } else {
+ pci_set_power_state(bp->pdev, PCI_D3hot);
}
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
- pmcsr);
/* No more memory access after this point until
* device is brought back to D0.
*/
- udelay(50);
break;
}
default:
@@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev)
netif_carrier_off(dev);
- bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp);
rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev)
bnx2_del_napi(bp);
bp->link_up = 0;
netif_carrier_off(bp->dev);
- bnx2_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else {
bp->wol = 0;
}
+
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
return 0;
}
@@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
- return -EAGAIN;
-
/* parameters already validated in ethtool_get_eeprom */
rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
- return -EAGAIN;
-
/* parameters already validated in ethtool_set_eeprom */
rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
struct bnx2 *bp = netdev_priv(dev);
- bnx2_set_power_state(bp, PCI_D0);
-
memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
@@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (!netif_running(bp->dev))
- bnx2_set_power_state(bp, PCI_D3hot);
}
static void
@@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
switch (state) {
case ETHTOOL_ID_ACTIVE:
- bnx2_set_power_state(bp, PCI_D0);
-
bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
return 1; /* cycle on/off once per second */
@@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
case ETHTOOL_ID_INACTIVE:
BNX2_WR(bp, BNX2_EMAC_LED, 0);
BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
-
- if (!netif_running(dev))
- bnx2_set_power_state(bp, PCI_D3hot);
break;
}
@@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release;
}
- bnx2_set_power_state(bp, PCI_D0);
-
/* Configure byte swap and enable write to the reg_window registers.
* Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses
@@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ if (pdev->msix_cap)
bp->flags |= BNX2_FLAG_MSIX_CAP;
}
if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
- if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+ if (pdev->msi_cap)
bp->flags |= BNX2_FLAG_MSI_CAP;
}
@@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->wol = 0;
}
+ if (bp->flags & BNX2_FLAG_NO_WOL)
+ device_set_wakeup_capable(&bp->pdev->dev, false);
+ else
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
bp->tx_quick_cons_trip_int =
bp->tx_quick_cons_trip;
@@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev)
}
static int
-bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+bnx2_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- /* PCI register 4 needs to be saved whether netif_running() or not.
- * MSI address and data need to be saved if using MSI and
- * netif_running().
- */
- pci_save_state(pdev);
- if (!netif_running(dev))
- return 0;
-
- cancel_work_sync(&bp->reset_task);
- bnx2_netif_stop(bp, true);
- netif_device_detach(dev);
- del_timer_sync(&bp->timer);
- bnx2_shutdown_chip(bp);
- bnx2_free_skbs(bp);
- bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+ if (netif_running(dev)) {
+ cancel_work_sync(&bp->reset_task);
+ bnx2_netif_stop(bp, true);
+ netif_device_detach(dev);
+ del_timer_sync(&bp->timer);
+ bnx2_shutdown_chip(bp);
+ __bnx2_free_irq(bp);
+ bnx2_free_skbs(bp);
+ }
+ bnx2_setup_wol(bp);
return 0;
}
static int
-bnx2_resume(struct pci_dev *pdev)
+bnx2_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- pci_restore_state(pdev);
if (!netif_running(dev))
return 0;
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
+ bnx2_request_irq(bp);
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp, true);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
/**
* bnx2_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- pci_ers_result_t result;
- int err;
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+ int err = 0;
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
- result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (netif_running(dev)) {
- bnx2_set_power_state(bp, PCI_D0);
- bnx2_init_nic(bp, 1);
- }
- result = PCI_ERS_RESULT_RECOVERED;
+ if (netif_running(dev))
+ err = bnx2_init_nic(bp, 1);
+
+ if (!err)
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+ bnx2_napi_enable(bp);
+ dev_close(dev);
}
rtnl_unlock();
@@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp;
+
+ if (!dev)
+ return;
+
+ bp = netdev_priv(dev);
+ if (!bp)
+ return;
+
+ rtnl_lock();
+ if (netif_running(dev))
+ dev_close(bp->dev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ bnx2_set_power_state(bp, PCI_D3hot);
+
+ rtnl_unlock();
+}
+
static const struct pci_error_handlers bnx2_err_handler = {
.error_detected = bnx2_io_error_detected,
.slot_reset = bnx2_io_slot_reset,
@@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = {
.id_table = bnx2_pci_tbl,
.probe = bnx2_init_one,
.remove = bnx2_remove_one,
- .suspend = bnx2_suspend,
- .resume = bnx2_resume,
+ .driver.pm = BNX2_PM_OPS,
.err_handler = &bnx2_err_handler,
+ .shutdown = bnx2_shutdown,
};
module_pci_driver(bnx2_pci_driver);
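
Much of the bnx2 patch above converts the legacy pci_driver .suspend/.resume callbacks into dev_pm_ops, letting the PCI core save/restore config space and change D-states while the driver only quiesces the chip and programs wake-on-LAN (hence the new bnx2_setup_wol() and the device_set_wakeup_* calls). A minimal sketch of that registration pattern; the demo_* names and empty bodies are placeholders:

#include <linux/pci.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* stop the device and arm WoL; no pci_save_state() needed here */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* reinitialise the device; the core has already restored PCI state */
	return 0;
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	/* .probe, .remove and .id_table elided */
	.driver.pm	= &demo_pm_ops,
};
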
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 172efbecfea..18cb2d23e56 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 00b88cbfde2..97b3d32a98b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -246,8 +246,37 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+/* use a value high enough to be above all the PFs, which has least significant
+ * nibble as 8, so when cnic needs to come up with a CID for UIO to use to
+ * calculate doorbell address according to old doorbell configuration scheme
+ * (db_msg_sz 1 << 7 * cid + 0x40 DPM offset) it can come up with a valid number
+ * We must avoid coming up with cid 8 for iscsi since according to this method
+ * the designated UIO cid will come out 0 and it has a special handling for that
+ * case which doesn't suit us. Therefore we will round up to the closest cid which
+ * has least significant nibble 8, and if it is 8 we will move forward to 0x18.
+ */
+
+#define BNX2X_1st_NON_L2_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * \
(bp)->max_cos)
+/* amount of cids traversed by UIO's DPM addition to doorbell */
+#define UIO_DPM 8
+/* roundup to DPM offset */
+#define UIO_ROUNDUP(bp) (roundup(BNX2X_1st_NON_L2_ETH_CID(bp), \
+ UIO_DPM))
+/* offset to nearest value which has lsb nibble matching DPM */
+#define UIO_CID_OFFSET(bp) ((UIO_ROUNDUP(bp) + UIO_DPM) % \
+ (UIO_DPM * 2))
+/* add offset to rounded-up cid to get a value which could be used with UIO */
+#define UIO_DPM_ALIGN(bp) (UIO_ROUNDUP(bp) + UIO_CID_OFFSET(bp))
+/* but wait - avoid UIO special case for cid 0 */
+#define UIO_DPM_CID0_OFFSET(bp) ((UIO_DPM * 2) * \
+ (UIO_DPM_ALIGN(bp) == UIO_DPM))
+/* Properly DPM-aligned CID, adjusted for the cid 0 special case */
+#define BNX2X_CNIC_START_ETH_CID(bp) (UIO_DPM_ALIGN(bp) + \
+ (UIO_DPM_CID0_OFFSET(bp)))
+/* how many cids were wasted - need this value for cid allocation */
+#define UIO_CID_PAD(bp) (BNX2X_CNIC_START_ETH_CID(bp) - \
+ BNX2X_1st_NON_L2_ETH_CID(bp))
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
@@ -825,15 +854,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
- DPM_TRIGER_TYPE); \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
} while (0)
/* TX CSUM helpers */
@@ -1100,13 +1127,27 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
-#define BNX2X_VF_CID_WND 0
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF 1
-#define BNX2X_FIRST_VF_CID 256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
/*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* control by the number of fast-path status blocks supported by the
@@ -1331,7 +1372,7 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_TX_RESUME,
@@ -1530,7 +1571,6 @@ struct bnx2x {
*/
bool fcoe_init;
- int pm_cap;
int mrrs;
struct delayed_work sp_task;
@@ -1650,10 +1690,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
-	/* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
-	 * context size we need 8 ILT entries.
+	/* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
+	 * context size we need 32 ILT entries.
*/
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
@@ -1669,10 +1709,11 @@ struct bnx2x {
* Maximum CID count that might be required by the bnx2x:
* Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
+
#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
- + 2 * CNIC_SUPPORT(bp))
+ + CNIC_SUPPORT(bp) * (2 + UIO_CID_PAD(bp)))
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
@@ -1869,7 +1910,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
-
+#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -2069,9 +2110,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0cc26110868..e66beff2704 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -490,10 +490,10 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
-static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+ struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
@@ -572,7 +572,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* If we fail to allocate a substitute page, we simply stop
where we are and drop the whole packet */
- err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
if (unlikely(err)) {
bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
@@ -616,12 +616,17 @@ static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
kfree(data);
}
-static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
{
- if (fp->rx_frag_size)
+ if (fp->rx_frag_size) {
+ /* GFP_KERNEL allocations are used only during initialization */
+ if (unlikely(gfp_mask & __GFP_WAIT))
+ return (void *)__get_free_page(gfp_mask);
+
return netdev_alloc_frag(fp->rx_frag_size);
+ }
- return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
}
#ifdef CONFIG_INET
@@ -701,7 +706,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
goto drop;
/* Try to allocate the new data */
- new_data = bnx2x_frag_alloc(fp);
+ new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
@@ -752,15 +757,15 @@ drop:
bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
-static int bnx2x_alloc_rx_data(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ u16 index, gfp_t gfp_mask)
{
u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- data = bnx2x_frag_alloc(fp);
+ data = bnx2x_frag_alloc(fp, gfp_mask);
if (unlikely(data == NULL))
return -ENOMEM;
@@ -953,7 +958,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
memcpy(skb->data, data + pad, len);
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
} else {
- if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
+ GFP_ATOMIC) == 0)) {
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
@@ -1313,7 +1319,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->data = bnx2x_frag_alloc(fp);
+ first_buf->data =
+ bnx2x_frag_alloc(fp, GFP_KERNEL);
if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
@@ -1335,7 +1342,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
for (i = 0, ring_prod = 0;
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
- if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
+ GFP_KERNEL) < 0) {
BNX2X_ERR("was only able to allocate %d rx sges\n",
i);
BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -1948,7 +1956,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
}
}
-static int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss(struct bnx2x *bp)
{
int i;
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1972,8 +1980,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash)
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable)
{
struct bnx2x_config_rss_params params = {NULL};
@@ -1988,17 +1996,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
- __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-
- /* RSS configuration */
- __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
- if (rss_obj->udp_rss_v4)
- __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
- if (rss_obj->udp_rss_v6)
- __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ if (enable) {
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ } else {
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ }
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
@@ -2007,11 +2019,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, sizeof(params.rss_key));
+ prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
- return bnx2x_config_rss(bp, &params);
+ if (IS_PF(bp))
+ return bnx2x_config_rss(bp, &params);
+ else
+ return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
@@ -2066,7 +2081,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj;
__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
- /* Add a DEL command... */
+ /* Add a DEL command... - Since we're doing a driver cleanup only,
+ * we take a lock surrounding both the initial send and the CONTs,
+ * as we don't want a true completion to disrupt us in the middle.
+ */
+ netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2078,11 +2097,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
if (rc < 0) {
BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc);
+ netif_addr_unlock_bh(bp->dev);
return;
}
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
}
+ netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
@@ -2438,9 +2459,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
}
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
@@ -2462,8 +2481,7 @@ load_error_cnic2:
load_error_cnic1:
bnx2x_napi_disable_cnic(bp);
/* Update the number of queues without the cnic queues */
- rc = bnx2x_set_real_num_queues(bp, 0);
- if (rc)
+ if (bnx2x_set_real_num_queues(bp, 0))
BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
load_error_cnic0:
BNX2X_ERR("CNIC-related load failed\n");
@@ -2647,38 +2665,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* initialize FW coalescing state machines in RAM */
bnx2x_update_coalesce(bp);
+ }
- /* setup the leading queue */
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
-
- /* set up the rest of the queues */
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- /* setup rss */
- rc = bnx2x_init_rss_pf(bp);
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ if (IS_PF(bp))
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+ else /* VF */
+ rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
+ BNX2X_ERR("Queue %d setup failed\n", i);
LOAD_ERROR_EXIT(bp, load_error3);
}
+ }
- } else { /* vf */
- for_each_eth_queue(bp, i) {
- rc = bnx2x_vfpf_setup_q(bp, i);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup rss */
+ rc = bnx2x_init_rss(bp);
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
}
/* Now when Clients are configured we are ready to work */
@@ -2710,9 +2722,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* Start the Tx */
switch (load_mode) {
@@ -2997,16 +3007,16 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
u16 pmcsr;
/* If there is no power capability, silently succeed */
- if (!bp->pm_cap) {
+ if (!bp->pdev->pm_cap) {
BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
switch (state) {
case PCI_D0:
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
PCI_PM_CTRL_PME_STATUS));
@@ -3030,7 +3040,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
if (bp->wol)
pmcsr |= PCI_PM_CTRL_PME_ENABLE;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+ pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
pmcsr);
/* No more memory access after this point until
@@ -4218,7 +4228,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
failure_cnt++;
continue;
}
@@ -4789,6 +4799,11 @@ int bnx2x_resume(struct pci_dev *pdev)
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid)
{
+ if (!cxt) {
+ BNX2X_ERR("bad context pointer %p\n", cxt);
+ return;
+ }
+
/* ustorm cxt validation */
cxt->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c07a6d054cf..da8fcaa7449 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -51,8 +51,7 @@ extern int int_mode;
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO); \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
@@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
* @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
+ * @enable: enabled or disabled configuration
*/
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash);
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable);
/**
* bnx2x__init_func_obj - init function object
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* netif_addr_lock_bh()
*/
void bnx2x_set_rx_mode(struct net_device *dev);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
/**
* bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
@@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c5f22510168..324de5f0533 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1387,9 +1387,9 @@ static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
u16 pm = 0;
struct net_device *dev = pci_get_drvdata(bp->pdev);
- if (bp->pm_cap)
+ if (bp->pdev->pm_cap)
rc = pci_read_config_word(bp->pdev,
- bp->pm_cap + PCI_PM_CTRL, &pm);
+ bp->pdev->pm_cap + PCI_PM_CTRL, &pm);
if ((rc && !netif_running(dev)) ||
(!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
@@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
}
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab3..51468227bf3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -175,6 +175,7 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
#define EDC_MODE_LINEAR 0x0022
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+#define EDC_MODE_ACTIVE_DAC 0x0066
/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
@@ -3684,6 +3685,41 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
bnx2x_update_link_attr(params, vars->link_attr_sync);
}
+static void bnx2x_disable_kr2(struct link_params *params,
+ struct link_vars *vars,
+ struct bnx2x_phy *phy)
+{
+ struct bnx2x *bp = params->bp;
+ int i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Step 1 - Program the TX/RX alignment markers */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
+ };
+ DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
+
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
+ vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+ bnx2x_update_link_attr(params, vars->link_attr_sync);
+
+ vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
+}
+
static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -3715,7 +3751,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
u16 lane, i, cl72_ctrl, an_adv = 0;
- u16 ucode_ver;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3806,15 +3841,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
- */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
- if (ucode_ver < 0xd108) {
- DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
- ucode_ver);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
- }
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
@@ -3838,6 +3865,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
+ } else {
+ bnx2x_disable_kr2(params, vars, phy);
}
/* Enable Autoneg: only on the main lane */
@@ -4347,20 +4376,14 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u32 serdes_net_if;
u16 gp_status1 = 0, lnkup = 0, lnkup_kr = 0;
- u16 lane = bnx2x_get_warpcore_lane(phy, params);
vars->turn_to_run_wc_rt = vars->turn_to_run_wc_rt ? 0 : 1;
if (!vars->turn_to_run_wc_rt)
return;
- /* Return if there is no link partner */
- if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
- DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
- return;
- }
-
if (vars->rx_tx_asic_rst) {
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
serdes_net_if = (REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
@@ -4375,14 +4398,8 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
/*10G KR*/
lnkup_kr = (gp_status1 >> (12+lane)) & 0x1;
- DP(NETIF_MSG_LINK,
- "gp_status1 0x%x\n", gp_status1);
-
if (lnkup_kr || lnkup) {
- vars->rx_tx_asic_rst = 0;
- DP(NETIF_MSG_LINK,
- "link up, rx_tx_asic_rst 0x%x\n",
- vars->rx_tx_asic_rst);
+ vars->rx_tx_asic_rst = 0;
} else {
/* Reset the lane to see if link comes up.*/
bnx2x_warpcore_reset_lane(bp, phy, 1);
@@ -4507,10 +4524,14 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
* enabled transmitter to avoid current leakage in case
* no module is connected
*/
- if (bnx2x_is_sfp_module_plugged(phy, params))
- bnx2x_sfp_module_detection(phy, params);
- else
- bnx2x_sfp_e3_set_transmitter(params, phy, 1);
+ if ((params->loopback_mode == LOOPBACK_NONE) ||
+ (params->loopback_mode == LOOPBACK_EXT)) {
+ if (bnx2x_is_sfp_module_plugged(phy, params))
+ bnx2x_sfp_module_detection(phy, params);
+ else
+ bnx2x_sfp_e3_set_transmitter(params,
+ phy, 1);
+ }
bnx2x_warpcore_config_sfi(phy, params);
break;
@@ -5757,6 +5778,11 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
duplex);
+ /* In case of KR link down, start up the recovering procedure */
+ if ((!link_up) && (phy->media_type == ETH_PHY_KR) &&
+ (!(phy->flags & FLAGS_WC_DUAL_MODE)))
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
@@ -6503,12 +6529,15 @@ static int bnx2x_link_initialize(struct link_params *params,
(CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)))
bnx2x_set_parallel_detection(phy, params);
- if (params->phy[INT_PHY].config_init)
- params->phy[INT_PHY].config_init(phy,
- params,
- vars);
+ if (params->phy[INT_PHY].config_init)
+ params->phy[INT_PHY].config_init(phy, params, vars);
}
+ /* Re-read this value in case it was changed inside config_init due to
+ * limitations of optic module
+ */
+ vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
/* Init external phy*/
if (non_ext_phy) {
if (params->phy[INT_PHY].supported &
@@ -8082,7 +8111,10 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
- check_limiting_mode = 1;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+ *edc_mode = EDC_MODE_ACTIVE_DAC;
+ else
+ check_limiting_mode = 1;
} else if (copper_module_type &
SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
@@ -8557,6 +8589,7 @@ static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
break;
case EDC_MODE_PASSIVE_DAC:
+ case EDC_MODE_ACTIVE_DAC:
mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
break;
default:
@@ -9732,32 +9765,41 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
an_1000_val);
- /* set 100 speed advertisement */
- if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Set 10/100 speed advertisement */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
- /* set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
- (phy->supported &
- (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
+
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ /* Enable autoneg and restart autoneg for legacy speeds
+ */
+ autoneg_val |= (1<<9 | 1<<12);
+ an_10_100_val |= (1<<7);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+ (phy->supported & SUPPORTED_10baseT_Full)) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+
+ if ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) &&
+ (phy->supported & SUPPORTED_10baseT_Half)) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
@@ -13434,43 +13476,6 @@ static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
}
}
}
-static void bnx2x_disable_kr2(struct link_params *params,
- struct link_vars *vars,
- struct bnx2x_phy *phy)
-{
- struct bnx2x *bp = params->bp;
- int i;
- static struct bnx2x_reg_set reg_set[] = {
- /* Step 1 - Program the TX/RX alignment markers */
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL5, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL7, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL6, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_TX_CTRL9, 0x7690},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL11, 0xe647},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL82_USERB1_RX_CTRL10, 0xc4f0},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_USERB0_CTRL, 0x000c},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL1, 0x6000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CTRL3, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_CL73_BAM_CODE_FIELD, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI1, 0x0000},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI2, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_OUI3, 0x0af7},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
- {MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
- };
- DP(NETIF_MSG_LINK, "Disabling 20G-KR2\n");
-
- for (i = 0; i < ARRAY_SIZE(reg_set); i++)
- bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
- reg_set[i].val);
- vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
- bnx2x_update_link_attr(params, vars->link_attr_sync);
-
- vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
- /* Restart AN on leading lane */
- bnx2x_warpcore_restart_AN_KR(phy, params);
-}
-
static void bnx2x_kr2_recovery(struct link_params *params,
struct link_vars *vars,
struct bnx2x_phy *phy)
@@ -13548,6 +13553,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Disable KR2 on both lanes */
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, next_page);
bnx2x_disable_kr2(params, vars, phy);
+ /* Restart AN on leading lane */
+ bnx2x_warpcore_restart_AN_KR(phy, params);
return;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 1627a4e09c3..82b658d8c04 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4703,6 +4703,14 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
attn.sig[3] = REG_RD(bp,
MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
port*4);
+ /* Since MCP attentions can't be disabled inside the block, we need to
+ * read AEU registers to see whether they're currently disabled
+ */
+ attn.sig[3] &= ((REG_RD(bp,
+ !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
+ : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
+ MISC_AEU_ENABLE_MCP_PRTY_BITS) |
+ ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
if (!CHIP_IS_E1x(bp))
attn.sig[4] = REG_RD(bp,
@@ -5447,26 +5455,24 @@ static void bnx2x_timer(unsigned long data)
if (IS_PF(bp) &&
!BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
- u32 drv_pulse;
- u32 mcp_pulse;
+ u16 drv_pulse;
+ u16 mcp_pulse;
++bp->fw_drv_pulse_wr_seq;
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
- /* TBD - add SYSTEM_TIME */
drv_pulse = bp->fw_drv_pulse_wr_seq;
bnx2x_drv_pulse(bp);
mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
MCP_PULSE_SEQ_MASK);
/* The delta between driver pulse and mcp response
- * should be 1 (before mcp response) or 0 (after mcp response)
+ * should not get too big. If the MFW is more than 5 pulses
+ * behind, we should worry about it enough to generate an error
+ * log.
*/
- if ((drv_pulse != mcp_pulse) &&
- (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
- /* someone lost a heartbeat... */
- BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+ if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
+ BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
drv_pulse, mcp_pulse);
- }
}
if (bp->state == BNX2X_STATE_OPEN)
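The masked subtraction above keeps the check correct across sequence wrap-around. As a worked example (assuming the usual 15-bit pulse sequence mask, i.e. MCP_PULSE_SEQ_MASK == 0x7fff, which is not shown in this hunk): drv_pulse = 0x0002 and mcp_pulse = 0x7ffe give (0x0002 - 0x7ffe) & 0x7fff = 4, so no error is logged even though the raw values differ wildly, while an MFW that is genuinely 10 pulses behind (say 0x000c vs 0x0002) yields 10 > 5 and triggers the BNX2X_ERR print.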
@@ -6893,7 +6899,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -7645,6 +7651,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, init_phase);
bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+ REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
bnx2x_iov_init_dq(bp);
@@ -8063,7 +8070,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
int bnx2x_setup_leading(struct bnx2x *bp)
{
- return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+ if (IS_PF(bp))
+ return bnx2x_setup_queue(bp, &bp->fp[0], true);
+ else /* VF */
+ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}
/**
@@ -8077,8 +8087,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
{
int rc = 0;
- if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+ BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
return -EINVAL;
+ }
switch (int_mode) {
case BNX2X_INT_MODE_MSIX:
@@ -8646,6 +8658,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
u8 *mac_addr = bp->dev->dev_addr;
+ struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -8662,9 +8675,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
/* Enable the PME and clear the status */
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
@@ -9647,11 +9660,9 @@ sp_rtnl_not_reset:
}
}
- if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP,
- "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
- bnx2x_vfpf_storm_rx_mode(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
}
if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
@@ -9960,8 +9971,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
static int bnx2x_do_flr(struct bnx2x *bp)
{
- int i;
- u16 status;
struct pci_dev *dev = bp->pdev;
if (CHIP_IS_E1x(bp)) {
@@ -9976,20 +9985,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
return -EINVAL;
}
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev,
- "transaction is not cleared; proceeding with reset anyway\n");
-
-clear:
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
@@ -10409,7 +10406,7 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
break;
}
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
BNX2X_DEV_INFO("%sWoL capable\n",
@@ -11663,9 +11660,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* second status block for the L2 queue, and a third status block for
* CNIC if supported.
*/
- if (CNIC_SUPPORT(bp))
+ if (IS_VF(bp))
+ bp->min_msix_vec_cnt = 1;
+ else if (CNIC_SUPPORT(bp))
bp->min_msix_vec_cnt = 3;
- else
+ else /* PF w/o cnic */
bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
@@ -11882,34 +11881,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
+ } else {
+ /* Schedule an SP task to handle rest of change */
+ DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
- if (dev->flags & IFF_PROMISC)
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else {
+ } else {
if (IS_PF(bp)) {
/* some multicasts */
if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
} else {
/* configuring mcast to a vf involves sleeping (when we
- * wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ * wait for the pf's response).
*/
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11927,22 +11940,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
return;
}
if (IS_PF(bp)) {
bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
} else {
- /* configuring rx mode to storms in a vf involves sleeping (when
- * we wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
}
}
@@ -12137,8 +12148,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
}
if (IS_PF(bp)) {
- bp->pm_cap = pdev->pm_cap;
- if (bp->pm_cap == 0) {
+ if (!pdev->pm_cap) {
dev_err(&bp->pdev->dev,
"Cannot find power management capability, aborting\n");
rc = -EIO;
@@ -12564,19 +12574,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt, bool is_vf)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
- int pos, index;
+ int index;
u16 control = 0;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
/*
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos) {
+ if (!pdev->msix_cap) {
dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
}
@@ -12589,11 +12596,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
- return is_vf ? index + 1 : index;
+ return index;
}
static int set_max_cos_est(int chip_id)
@@ -12673,10 +12680,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
is_vf = set_is_vf(ent->driver_data);
cnic_cnt = is_vf ? 0 : 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+ /* add another SB for VF as it has no default SB */
+ max_non_def_sbs += is_vf ? 1 : 0;
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+ rss_count = max_non_def_sbs - cnic_cnt;
if (rss_count < 1)
return -EINVAL;
@@ -13628,6 +13638,10 @@ void bnx2x_setup_cnic_info(struct bnx2x *bp)
cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+ DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
+ BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
+ cp->iscsi_l2_cid);
+
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7..5ecf267dc4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6335,6 +6335,7 @@
#define PCI_ID_VAL2 0x438
#define PCI_ID_VAL3 0x43c
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8f03c984550..9fbeee522d2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
}
}
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
- struct bnx2x_exe_queue_obj *o)
-{
- spin_lock_bh(&o->lock);
-
- __bnx2x_exe_queue_reset_pending(bp, o);
-
- spin_unlock_bh(&o->lock);
-}
-
/**
* bnx2x_exe_queue_step - execute one execution chunk atomically
*
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue
* @ramrod_flags: flags
*
- * (Atomicity is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
*/
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
memset(&spacer, 0, sizeof(spacer));
- spin_lock_bh(&o->lock);
-
/* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
- spin_unlock_bh(&o->lock);
return 1;
}
}
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
}
/* Sanity check */
- if (!cur_len) {
- spin_unlock_bh(&o->lock);
+ if (!cur_len)
return 0;
- }
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
__bnx2x_exe_queue_reset_pending(bp, o);
- spin_unlock_bh(&o->lock);
return rc;
}
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details: Non-blocking implementation; should be called under execution
+ * queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (o->head_reader) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+ return -EBUSY;
+ }
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+ return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ * and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+ ramrod_flags);
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ if (rc != 0) {
+ BNX2X_ERR("execution of pending commands failed with rc %d\n",
+ rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = true;
+ o->saved_ramrod_flags = ramrod_flags;
+ DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+ ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would perform it - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+ __bnx2x_vlan_mac_h_exec_pending(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would perform it -
+ * possibly releasing and reclaiming the execution queue lock.
+ */
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+ o->head_reader);
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+
+ spin_lock_bh(&o->exe_queue.lock);
+ rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would be performed if this was the last
+ * reader, possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ } else {
+ o->head_reader--;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+ /* Writer release will do the trick */
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_read_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
+
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
u8 *next = base;
int counter = 0;
+ int read_lock;
+
+ DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
next += stride + size;
}
}
+
+ if (read_lock == 0) {
+ DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ }
+
return counter * ETH_ALEN;
}
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
return -EBUSY;
}
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = 0;
+
+ spin_lock_bh(&o->exe_queue.lock);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+ rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+ if (rc != 0) {
+ __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+ /* Calling function should not differentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = 1;
+ } else {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ }
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
/**
* bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
*
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
int rc;
+ /* Clearing the pending list & raw state should be made
+ * atomically (as execution flow assumes they represent the same).
+ */
+ spin_lock_bh(&o->exe_queue.lock);
+
/* Reset pending list */
- bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+ __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */
r->clear_pending(r);
+ spin_unlock_bh(&o->exe_queue.lock);
+
/* If ramrod failed this is most likely a SW bug */
if (cqe->message.error)
return -EINVAL;
/* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
if (rc < 0)
return rc;
}
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
* @p:
*
*/
-int bnx2x_config_vlan_mac(
- struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
{
int rc = 0;
struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(
/* Execute commands if required */
if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(
return rc;
/* Make a next step */
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
- ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
unsigned long *ramrod_flags)
{
struct bnx2x_vlan_mac_registry_elem *pos = NULL;
- int rc = 0;
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ int read_lock;
+ int rc = 0;
/* Clear pending commands first */
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ return read_lock;
+
list_for_each_entry(pos, &o->head, link) {
if (pos->vlan_mac_flags == *vlan_mac_flags) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, &p);
if (rc < 0) {
BNX2X_ERR("Failed to add a new DEL command\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
return rc;
}
}
}
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+
p.ramrod_flags = *ramrod_flags;
__set_bit(RAMROD_CONT, &p.ramrod_flags);
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
struct bnx2x_credit_pool_obj *vlans_pool)
{
INIT_LIST_HEAD(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
o->macs_pool = macs_pool;
o->vlans_pool = vlans_pool;
@@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
+int validate_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac)
+{
+ if (!vlan_mac->get_n_elements) {
+ BNX2X_ERR("vlan mac object was not intialized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/********************** Queue state object ***********************************/
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 798dfe99673..658f4e33abf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
* entries.
*/
struct list_head head;
+ /* Implement a simple reader/writer lock on the head list.
+ * All these fields should only be accessed under the exe_queue lock.
+ */
+ u8 head_reader; /* Num. of readers accessing head list */
+ bool head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
/* TODO: Add it's initialization in the init functions */
struct bnx2x_exe_queue_obj exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *macs_pool,
struct bnx2x_credit_pool_obj *vlans_pool);
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p);
+ struct bnx2x_vlan_mac_ramrod_params *p);
int bnx2x_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
+int validate_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e8706e19f96..9ad012bdd91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_DONE
};
+enum bnx2x_vfop_rss_state {
+ BNX2X_VFOP_RSS_CONFIG,
+ BNX2X_VFOP_RSS_DONE
+};
+
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
- if (vfq_is_leading(q)) {
- __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
- __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
- }
-
/* Setup-op rx parameters */
if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
BNX2X_Q_LOGICAL_STATE_STOPPED) {
DP(BNX2X_MSG_IOV,
"Entered qdtor but queue was already stopped. Aborting gracefully\n");
- goto op_done;
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
}
/* next state */
@@ -432,8 +436,10 @@ op_err:
op_done:
case BNX2X_VFOP_QDTOR_DONE:
/* invalidate the context */
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ if (qdtor->cxt) {
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ }
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+ vf->abs_vfid, vfop->rc);
return -ENOMEM;
}
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
if (!vf_sb_count(vf))
vf->igu_base_id = igu_sb_id;
+
++vf_sb_count(vf);
+ ++vf->sb_count;
}
+ BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
@@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
* and a valid credit counter
*/
if (!vfop->rc && args->credit) {
- int cnt = 0;
struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
+
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
list_for_each(pos, &obj->head)
cnt++;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
atomic_set(args->credit, cnt);
}
}
@@ -692,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -711,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -731,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -753,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -773,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -793,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -812,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -831,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -851,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -870,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -980,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+ true);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+ true);
DP(BNX2X_MSG_IOV,
"VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
vf->abs_vfid, vfop->rc);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1291,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ /* for non-leading queues skip directly to the qdtor state */
if (vfop) {
vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
- bnx2x_vfop_qdown, cmd->done);
+ bnx2x_vfop_opset(qid == LEADING_IDX ?
+ BNX2X_VFOP_QTEARDOWN_RXMODE :
+ BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+ cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
cmd->block);
}
@@ -1447,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
* both known
*/
static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculcis for macs (just yet) */
+ /* no credit calculations for macs (just yet) */
resc->num_mac_filters = 1;
/* divvy up vlan rules */
@@ -1467,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
resc->num_mc_filters = 0;
/* num_sbs already set */
+ resc->num_sbs = vf->sb_count;
}
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
/* reset the state variables */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
vf->state = VF_FREE;
}
@@ -1693,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
* the Pf doorbell size although the 2 are independent.
*/
- REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
- BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
/* No security checks for now -
* configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1705,9 +1756,6 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the number of VF allowed doorbells to the full DQ range */
- REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);
-
/* set the VF doorbell threshold */
REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
@@ -1761,7 +1809,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
int sb_id;
u32 val;
- u8 fid;
+ u8 fid, current_pf = 0;
/* IGU in normal mode - read CAM */
for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1769,16 +1817,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
continue;
fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
- if (!(fid & IGU_FID_ENCODE_IS_PF))
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_FUNC(bp))
bnx2x_vf_set_igu_info(bp, sb_id,
(fid & IGU_FID_VF_NUM_MASK));
-
DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
(fid & IGU_FID_VF_NUM_MASK)), sb_id,
GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
}
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1844,23 +1894,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
return 0;
}
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
- int i;
- u8 queue_count = 0;
-
- if (IS_SRIOV(bp))
- for_each_vf(bp, i)
- queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
- return queue_count;
-}
-
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
- int num_vfs_param)
+ int num_vfs_param)
{
- int err, i, qcount;
+ int err, i;
struct bnx2x_sriov *iov;
struct pci_dev *dev = bp->pdev;
@@ -1958,12 +1996,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
bnx2x_get_vf_igu_cam_info(bp);
- /* get the total queue count and allocate the global queue arrays */
- qcount = bnx2x_iov_get_max_queue_count(bp);
-
/* allocate the queue arrays for all VFs */
- bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
- GFP_KERNEL);
+ bp->vfdb->vfqs = kzalloc(
+ BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
if (!bp->vfdb->vfqs) {
BNX2X_ERR("failed to allocate vf queue array\n");
err = -ENOMEM;
@@ -2084,49 +2123,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
q_type);
DP(BNX2X_MSG_IOV,
- "initialized vf %d's queue object. func id set to %d\n",
- vf->abs_vfid, q->sp_obj.func_id);
-
- /* mac/vlan objects are per queue, but only those
- * that belong to the leading queue are initialized
- */
- if (vfq_is_leading(q)) {
- /* mac */
- bnx2x_init_mac_obj(bp, &q->mac_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, mac_rdata),
- bnx2x_vf_sp_map(bp, vf, mac_rdata),
- BNX2X_FILTER_MAC_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
- /* vlan */
- bnx2x_init_vlan_obj(bp, &q->vlan_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, vlan_rdata),
- bnx2x_vf_sp_map(bp, vf, vlan_rdata),
- BNX2X_FILTER_VLAN_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
- /* mcast */
- bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
- q->cid, func_id, func_id,
- bnx2x_vf_sp(bp, vf, mcast_rdata),
- bnx2x_vf_sp_map(bp, vf, mcast_rdata),
- BNX2X_FILTER_MCAST_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX);
-
- vf->leading_rss = cl_id;
- }
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
- int vfid, qcount, i;
+ int vfid;
if (!IS_SRIOV(bp)) {
DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2155,7 +2159,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
/* init statically provisioned resources */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
/* queues are initialized during VF-ACQUIRE */
@@ -2191,13 +2195,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
}
/* Final VF init */
- qcount = 0;
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
/* fill in the BDF and bars */
- vf->bus = bnx2x_vf_bus(bp, i);
- vf->devfn = bnx2x_vf_devfn(bp, i);
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
bnx2x_vf_set_bars(bp, vf);
DP(BNX2X_MSG_IOV,
@@ -2206,10 +2209,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
(unsigned)vf->bars[0].bar, vf->bars[0].size,
(unsigned)vf->bars[1].bar, vf->bars[1].size,
(unsigned)vf->bars[2].bar, vf->bars[2].size);
-
- /* set local queue arrays */
- vf->vfqs = &bp->vfdb->vfqs[qcount];
- qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
}
return 0;
@@ -2515,6 +2514,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
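+ /* stats_stride is supplied by the VF in its INIT tlv (typically
+ * sizeof(struct per_queue_stats)), so queue j reports into the
+ * j'th slot of the VF's stats buffer
+ */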
 /* collect stats from active queues only */
if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2522,13 +2524,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
/* create stats query entry for this queue */
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
cur_query_entry->funcID =
cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
cur_query_entry->address.hi =
- cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
- cpu_to_le32(U64_LO(vf->fw_stat_map));
+ cpu_to_le32(U64_LO(q_stats_addr));
DP(BNX2X_MSG_IOV,
"added address %x %x for vf %d queue %d client %d\n",
cur_query_entry->address.hi,
@@ -2537,6 +2539,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
+
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
}
}
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2555,6 +2561,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
+ if (!vf) {
+ BNX2X_ERR("VF was null! skipping...\n");
+ continue;
+ }
+
if (!list_empty(&vf->op_list_head) &&
atomic_read(&vf->op_in_progress)) {
DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2702,7 +2713,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q = vfq_get(vf, i);
if (!q) {
- DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ BNX2X_ERR("q number %d was not allocated\n", i);
return -EINVAL;
}
@@ -2930,6 +2941,43 @@ op_done:
bnx2x_vfop_end(bp, vf, vfop);
}
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_rss_state state;
+
+ if (!vfop) {
+ BNX2X_ERR("vfop was null\n");
+ return;
+ }
+
+ state = vfop->state;
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RSS_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RSS_DONE;
+ bnx2x_config_rss(bp, &vfop->op_p->rss);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RSS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
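+/* Like the other vfop handlers in this file, bnx2x_vfop_rss() is a small
+ * state machine: each case sets vfop->state to the next state, issues its
+ * ramrod and then lets bnx2x_vfop_finalize() decide whether to complete
+ * (VFOP_DONE), continue (VFOP_CONT) or stay pending until the ramrod
+ * completion re-schedules the op. Rough skeleton (issue_ramrod() is a
+ * placeholder for the per-state ramrod call):
+ *
+ *	case SOME_STATE:
+ *		vfop->state = NEXT_STATE;
+ *		vfop->rc = issue_ramrod(bp, vfop->op_p);
+ *		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ */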
+
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
@@ -2944,6 +2992,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
return -ENOMEM;
}
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+ cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
@@ -2955,6 +3018,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
.block = block,
};
int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -2983,6 +3048,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
/* lock the channel */
mutex_lock(&vf->op_mutex);
@@ -2997,19 +3068,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs expected_tlv)
{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
WARN(expected_tlv != vf->op_current,
"lock mismatch: expected %d found %d", expected_tlv,
vf->op_current);
+ /* clear the recorded locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+
 /* unlock the channel */
mutex_unlock(&vf->op_mutex);
/* log the unlock */
DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
 vf->abs_vfid, current_tlv);
-
- /* record the locking op */
- vf->op_current = CHANNEL_TLV_NONE;
}
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3040,11 +3124,80 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
return bnx2x_enable_sriov(bp);
}
}
+#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
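+ /* e.g. with a 64-entry SB pool and 8 requested VFs each VF gets
+ * 8 SBs (and thus up to 8 queues), capped by BNX2X_VF_MAX_QUEUES
+ */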
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ bnx2x_iov_static_resc(bp, vf);
+ }
+ /* prepare msix vectors in VF configuration space */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues);
+ DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
+ vf_idx, num_vf_queues);
+ }
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* enable sriov. This will probe all the VFs, and consequently cause
+ * the "acquire" messages to appear on the VF PF channel.
+ */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ pci_disable_sriov(bp->pdev);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3072,9 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf **vf,
- struct pf_vf_bulletin_content **bulletin)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3097,7 +3249,13 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
*bulletin = BP_VF_BULLETIN(bp, vfidx);
if (!*vf) {
- BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!(*vf)->vfqs) {
+ BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
vfidx);
return -EINVAL;
}
@@ -3125,8 +3283,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
@@ -3138,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
- 0, ETH_ALEN);
- vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
- 0, VLAN_HLEN);
+ if (!validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ if (!validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3209,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
return rc;
}
- /* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bnx2x_leading_vfq(vf, mac_obj);
+
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3276,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the vlan in device on this vf's queue */
unsigned long ramrod_flags = 0;
unsigned long vlan_mac_flags = 0;
struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_vfq(vf, 0, vlan_obj);
+ &bnx2x_leading_vfq(vf, vlan_obj);
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_update_params *update_params;
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj));
+ if (rc)
+ return rc;
memset(&ramrod_param, 0, sizeof(ramrod_param));
/* must lock vfpf channel to protect against vf flows */
@@ -3307,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
*/
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
update_params = &q_params.params.update;
__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbb..059f0d460af 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
u32 cid;
u16 index;
u16 sb_idx;
+ bool is_leading;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
#define VF_CFG_VLAN 0x0020
+#define VF_CFG_STATS_COALESCE 0x0040
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ u16 stats_stride;
dma_addr_t spq_map;
dma_addr_t bulletin_map;
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
u8 igu_base_id; /* base igu status block id */
struct bnx2x_vf_queue *vfqs;
-#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
u8 index; /* index in the vf array */
u8 abs_vfid;
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
} q_data;
+
+ union {
+ struct eth_rss_update_ramrod_data e2;
+ } rss_rdata;
};
struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+ /* the number of msix vectors belonging to this PF designated for VFs */
+ u16 vf_sbs_pool;
+ u16 first_vf_igu_entry;
};
/* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
return &(vf->vfqs[index]);
}
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
- return (vfq->index == 0);
-}
-
/* FW ids */
static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
{
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
- return vfq_cl_id(vf, q);
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ return vf->leading_rss;
+ else
+ return vfq_cl_id(vf, q);
}
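/* With VF_CFG_STATS_COALESCE all of the VF's queues report under the
 * leading RSS client id, which is why bnx2x_iov_adjust_stats_req() adds
 * only one stats query entry per such VF.
 */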
static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
@@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
@@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
- return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
@@ -793,10 +812,12 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
-static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
+static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params) {return 0; }
static inline int bnx2x_vfpf_set_mcast(struct net_device *dev) {return 0; }
static inline int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp) {return 0; }
static inline int bnx2x_iov_nic_init(struct bnx2x *bp) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d..da16953eb2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* humble our request */
req->resc_request.num_txqs =
- bp->acquire_resp.resc.num_txqs;
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
req->resc_request.num_rxqs =
- bp->acquire_resp.resc.num_rxqs;
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
req->resc_request.num_sbs =
- bp->acquire_resp.resc.num_sbs;
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
req->resc_request.num_mac_filters =
- bp->acquire_resp.resc.num_mac_filters;
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
req->resc_request.num_vlan_filters =
- bp->acquire_resp.resc.num_vlan_filters;
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
req->resc_request.num_mc_filters =
- bp->acquire_resp.resc.num_mc_filters;
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
/* Clear response buffer */
memset(&bp->vf2pf_mbox->resp, 0,
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->common.flash_size = 0;
bp->flags |=
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
- bp->igu_sb_cnt = 1;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
req->stats_addr = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ req->stats_stride = sizeof(struct per_queue_stats);
+
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -452,12 +460,60 @@ free_irq:
bnx2x_free_irq(bp);
}
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+}
+
/* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading)
{
struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 fp_idx = fp->index;
u16 tpa_agg_size = 0, flags = 0;
int rc;
@@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
tpa_agg_size = TPA_AGG_SIZE;
}
+ if (is_leading)
+ flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -646,6 +705,71 @@ out:
return 0;
}
+/* request pf to config rss table for vf queues */
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params)
+{
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+ sizeof(*req));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+ req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ req->rss_key_size = T_ETH_RSS_KEY;
+ req->rss_result_mask = params->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+ req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+ req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+ if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+ req->rss_flags |= VFPF_RSS_SET_SRCH;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+ req->rss_flags |= VFPF_RSS_IPV4;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+ req->rss_flags |= VFPF_RSS_IPV4_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+ req->rss_flags |= VFPF_RSS_IPV4_UDP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+ req->rss_flags |= VFPF_RSS_IPV6;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+ req->rss_flags |= VFPF_RSS_IPV6_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+ req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+ DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("failed to send rss message to PF over Vf PF channel %d\n",
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
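+/* Sketch of the expected VF-side usage (struct fields as used above; the
+ * ind_table/rss_key buffers and the 0x7f mask are placeholder values):
+ *
+ *	struct bnx2x_config_rss_params params = {0};
+ *
+ *	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ *	memcpy(params.rss_key, rss_key, sizeof(params.rss_key));
+ *	params.rss_result_mask = 0x7f;
+ *	params.rss_flags = (1 << BNX2X_RSS_MODE_REGULAR) |
+ *			   (1 << BNX2X_RSS_IPV4) | (1 << BNX2X_RSS_IPV4_TCP);
+ *	rc = bnx2x_vfpf_config_rss(bp, &params);
+ */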
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* fill in pfdev info */
resp->pfdev_info.chip_num = bp->common.chip_id;
- resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
/* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
+ vf->stats_stride = init->stats_stride;
vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
/* response */
bnx2x_vf_mbx_resp(bp, vf);
}
@@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
/* outer vlan removal is set according to PF's multi function mode */
if (IS_MF_SD(bp))
@@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p;
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
/* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_mbx_resp(bp, vf);
}
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+ struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ /* set vfop params according to rss tlv */
+ memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+ sizeof(rss_tlv->rss_key));
+ vf_op_params->rss_obj = &vf->rss_conf_obj;
+ vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
@@ -1569,25 +1765,28 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
bnx2x_vf_mbx_acquire(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_INIT:
bnx2x_vf_mbx_init_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_SETUP_Q:
bnx2x_vf_mbx_setup_q(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_SET_Q_FILTERS:
bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_TEARDOWN_Q:
bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_CLOSE:
bnx2x_vf_mbx_close_vf(bp, vf, mbx);
- break;
+ return;
case CHANNEL_TLV_RELEASE:
bnx2x_vf_mbx_release_vf(bp, vf, mbx);
- break;
+ return;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ return;
}
} else {
@@ -1603,26 +1802,24 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
for (i = 0; i < 20; i++)
DP_CONT(BNX2X_MSG_IOV, "%x ",
mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
+ }
- /* test whether we can respond to the VF (do we have an address
- * for it?)
- */
- if (vf->state == VF_ACQUIRED) {
- /* mbx_resp uses the op_rc of the VF */
- vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
+ /* can we respond to VF (do we have an address for it?) */
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
+ /* mbx_resp uses the op_rc of the VF */
+ vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
- /* notify the VF that we do not support this request */
- bnx2x_vf_mbx_resp(bp, vf);
- } else {
- /* can't send a response since this VF is unknown to us
- * just ack the FW to release the mailbox and unlock
- * the channel.
- */
- storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
- mmiowb();
- bnx2x_unlock_vf_pf_channel(bp, vf,
- mbx->first_tlv.tl.type);
- }
+ /* notify the VF that we do not support this request */
+ bnx2x_vf_mbx_resp(bp, vf);
+ } else {
+ /* can't send a response since this VF is unknown to us
+ * just ack the FW to release the mailbox and unlock
+ * the channel.
+ */
+ storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+ /* Firmware ack should be written before unlocking channel */
+ mmiowb();
+ bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a6..1179fe06d0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -51,6 +51,7 @@ struct hw_sb_info {
#define VFPF_QUEUE_FLG_COS 0x0080
#define VFPF_QUEUE_FLG_HC 0x0100
#define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
u8 padding[3];
};
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+ u8 rss_result_mask;
+ u8 ind_table_size;
+ u8 rss_key_size;
+ u8 padding;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set, the VF's queue
+ * stats will be coalesced on
+ * the leading RSS queue
+ */
+
/* Init VF */
struct vfpf_init_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
aligned_u64 spq_addr;
aligned_u64 stats_addr;
+ u16 stats_stride;
+ u32 flags;
+ u32 padding[2];
};
/* Setup Queue */
@@ -293,13 +323,14 @@ union vfpf_tlvs {
struct vfpf_q_op_tlv q_op;
struct vfpf_setup_q_tlv setup_q;
struct vfpf_set_q_filters_tlv set_q_filters;
- struct vfpf_release_tlv release;
- struct channel_list_end_tlv list_end;
+ struct vfpf_release_tlv release;
+ struct vfpf_rss_tlv update_rss;
+ struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
- struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_general_resp_tlv general_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
@@ -355,14 +386,18 @@ enum channel_tlvs {
CHANNEL_TLV_INIT,
CHANNEL_TLV_SETUP_Q,
CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_ACTIVATE_Q,
+ CHANNEL_TLV_DEACTIVATE_Q,
CHANNEL_TLV_TEARDOWN_Q,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
CHANNEL_TLV_PF_RELEASE_VF,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
+ CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_MAX
};
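/* New TLV types are appended at the tail of the enum and retired ones keep
 * their slot (e.g. CHANNEL_TLV_UPDATE_RSS_DEPRECATED), presumably so the
 * numeric values already in use on the VF-PF channel stay stable.
 */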
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d78d4cf140e..99394bd49a1 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1184,6 +1184,7 @@ error:
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ctx_blk_size = cp->ethdev->ctx_blk_size;
int total_mem, blks, i;
@@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
- if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+ if (!CHIP_IS_E1(bp))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
@@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
@@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
cp->max_cid_space += dev->max_fcoe_conn;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
@@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- if (CNIC_SUPPORTS_FCOE(cp)) {
+ if (CNIC_SUPPORTS_FCOE(bp)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
@@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
u32 type, union l5cm_specific_data *l5_data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
u16 type_16;
@@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
- BNX2X_HW_CID(cp, cid)));
+ BNX2X_HW_CID(bp, cid)));
type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
- type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID;
kwqe.hdr.type = cpu_to_le16(type_16);
@@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
rcu_read_unlock();
}
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+ int en_tcp_dack)
+{
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+ u16 tstorm_flags = 0;
+
+ if (time_stamps) {
+ xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ }
+ if (en_tcp_dack)
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
+
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
+}
+
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
@@ -1506,15 +1530,18 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
+ cnic_bnx2x_set_tcp_options(dev,
+ req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+ req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
+
return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_conn_offload1 *req1 =
(struct iscsi_kwqe_conn_offload1 *) wqes[0];
struct iscsi_kwqe_conn_offload2 *req2 =
@@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
u32 cid = ctx->cid;
- u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 hw_cid = BNX2X_HW_CID(bp, cid);
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
- u8 port = CNIC_PORT(cp);
+ u8 port = BP_PORT(bp);
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
@@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
ETH_P_8021Q;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
- cp->port_mode == CHIP_2_PORT_MODE) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
+ bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
port = 0;
}
@@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
struct iscsi_kwqe_conn_offload1 *req1;
struct iscsi_kwqe_conn_offload2 *req2;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_context *ctx;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
}
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
- kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+ kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
done:
cqes[0] = (struct kcqe *) &kcqe;
@@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
union l5cm_specific_data l5_data;
int ret;
@@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
- hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+ hw_cid = BNX2X_HW_CID(bp, ctx->cid);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
@@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
xstorm_buf->pseudo_header_checksum =
swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
- if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
- tstorm_buf->params |=
- L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
if (kwqe3->ka_timeout) {
tstorm_buf->ka_enable = 1;
tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
u8 *mac = dev->mac_addr;
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
mac[0]);
}
-static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
-{
- struct cnic_local *cp = dev->cnic_priv;
- struct bnx2x *bp = netdev_priv(dev->netdev);
- u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
- u16 tstorm_flags = 0;
-
- if (tcp_ts) {
- xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
- tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
- }
-
- CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
-
- CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
-}
-
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
@@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
-
- cnic_bnx2x_set_tcp_timestamp(dev,
- kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
@@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
struct fcoe_stat_ramrod_params *fcoe_stat;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ret;
u32 cid;
req = (struct fcoe_kwqe_stat *) kwqe;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_stat)
@@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
{
int ret;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 cid;
struct fcoe_init_ramrod_params *fcoe_init;
struct fcoe_kwqe_init1 *req1;
@@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
cp->kcq2.sw_prod_idx = 0;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
@@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
int ret = 0;
u32 cid = -1, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct fcoe_kwqe_conn_offload1 *req1;
struct fcoe_kwqe_conn_offload2 *req2;
struct fcoe_kwqe_conn_offload3 *req3;
@@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
if (fctx) {
- u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 hw_cid = BNX2X_HW_CID(bp, cid);
u32 val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
@@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
- cid = BNX2X_HW_CID(cp, cid);
+ cid = BNX2X_HW_CID(bp, cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (!ret)
@@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ret;
u32 cid;
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
req = (struct fcoe_kwqe_destroy *) kwqe;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
@@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
- struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
@@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
- if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (!BNX2X_CHIP_IS_E2_PLUS(bp))
return -EINVAL;
for (i = 0; i < num_wqes; ) {
@@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
u16 index, u8 op, u8 update)
{
- struct cnic_local *cp = dev->cnic_priv;
- u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
@@ -3127,6 +3135,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -3138,7 +3147,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
CNIC_WR16(dev, cp->kcq1.io_addr,
cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
- if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) {
+ if (!CNIC_SUPPORTS_FCOE(bp)) {
cp->arm_int(dev, status_idx);
break;
}
@@ -3603,6 +3612,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
csk1->rcv_buf = DEF_RCV_BUF;
csk1->snd_buf = DEF_SND_BUF;
csk1->seed = DEF_SEED;
+ csk1->tcp_flags = 0;
*csk = csk1;
return 0;
@@ -4020,15 +4030,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
- case L5CM_RAMROD_CMD_ID_CLOSE:
- if (l4kcqe->status != 0) {
- netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
- "status 0x%x\n", l4kcqe->status);
+ case L5CM_RAMROD_CMD_ID_CLOSE: {
+ struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+
+ if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+ l4kcqe->status, l5kcqe->completion_status);
opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
/* Fall through */
} else {
break;
}
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4213,13 +4226,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
- u32 port = CNIC_PORT(cp);
+ u32 pfid = bp->pfid;
+ u32 port = BP_PORT(bp);
cnic_init_bnx2x_mac(dev);
- cnic_bnx2x_set_tcp_timestamp(dev, 1);
+ cnic_bnx2x_set_tcp_options(dev, 0, 1);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -4897,6 +4909,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
@@ -4925,7 +4938,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (BNX2X_CHIP_IS_E2_PLUS(bp))
pbd_e2->parsing_data = (UNICAST_ADDRESS <<
ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
else
@@ -4962,6 +4975,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BNX2_PAGE_SIZE);
@@ -4970,7 +4984,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
- int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
u32 val;
dma_addr_t ring_map = udev->l2_ring_map;
@@ -4979,7 +4993,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
- data->general.func_id = cp->pfid;
+ data->general.func_id = bp->pfid;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
@@ -5029,13 +5043,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
@@ -5051,7 +5065,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
&sb->sb.running_index[SM_RX_ID];
}
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -5073,12 +5087,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
- cp->port_mode = bp->common.chip_port_mode;
- cp->pfid = bp->pfid;
cp->func = bp->pf_num;
func = CNIC_FUNC(cp);
- pfid = cp->pfid;
+ pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
cp->iscsi_start_cid, 0);
@@ -5086,7 +5098,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return -ENOMEM;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
cp->fcoe_start_cid, 0);
@@ -5168,12 +5180,12 @@ static void cnic_init_rings(struct cnic_dev *dev)
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier();
- cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
off = BAR_USTRORM_INTMEM +
- (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+ (BNX2X_CHIP_IS_E2_PLUS(bp) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
- USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+ USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
@@ -5206,7 +5218,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
"iSCSI CLIENT_SETUP did not complete\n");
cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
cnic_ring_ctl(dev, cid, cli, 1);
- *cid_ptr = cid;
+ *cid_ptr = cid >> 4;
+ *(cid_ptr + 1) = cid * bp->db_size;
}
}
@@ -5271,6 +5284,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
if (err)
netdev_err(dev->netdev, "register_cnic failed\n");
+ /* Read iSCSI config again. On some bnx2x devices, the iSCSI config
+ * can change after the firmware is downloaded.
+ */
+ dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ dev->max_iscsi_conn = 0;
+
return err;
}
@@ -5353,7 +5373,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_free_irq(dev);
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
idx_off = offsetof(struct hc_status_block_e2, index_values) +
(hc_index * sizeof(u16));
@@ -5370,7 +5390,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
*cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+ CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
@@ -5544,7 +5564,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (CNIC_SUPPORTS_FCOE(cp)) {
+ if (CNIC_SUPPORTS_FCOE(bp)) {
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
}
@@ -5564,7 +5584,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
cp->ack_int = cnic_ack_bnx2x_e2_msix;
cp->arm_int = cnic_arm_bnx2x_e2_msix;
} else {
@@ -5628,7 +5648,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
dev = cnic_from_netdev(netdev);
- if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+ if (!dev && event == NETDEV_REGISTER) {
/* Check for the hot-plug device */
dev = is_cnic_dev(netdev);
if (dev) {
@@ -5644,7 +5664,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
- if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+ if (event == NETDEV_UP) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
@@ -5693,21 +5713,8 @@ static struct notifier_block cnic_netdev_notifier = {
static void cnic_release(void)
{
- struct cnic_dev *dev;
struct cnic_uio_dev *udev;
- while (!list_empty(&cnic_dev_list)) {
- dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
- if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
- cnic_ulp_stop(dev);
- cnic_stop_hw(dev);
- }
-
- cnic_ulp_exit(dev);
- cnic_unregister_netdev(dev);
- list_del_init(&dev->list);
- cnic_free_dev(dev);
- }
while (!list_empty(&cnic_udev_list)) {
udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
list);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 62c670619ae..0121a5d5519 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
u32 chip_id;
int func;
- u32 pfid;
- u8 port_mode;
u32 shmem_base;
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
-#define BNX2X_CHIP_NUM_57710 0x164e
-#define BNX2X_CHIP_NUM_57711 0x164f
-#define BNX2X_CHIP_NUM_57711E 0x1650
-#define BNX2X_CHIP_NUM_57712 0x1662
-#define BNX2X_CHIP_NUM_57712E 0x1663
-#define BNX2X_CHIP_NUM_57713 0x1651
-#define BNX2X_CHIP_NUM_57713E 0x1652
-#define BNX2X_CHIP_NUM_57800 0x168a
-#define BNX2X_CHIP_NUM_57810 0x168e
-#define BNX2X_CHIP_NUM_57840 0x168d
-
-#define BNX2X_CHIP_NUM(x) (x >> 16)
-#define BNX2X_CHIP_IS_57710(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
-#define BNX2X_CHIP_IS_57711(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
-#define BNX2X_CHIP_IS_57711E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
-#define BNX2X_CHIP_IS_E1H(x) \
- (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
-#define BNX2X_CHIP_IS_57712(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
-#define BNX2X_CHIP_IS_57712E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
-#define BNX2X_CHIP_IS_57713(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
-#define BNX2X_CHIP_IS_57713E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
-#define BNX2X_CHIP_IS_57800(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
-#define BNX2X_CHIP_IS_57810(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
-#define BNX2X_CHIP_IS_57840(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
-#define BNX2X_CHIP_IS_E2(x) \
- (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
- BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
-#define BNX2X_CHIP_IS_E3(x) \
- (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
- BNX2X_CHIP_IS_57840(x))
-#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
-#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
- 0 : (CNIC_FUNC(cp) & 1))
-#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
-#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
- (CNIC_E1HVN(cp) << 17) | (x))
+#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
+ (BP_VN(bp) << 17) | (x))
#define BNX2X_SW_CID(x) (x & 0x1ffff)
-#define BNX2X_CL_QZONE_ID(cp, cli) \
- (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
- cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define BNX2X_CL_QZONE_ID(bp, cli) \
+ (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
+ cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
#ifndef MAX_STAT_COUNTER_ID
#define MAX_STAT_COUNTER_ID \
- (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
- ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+ (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
+ ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
MAX_STAT_COUNTER_ID_E1))
#endif
-#define CNIC_SUPPORTS_FCOE(cp) \
- (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
- !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+#define CNIC_SUPPORTS_FCOE(cp) \
+ (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
#define CNIC_RAMROD_TMO (HZ / 4)
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index ede3db35d75..95a8e4b11c9 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
u16 flags;
#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ec9bb9ad4bb..0658b43e148 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.16"
-#define CNIC_MODULE_RELDATE "Dec 05, 2012"
+#define CNIC_MODULE_VERSION "2.5.18"
+#define CNIC_MODULE_RELDATE "Sept 01, 2013"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -238,8 +238,8 @@ struct cnic_sock {
u16 src_port;
u16 dst_port;
u16 vlan_id;
- unsigned char old_ha[6];
- unsigned char ha[6];
+ unsigned char old_ha[ETH_ALEN];
+ unsigned char ha[ETH_ALEN];
u32 mtu;
u32 cid;
u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
int max_iscsi_conn;
int max_fcoe_conn;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0da2214ef1b..12d961c4ebc 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 132
+#define TG3_MIN_NUM 133
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 21, 2013"
+#define DRV_MODULE_RELDATE "Jul 29, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3030,6 +3030,20 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
return false;
}
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5719:
+ case ASIC_REV_5720:
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ !tp->pci_fn)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
@@ -3077,8 +3091,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
}
return;
} else if (do_low_power) {
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+ if (!tg3_phy_led_bug(tp))
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_FORCE_LED_OFF);
val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4241,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
static void tg3_power_down(struct tg3 *tp)
{
- tg3_power_down_prepare(tp);
-
pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
pci_set_power_state(tp->pdev, PCI_D3hot);
}
@@ -6095,10 +6108,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
- tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+ u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
- tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+ tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
@@ -6214,6 +6229,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ u32 clock_ctl;
+ int rval = 0;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.index != 0)
+ return -EINVAL;
+
+ tg3_full_lock(tp, 0);
+ clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+ clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+ if (on) {
+ u64 nsec;
+
+ nsec = rq->perout.start.sec * 1000000000ULL +
+ rq->perout.start.nsec;
+
+ if (rq->perout.period.sec || rq->perout.period.nsec) {
+ netdev_warn(tp->dev,
+ "Device supports only a one-shot timesync output, period must be 0\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ if (nsec & (1ULL << 63)) {
+ netdev_warn(tp->dev,
+ "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+ tw32(TG3_EAV_WATCHDOG0_MSB,
+ TG3_EAV_WATCHDOG0_EN |
+ ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+ tw32(TG3_EAV_REF_CLCK_CTL,
+ clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+ } else {
+ tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+ }
+
+err_out:
+ tg3_full_unlock(tp);
+ return rval;
+
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
@@ -6223,7 +6291,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.max_adj = 250000000,
.n_alarm = 0,
.n_ext_ts = 0,
- .n_per_out = 0,
+ .n_per_out = 1,
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
@@ -8538,10 +8606,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
- tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
}
@@ -8590,10 +8658,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping, GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -8601,10 +8668,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
@@ -10367,6 +10434,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_flag(tp, 5755_PLUS))
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
if (tg3_flag(tp, ENABLE_RSS))
tp->rx_mode |= RX_MODE_RSS_ENABLE |
RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11572,7 @@ static int tg3_close(struct net_device *dev)
memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
tg3_carrier_off(tp);
@@ -11724,9 +11794,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (tg3_flag(tp, NO_NVRAM))
return -EINVAL;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
offset = eeprom->offset;
len = eeprom->len;
eeprom->len = 0;
@@ -11784,9 +11851,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *buf;
__be32 start, end;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
@@ -13515,7 +13579,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_phy_start(tp);
}
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
}
@@ -15917,7 +15981,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
if (tg3_flag(tp, 5780_CLASS)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
- tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+ tp->msi_cap = tp->pdev->msi_cap;
} else {
struct pci_dev *bridge = NULL;
@@ -16129,12 +16193,12 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
* So explicitly force the chip into D0 here.
*/
pci_read_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
+ tp->pdev->pm_cap + PCI_PM_CTRL,
&pm_reg);
pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
pci_write_config_dword(tp->pdev,
- tp->pm_cap + PCI_PM_CTRL,
+ tp->pdev->pm_cap + PCI_PM_CTRL,
pm_reg);
/* Also, force SERR#/PERR# in PCI command. */
@@ -17283,7 +17347,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tp = netdev_priv(dev);
tp->pdev = pdev;
tp->dev = dev;
- tp->pm_cap = pdev->pm_cap;
tp->rx_mode = TG3_DEF_RX_MODE;
tp->tx_mode = TG3_DEF_TX_MODE;
tp->irq_sync = 1;
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
- if (tg3_flag(tp, 5717_PLUS)) {
- /* Resume a low-power mode */
- tg3_frob_aux_power(tp, false);
- }
-
tg3_timer_init(tp);
tg3_carrier_off(tp);
@@ -17755,6 +17813,23 @@ out:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (netif_running(dev))
+ dev_close(dev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ tg3_power_down(tp);
+
+ rtnl_unlock();
+}
+
/**
* tg3_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -17914,6 +17989,7 @@ static struct pci_driver tg3_driver = {
.remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
.driver.pm = &tg3_pm_ops,
+ .shutdown = tg3_shutdown,
};
module_pci_driver(tg3_driver);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cd63d1189aa..70257808aa3 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -532,6 +532,7 @@
#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000
#define RX_MODE_RSS_ENABLE 0x00800000
#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
+#define RX_MODE_IPV4_FRAG_FIX 0x02000000
#define MAC_RX_STATUS 0x0000046c
#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
#define RX_STATUS_XOFF_RCVD 0x00000002
@@ -1818,12 +1819,21 @@
#define TG3_EAV_REF_CLCK_CTL 0x00006908
#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
+#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16)
+#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17)
+
+#define TG3_EAV_WATCHDOG0_LSB 0x00006918
+#define TG3_EAV_WATCHDOG0_MSB 0x0000691c
+#define TG3_EAV_WATCHDOG0_EN (1 << 31)
+#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff
+
#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
-/* 0x690c --> 0x7000 unused */
+
+/* 0x692c --> 0x7000 unused */
/* NVRAM Control registers */
#define NVRAM_CMD 0x00007000
@@ -3224,7 +3234,6 @@ struct tg3 {
u8 pci_lat_timer;
int pci_fn;
- int pm_cap;
int msi_cap;
int pcix_cap;
int pcie_readrq;
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 57cd1bff59f..3c07064b2bc 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
bna_bfi_rx_enet_start(rx);
}
-void
+static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
bna_rxf_start(&rx->rxf);
}
-void
+static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
}
}
-void
+static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
}
}
-void
+static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}
-void
+static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index c37f706d999..43405f654b4 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 8030cc0396f..751d5c7b312 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
- depends on GENERIC_HARDIRQS && HAS_DMA
+ depends on HAS_DMA
select MACB
---help---
If you wish to compile a kernel for the AT91RM9200 and enable
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index bb5d63fb2e6..ce75de9bae9 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
/* Detect MAC & PHY and perform ethernet interface initialization */
static int __init at91ether_probe(struct platform_device *pdev)
{
- struct macb_platform_data *board_data = pdev->dev.platform_data;
+ struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
struct resource *regs;
struct net_device *dev;
struct phy_device *phydev;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e866608d7d9..92578690f6d 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -27,6 +27,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
@@ -124,7 +125,7 @@ void macb_get_hwaddr(struct macb *bp)
u8 addr[6];
int i;
- pdata = bp->pdev->dev.platform_data;
+ pdata = dev_get_platdata(&bp->pdev->dev);
/* Check all 4 address registers for a valid address */
for (i = 0; i < 4; i++) {
@@ -275,7 +276,7 @@ static int macb_mii_probe(struct net_device *dev)
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
- return -1;
+ return -ENXIO;
}
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -314,6 +315,7 @@ static int macb_mii_probe(struct net_device *dev)
int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
+ struct device_node *np;
int err = -ENXIO, i;
/* Enable management port */
@@ -333,10 +335,7 @@ int macb_mii_init(struct macb *bp)
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
- pdata = bp->pdev->dev.platform_data;
-
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
+ pdata = dev_get_platdata(&bp->pdev->dev);
bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!bp->mii_bus->irq) {
@@ -344,17 +343,45 @@ int macb_mii_init(struct macb *bp)
goto err_out_free_mdiobus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bp->mii_bus->irq[i] = PHY_POLL;
-
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- if (mdiobus_register(bp->mii_bus))
+ np = bp->pdev->dev.of_node;
+ if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ found during DT PHY registration */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = mdiobus_register(bp->mii_bus);
+ }
+
+ if (err)
goto err_out_free_mdio_irq;
- if (macb_mii_probe(bp->dev) != 0) {
+ err = macb_mii_probe(bp->dev);
+ if (err)
goto err_out_unregister_bus;
- }
return 0;
@@ -1824,7 +1851,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = of_get_phy_mode(pdev->dev.of_node);
if (err < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->is_rmii)
bp->phy_interface = PHY_INTERFACE_MODE_RMII;
else
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c..78d6d6b970e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
/* Receive errors */
unsigned long rx_watchdog;
unsigned long rx_da_filter_fail;
- unsigned long rx_sa_filter_fail;
unsigned long rx_payload_error;
unsigned long rx_ip_header_error;
/* Tx/Rx IRQ errors */
- unsigned long tx_undeflow;
unsigned long tx_process_stopped;
unsigned long rx_buf_unav;
unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
char rx_pause;
char tx_pause;
int wolopts;
+ struct work_struct tx_timeout_work;
};
/* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
+#define tx_dma_ring_space(p) \
+ dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
- u32 len = cpu_to_le32(p->flags);
+ u32 len = le32_to_cpu(p->buf_size);
return (len & DESC_BUFFER1_SZ_MASK) +
((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
p->flags = cpu_to_le32(tmpflags);
}
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
{
u32 data;
- data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
- writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+ if (addr) {
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+ } else {
+ writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+ writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+ }
}
static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
if (unlikely(skb == NULL))
break;
- priv->rx_skbuff[entry] = skb;
paddr = dma_map_single(priv->device, skb->data,
- bufsz, DMA_FROM_DEVICE);
+ priv->dma_buf_sz - NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ priv->rx_skbuff[entry] = skb;
desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
}
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
return;
for (i = 0; i < DMA_RX_RING_SZ; i++) {
- if (priv->rx_skbuff[i] == NULL)
+ struct sk_buff *skb = priv->rx_skbuff[i];
+ if (skb == NULL)
continue;
p = priv->dma_rx + i;
dma_unmap_single(priv->device, desc_get_buf_addr(p),
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
priv->rx_skbuff[i] = NULL;
}
}
static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
- int i, f;
+ int i;
struct xgmac_dma_desc *p;
if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
continue;
p = priv->dma_tx + i;
- dma_unmap_single(priv->device, desc_get_buf_addr(p),
- desc_get_buf_len(p), DMA_TO_DEVICE);
-
- for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
- p = priv->dma_tx + i++;
+ if (desc_get_tx_fs(p))
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ else
dma_unmap_page(priv->device, desc_get_buf_addr(p),
desc_get_buf_len(p), DMA_TO_DEVICE);
- }
- dev_kfree_skb_any(priv->tx_skbuff[i]);
+ if (desc_get_tx_ls(p))
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
}
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
*/
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
- int i;
-
while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
unsigned int entry = priv->tx_tail;
struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
if (desc_get_owner(p))
break;
- /* Verify tx error by looking at the last segment */
- if (desc_get_tx_ls(p))
- desc_get_tx_status(priv, p);
-
netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
priv->tx_head, priv->tx_tail);
- dma_unmap_single(priv->device, desc_get_buf_addr(p),
- desc_get_buf_len(p), DMA_TO_DEVICE);
-
- priv->tx_skbuff[entry] = NULL;
- priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
- if (!skb) {
- continue;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
- DMA_TX_RING_SZ);
- p = priv->dma_tx + priv->tx_tail;
-
+ if (desc_get_tx_fs(p))
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ else
dma_unmap_page(priv->device, desc_get_buf_addr(p),
desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ /* Check tx error on the last segment */
+ if (desc_get_tx_ls(p)) {
+ desc_get_tx_status(priv, p);
+ dev_kfree_skb(skb);
}
- dev_kfree_skb(skb);
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
}
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
- MAX_SKB_FRAGS)
+ /* Ensure tx_tail is visible to xgmac_xmit */
+ smp_mb();
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
netif_wake_queue(priv->dev);
}
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
{
- u32 reg, value, inten;
+ u32 reg, value;
+ struct xgmac_priv *priv =
+ container_of(work, struct xgmac_priv, tx_timeout_work);
- netif_stop_queue(priv->dev);
+ napi_disable(&priv->napi);
- inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_lock(priv->dev);
+
reg = readl(priv->base + XGMAC_DMA_CONTROL);
writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
priv->base + XGMAC_DMA_STATUS);
- writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_unlock(priv->dev);
netif_wake_queue(priv->dev);
+
+ napi_enable(&priv->napi);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
- /* Enable interrupts */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+ writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
/* Mask power mgt interrupt */
writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
napi_enable(&priv->napi);
netif_start_queue(dev);
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
return 0;
}
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
dev_kfree_skb(skb);
- return -EIO;
+ return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, paddr)) {
- dev_kfree_skb(skb);
- return -EIO;
- }
+ if (dma_mapping_error(priv->device, paddr))
+ goto dma_err;
entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
desc = priv->dma_tx + entry;
- priv->tx_skbuff[entry] = NULL;
+ priv->tx_skbuff[entry] = skb;
desc_set_buf_addr_and_size(desc, paddr, len);
if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
- writel(1, priv->base + XGMAC_DMA_TX_POLL);
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
- MAX_SKB_FRAGS)
+ /* Ensure tx_head update is visible to tx completion */
+ smp_mb();
+ if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
netif_stop_queue(dev);
+ /* Ensure netif_stop_queue is visible to tx completion */
+ smp_mb();
+ if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+ netif_start_queue(dev);
+ }
+ return NETDEV_TX_OK;
+dma_err:
+ entry = priv->tx_head;
+ for ( ; i > 0; i--) {
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+ dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+ desc_get_buf_len(desc), DMA_TO_DEVICE);
+ desc_clear_tx_owner(desc);
+ }
+ desc = first;
+ dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+ desc_get_buf_len(desc), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
skb_put(skb, frame_len);
dma_unmap_single(priv->device, desc_get_buf_addr(p),
- frame_len, DMA_FROM_DEVICE);
+ priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
static void xgmac_tx_timeout(struct net_device *dev)
{
struct xgmac_priv *priv = netdev_priv(dev);
-
- /* Clear Tx resources and restart transmitting again */
- xgmac_tx_err(priv);
+ schedule_work(&priv->tx_timeout_work);
}
/**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ } else {
+ use_hash = false;
}
netdev_for_each_mc_addr(ha, dev) {
if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
}
out:
+ for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+ xgmac_set_mac_addr(ioaddr, NULL, i);
for (i = 0; i < XGMAC_NUM_HASH; i++)
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
u32 intr_status;
- bool tx_err = false;
struct net_device *dev = (struct net_device *)dev_id;
struct xgmac_priv *priv = netdev_priv(dev);
struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
if (intr_status & DMA_STATUS_TPS) {
netdev_err(priv->dev, "transmit process stopped\n");
x->tx_process_stopped++;
- tx_err = true;
+ schedule_work(&priv->tx_timeout_work);
}
if (intr_status & DMA_STATUS_FBI) {
netdev_err(priv->dev, "fatal bus error\n");
x->fatal_bus_error++;
- tx_err = true;
}
-
- if (tx_err)
- xgmac_tx_err(priv);
}
/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
XGMAC_STAT(rx_payload_error),
XGMAC_STAT(rx_ip_header_error),
XGMAC_STAT(rx_da_filter_fail),
- XGMAC_STAT(rx_sa_filter_fail),
XGMAC_STAT(fatal_bus_error),
XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->netdev_ops = &xgmac_netdev_ops;
SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
priv->device = &pdev->dev;
priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
if (device_can_wakeup(priv->device))
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 4058b856eb7..76ae09999b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1157,7 +1157,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
*/
void *cxgb_alloc_mem(unsigned long size)
{
- void *p = kzalloc(size, GFP_KERNEL);
+ void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!p)
p = vzalloc(size);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2aafb809e06..dfd1e36f575 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -576,6 +576,7 @@ struct adapter {
struct l2t_data *l2t;
void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node;
+ struct list_head rcu_node;
struct tid_info tids;
void **tid_release_head;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5a3256b083f..c73cabdbd4c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -60,6 +60,7 @@
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
+#include <net/addrconf.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
@@ -68,6 +69,11 @@
#include "t4fw_api.h"
#include "l2t.h"
+#include <../drivers/net/bonding/bonding.h>
+
+#ifdef DRV_VERSION
+#undef DRV_VERSION
+#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
@@ -400,6 +406,9 @@ static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
+/* Adapter list to be accessed from atomic context */
+static LIST_HEAD(adap_rcu_list);
+static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
@@ -1133,7 +1142,7 @@ out: release_firmware(fw);
*/
void *t4_alloc_mem(size_t size)
{
- void *p = kzalloc(size, GFP_KERNEL);
+ void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!p)
p = vzalloc(size);
@@ -3227,6 +3236,38 @@ static int tid_init(struct tid_info *t)
return 0;
}
+static int cxgb4_clip_get(const struct net_device *dev,
+ const struct in6_addr *lip)
+{
+ struct adapter *adap;
+ struct fw_clip_cmd c;
+
+ adap = netdev2adap(dev);
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+ *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+static int cxgb4_clip_release(const struct net_device *dev,
+ const struct in6_addr *lip)
+{
+ struct adapter *adap;
+ struct fw_clip_cmd c;
+
+ adap = netdev2adap(dev);
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+ *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
/**
* cxgb4_create_server - create an IP server
* @dev: the device
@@ -3246,6 +3287,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
struct sk_buff *skb;
struct adapter *adap;
struct cpl_pass_open_req *req;
+ int ret;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
@@ -3263,10 +3305,78 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
- return t4_mgmt_tx(adap, skb);
+ ret = t4_mgmt_tx(adap, skb);
+ return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
+/* cxgb4_create_server6 - create an IPv6 server
+ * @dev: the device
+ * @stid: the server TID
+ * @sip: local IPv6 address to bind server to
+ * @sport: the server's TCP port
+ * @queue: queue to direct messages from this server to
+ *
+ * Create an IPv6 server for the given port and address.
+ * Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+ const struct in6_addr *sip, __be16 sport,
+ unsigned int queue)
+{
+ unsigned int chan;
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_pass_open_req6 *req;
+ int ret;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ adap = netdev2adap(dev);
+ req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+ req->local_port = sport;
+ req->peer_port = htons(0);
+ req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+ req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+ req->peer_ip_hi = cpu_to_be64(0);
+ req->peer_ip_lo = cpu_to_be64(0);
+ chan = rxq_to_chan(&adap->sge, queue);
+ req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+ SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ ret = t4_mgmt_tx(adap, skb);
+ return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_create_server6);
+
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6)
+{
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_close_listsvr_req *req;
+ int ret;
+
+ adap = netdev2adap(dev);
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
+ req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
+ LISTSVR_IPV6(0)) | QUEUENO(queue));
+ ret = t4_mgmt_tx(adap, skb);
+ return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_remove_server);
+
/**
* cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
* @mtus: the HW MTU table
@@ -3721,6 +3831,10 @@ static void attach_ulds(struct adapter *adap)
{
unsigned int i;
+ spin_lock(&adap_rcu_lock);
+ list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
+ spin_unlock(&adap_rcu_lock);
+
mutex_lock(&uld_mutex);
list_add_tail(&adap->list_node, &adapter_list);
for (i = 0; i < CXGB4_ULD_MAX; i++)
@@ -3746,6 +3860,10 @@ static void detach_ulds(struct adapter *adap)
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
+
+ spin_lock(&adap_rcu_lock);
+ list_del_rcu(&adap->rcu_node);
+ spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -3809,6 +3927,168 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
+/* Check whether the netdev on which the event occurred belongs to us.
+ * Return success (1) if it does, otherwise failure (0).
+ */
+static int cxgb4_netdev(struct net_device *netdev)
+{
+ struct adapter *adap;
+ int i;
+
+ spin_lock(&adap_rcu_lock);
+ list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
+ for (i = 0; i < MAX_NPORTS; i++)
+ if (adap->port[i] == netdev) {
+ spin_unlock(&adap_rcu_lock);
+ return 1;
+ }
+ spin_unlock(&adap_rcu_lock);
+ return 0;
+}
+
+static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
+ unsigned long event)
+{
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ if (cxgb4_netdev(event_dev)) {
+ switch (event) {
+ case NETDEV_UP:
+ ret = cxgb4_clip_get(event_dev,
+ (const struct in6_addr *)ifa->addr.s6_addr);
+ if (ret < 0) {
+ rcu_read_unlock();
+ return ret;
+ }
+ ret = NOTIFY_OK;
+ break;
+ case NETDEV_DOWN:
+ cxgb4_clip_release(event_dev,
+ (const struct in6_addr *)ifa->addr.s6_addr);
+ ret = NOTIFY_OK;
+ break;
+ default:
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static int cxgb4_inet6addr_handler(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct inet6_ifaddr *ifa = data;
+ struct net_device *event_dev;
+ int ret = NOTIFY_DONE;
+ struct bonding *bond = netdev_priv(ifa->idev->dev);
+ struct slave *slave;
+ struct pci_dev *first_pdev = NULL;
+
+ if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
+ event_dev = vlan_dev_real_dev(ifa->idev->dev);
+ ret = clip_add(event_dev, ifa, event);
+ } else if (ifa->idev->dev->flags & IFF_MASTER) {
+ /* Two different adapters can be bonded in one bond. Find all
+ * such adapters and add the CLIP entry in each of them only
+ * once.
+ */
+ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave) {
+ if (!first_pdev) {
+ ret = clip_add(slave->dev, ifa, event);
+ /* Initialize first_pdev only if clip_add succeeds,
+ * since that means the device is ours
+ */
+ if (ret == NOTIFY_OK)
+ first_pdev = to_pci_dev(
+ slave->dev->dev.parent);
+ } else if (first_pdev !=
+ to_pci_dev(slave->dev->dev.parent))
+ ret = clip_add(slave->dev, ifa, event);
+ }
+ read_unlock(&bond->lock);
+ } else
+ ret = clip_add(ifa->idev->dev, ifa, event);
+
+ return ret;
+}
+
+static struct notifier_block cxgb4_inet6addr_notifier = {
+ .notifier_call = cxgb4_inet6addr_handler
+};
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
+{
+ struct inet6_dev *idev = NULL;
+ struct inet6_ifaddr *ifa;
+ int ret = 0;
+
+ idev = __in6_dev_get(root_dev);
+ if (!idev)
+ return ret;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ ret = cxgb4_clip_get(dev,
+ (const struct in6_addr *)ifa->addr.s6_addr);
+ if (ret < 0)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ return ret;
+}
+
+static int update_root_dev_clip(struct net_device *dev)
+{
+ struct net_device *root_dev = NULL;
+ int i, ret = 0;
+
+ /* First populate the real net device's IPv6 addresses */
+ ret = update_dev_clip(dev, dev);
+ if (ret)
+ return ret;
+
+ /* Parse all bond and vlan devices layered on top of the physical dev */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+ if (!root_dev)
+ continue;
+
+ ret = update_dev_clip(root_dev, dev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void update_clip(const struct adapter *adap)
+{
+ int i;
+ struct net_device *dev;
+ int ret;
+
+ rcu_read_lock();
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ dev = adap->port[i];
+ ret = 0;
+
+ if (dev)
+ ret = update_root_dev_clip(dev);
+
+ if (ret < 0)
+ break;
+ }
+ rcu_read_unlock();
+}
+
/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
@@ -3854,6 +4134,7 @@ static int cxgb_up(struct adapter *adap)
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
notify_ulds(adap, CXGB4_STATE_UP);
+ update_clip(adap);
out:
return err;
irq_err:
@@ -5868,13 +6149,19 @@ static int __init cxgb4_init_module(void)
pr_warn("could not create debugfs entry, continuing\n");
ret = pci_register_driver(&cxgb4_driver);
- if (ret < 0)
+ if (ret < 0) {
debugfs_remove(cxgb4_debugfs_root);
+ destroy_workqueue(workq);
+ }
+
+ register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+
return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
+ unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
pci_unregister_driver(&cxgb4_driver);
debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
flush_workqueue(workq);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4faf4d067ee..6f21f2451c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -154,6 +154,11 @@ struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue);
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+ const struct in6_addr *sip, __be16 sport,
+ unsigned int queue);
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6);
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 01d48444120..cd6874b571e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -320,6 +320,21 @@ struct cpl_act_open_req6 {
__be32 opt2;
};
+struct cpl_t5_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+};
+
struct cpl_act_open_rpl {
union opcode_tid ot;
__be32 atid_status;
@@ -405,7 +420,7 @@ struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
-#define LISTSVR_IPV6 (1 << 14)
+#define LISTSVR_IPV6(x) ((x) << 14)
__be16 rsvd;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index d1c755f78aa..6f77ac48774 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -616,6 +616,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
@@ -2062,6 +2063,28 @@ struct fw_rss_vi_config_cmd {
} u;
};
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define M_FW_CLIP_CMD_ALLOC 0x1
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define G_FW_CLIP_CMD_ALLOC(x) \
+ (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define M_FW_CLIP_CMD_FREE 0x1
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define G_FW_CLIP_CMD_FREE(x) \
+ (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
enum fw_error_type {
FW_ERROR_TYPE_EXCEPTION = 0x0,
FW_ERROR_TYPE_HWMODULE = 0x1,
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e3d4ec836f8..ec88de4ac16 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -814,7 +814,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
if (pdev == NULL)
return -ENODEV;
- data = pdev->dev.platform_data;
+ data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 9d4974bba24..239e1e46545 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
+ enic_ethtool.o enic_api.o
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index afe9b1662b8..e9f7c656ddd 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.39"
-#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.50"
+#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 1
+#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -96,6 +96,7 @@ struct enic {
#ifdef CONFIG_PCI_IOV
u16 num_vfs;
#endif
+ spinlock_t enic_api_lock;
struct enic_port_profile *pp;
/* work queue cache line section */
@@ -127,9 +128,57 @@ static inline struct device *enic_get_dev(struct enic *enic)
return &(enic->pdev->dev);
}
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+ return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_legacy_io_intr(void)
+{
+ return 0;
+}
+
+static inline unsigned int enic_legacy_err_intr(void)
+{
+ return 1;
+}
+
+static inline unsigned int enic_legacy_notify_intr(void)
+{
+ return 2;
+}
+
+static inline unsigned int enic_msix_rq_intr(struct enic *enic,
+ unsigned int rq)
+{
+ return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_wq_intr(struct enic *enic,
+ unsigned int wq)
+{
+ return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count;
+}
+
+static inline unsigned int enic_msix_notify_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count + 1;
+}
+
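+
+These helpers pin down the MSI-X layout the multi-queue changes below rely on: in MSI-X mode the RQ completion interrupts effectively occupy the first rq_count entries, the WQ completion interrupts the next wq_count, followed by one error and one notify entry. For example, with rq_count = 4 and wq_count = 4, RQ0-RQ3 map to entries 0-3, WQ0-WQ3 to entries 4-7, the error interrupt to entry 8 and notify to entry 9, which is exactly what ENIC_INTR_MAX = ENIC_CQ_MAX + 2 provisions for.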
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
+void enic_set_ethtool_ops(struct net_device *netdev);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
new file mode 100644
index 00000000000..e13efbdaa2e
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_api.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_dev *vdev = enic->vdev;
+
+ spin_lock(&enic->enic_api_lock);
+ spin_lock(&enic->devcmd_lock);
+
+ vnic_dev_cmd_proxy_by_index_start(vdev, vf);
+ err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
+ vnic_dev_cmd_proxy_end(vdev);
+
+ spin_unlock(&enic->devcmd_lock);
+ spin_unlock(&enic->enic_api_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
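
A hypothetical consumer of the new export (example_enable_qp() is not part of the patch): enabling a usNIC queue pair on a subordinate vnic through the proxy, using CMD_QP_ENABLE and CMD_QP_RQWQ added to vnic_devcmd.h later in this series, and the same 1000-iteration devcmd wait the enic driver uses internally:

	static int example_enable_qp(struct net_device *netdev, int vnic_idx,
				     u32 qp_num)
	{
		u64 a0 = qp_num;	/* (u32)a0 = Queue Pair number */
		u64 a1 = CMD_QP_RQWQ;	/* (u32)a1 = command */
		int wait = 1000;

		return enic_api_devcmd_proxy_by_index(netdev, vnic_idx,
						      CMD_QP_ENABLE,
						      &a0, &a1, wait);
	}
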
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
new file mode 100644
index 00000000000..6b9f9255af2
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __ENIC_API_H__
+#define __ENIC_API_H__
+
+#include <linux/netdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
+
+#endif
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 08bded051b9..129b14a4efb 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -20,6 +20,7 @@
#define _ENIC_DEV_H_
#include "vnic_dev.h"
+#include "vnic_vic.h"
/*
* Calls the devcmd function given by argument vnicdevcmdfn.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
new file mode 100644
index 00000000000..47e3562f486
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+struct enic_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define ENIC_TX_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_RX_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_tx_stats[] = {
+ ENIC_TX_STAT(tx_frames_ok),
+ ENIC_TX_STAT(tx_unicast_frames_ok),
+ ENIC_TX_STAT(tx_multicast_frames_ok),
+ ENIC_TX_STAT(tx_broadcast_frames_ok),
+ ENIC_TX_STAT(tx_bytes_ok),
+ ENIC_TX_STAT(tx_unicast_bytes_ok),
+ ENIC_TX_STAT(tx_multicast_bytes_ok),
+ ENIC_TX_STAT(tx_broadcast_bytes_ok),
+ ENIC_TX_STAT(tx_drops),
+ ENIC_TX_STAT(tx_errors),
+ ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+ ENIC_RX_STAT(rx_frames_ok),
+ ENIC_RX_STAT(rx_frames_total),
+ ENIC_RX_STAT(rx_unicast_frames_ok),
+ ENIC_RX_STAT(rx_multicast_frames_ok),
+ ENIC_RX_STAT(rx_broadcast_frames_ok),
+ ENIC_RX_STAT(rx_bytes_ok),
+ ENIC_RX_STAT(rx_unicast_bytes_ok),
+ ENIC_RX_STAT(rx_multicast_bytes_ok),
+ ENIC_RX_STAT(rx_broadcast_bytes_ok),
+ ENIC_RX_STAT(rx_drop),
+ ENIC_RX_STAT(rx_no_bufs),
+ ENIC_RX_STAT(rx_errors),
+ ENIC_RX_STAT(rx_rss),
+ ENIC_RX_STAT(rx_crc_errors),
+ ENIC_RX_STAT(rx_frames_64),
+ ENIC_RX_STAT(rx_frames_127),
+ ENIC_RX_STAT(rx_frames_255),
+ ENIC_RX_STAT(rx_frames_511),
+ ENIC_RX_STAT(rx_frames_1023),
+ ENIC_RX_STAT(rx_frames_1518),
+ ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
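+
+The .index encoding is simply the counter's position in the underlying u64 array: assuming tx_bytes_ok is the fifth u64 member of struct vnic_tx_stats, ENIC_TX_STAT(tx_bytes_ok) stores index 4, and enic_get_ethtool_stats() below fetches it back as ((u64 *)&vstats->tx)[4].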
+static int enic_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(netdev)) {
+ ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_devcmd_fw_info *fw_info;
+
+ enic_dev_fw_info(enic, &fw_info);
+
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < enic_n_tx_stats; i++) {
+ memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < enic_n_rx_stats; i++) {
+ memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int enic_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return enic_n_tx_stats + enic_n_rx_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_stats *vstats;
+ unsigned int i;
+
+ enic_dev_stats_dump(enic, &vstats);
+
+ for (i = 0; i < enic_n_tx_stats; i++)
+ *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
+ for (i = 0; i < enic_n_rx_stats; i++)
+ *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct enic *enic = netdev_priv(netdev);
+ enic->msg_enable = value;
+}
+
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+ unsigned int i, intr;
+
+ tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ intr = enic_legacy_io_intr();
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ rx_coalesce_usecs);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
+static const struct ethtool_ops enic_ethtool_ops = {
+ .get_settings = enic_get_settings,
+ .get_drvinfo = enic_get_drvinfo,
+ .get_msglevel = enic_get_msglevel,
+ .set_msglevel = enic_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = enic_get_strings,
+ .get_sset_count = enic_get_sset_count,
+ .get_ethtool_stats = enic_get_ethtool_stats,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
+};
+
+void enic_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 992ec2ee64d..7b756cf9474 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -31,7 +31,6 @@
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -73,57 +72,6 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
-struct enic_stat {
- char name[ETH_GSTRING_LEN];
- unsigned int offset;
-};
-
-#define ENIC_TX_STAT(stat) \
- { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
-#define ENIC_RX_STAT(stat) \
- { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
-
-static const struct enic_stat enic_tx_stats[] = {
- ENIC_TX_STAT(tx_frames_ok),
- ENIC_TX_STAT(tx_unicast_frames_ok),
- ENIC_TX_STAT(tx_multicast_frames_ok),
- ENIC_TX_STAT(tx_broadcast_frames_ok),
- ENIC_TX_STAT(tx_bytes_ok),
- ENIC_TX_STAT(tx_unicast_bytes_ok),
- ENIC_TX_STAT(tx_multicast_bytes_ok),
- ENIC_TX_STAT(tx_broadcast_bytes_ok),
- ENIC_TX_STAT(tx_drops),
- ENIC_TX_STAT(tx_errors),
- ENIC_TX_STAT(tx_tso),
-};
-
-static const struct enic_stat enic_rx_stats[] = {
- ENIC_RX_STAT(rx_frames_ok),
- ENIC_RX_STAT(rx_frames_total),
- ENIC_RX_STAT(rx_unicast_frames_ok),
- ENIC_RX_STAT(rx_multicast_frames_ok),
- ENIC_RX_STAT(rx_broadcast_frames_ok),
- ENIC_RX_STAT(rx_bytes_ok),
- ENIC_RX_STAT(rx_unicast_bytes_ok),
- ENIC_RX_STAT(rx_multicast_bytes_ok),
- ENIC_RX_STAT(rx_broadcast_bytes_ok),
- ENIC_RX_STAT(rx_drop),
- ENIC_RX_STAT(rx_no_bufs),
- ENIC_RX_STAT(rx_errors),
- ENIC_RX_STAT(rx_rss),
- ENIC_RX_STAT(rx_crc_errors),
- ENIC_RX_STAT(rx_frames_64),
- ENIC_RX_STAT(rx_frames_127),
- ENIC_RX_STAT(rx_frames_255),
- ENIC_RX_STAT(rx_frames_511),
- ENIC_RX_STAT(rx_frames_1023),
- ENIC_RX_STAT(rx_frames_1518),
- ENIC_RX_STAT(rx_frames_to_max),
-};
-
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
-{
- return rq;
-}
-
-static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
-{
- return enic->rq_count + wq;
-}
-
-static inline unsigned int enic_legacy_io_intr(void)
-{
- return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
- return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
- return 2;
-}
-
-static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
-{
- return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
-{
- return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count;
-}
-
-static inline unsigned int enic_msix_notify_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count + 1;
-}
-
-static int enic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
-
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
-
- if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
- }
-
- ecmd->autoneg = AUTONEG_DISABLE;
-
- return 0;
-}
-
-static void enic_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct enic *enic = netdev_priv(netdev);
- struct vnic_devcmd_fw_info *fw_info;
-
- enic_dev_fw_info(enic, &fw_info);
-
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
- sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- unsigned int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
- memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < enic_n_rx_stats; i++) {
- memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int enic_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void enic_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct enic *enic = netdev_priv(netdev);
- struct vnic_stats *vstats;
- unsigned int i;
-
- enic_dev_stats_dump(enic, &vstats);
-
- for (i = 0; i < enic_n_tx_stats; i++)
- *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
- for (i = 0; i < enic_n_rx_stats; i++)
- *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
-}
-
-static u32 enic_get_msglevel(struct net_device *netdev)
-{
- struct enic *enic = netdev_priv(netdev);
- return enic->msg_enable;
-}
-
-static void enic_set_msglevel(struct net_device *netdev, u32 value)
-{
- struct enic *enic = netdev_priv(netdev);
- enic->msg_enable = value;
-}
-
-static int enic_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
-
- ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
- ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
-
- return 0;
-}
-
-static int enic_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
- u32 tx_coalesce_usecs;
- u32 rx_coalesce_usecs;
- unsigned int i, intr;
-
- tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
- rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
-
- switch (vnic_dev_get_intr_mode(enic->vdev)) {
- case VNIC_DEV_INTR_MODE_INTX:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
-
- intr = enic_legacy_io_intr();
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
-
- vnic_intr_coalescing_timer_set(&enic->intr[0],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < enic->wq_count; i++) {
- intr = enic_msix_wq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- }
-
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
- }
-
- break;
- default:
- break;
- }
-
- enic->tx_coalesce_usecs = tx_coalesce_usecs;
- enic->rx_coalesce_usecs = rx_coalesce_usecs;
-
- return 0;
-}
-
-static const struct ethtool_ops enic_ethtool_ops = {
- .get_settings = enic_get_settings,
- .get_drvinfo = enic_get_drvinfo,
- .get_msglevel = enic_get_msglevel,
- .set_msglevel = enic_set_msglevel,
- .get_link = ethtool_op_get_link,
- .get_strings = enic_get_strings,
- .get_sset_count = enic_get_sset_count,
- .get_ethtool_stats = enic_get_ethtool_stats,
- .get_coalesce = enic_get_coalesce,
- .set_coalesce = enic_set_coalesce,
-};
-
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
struct enic *enic = vnic_dev_priv(wq->vdev);
@@ -396,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
completed_index, enic_wq_free_buf,
opaque);
- if (netif_queue_stopped(enic->netdev) &&
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
vnic_wq_desc_avail(&enic->wq[q_number]) >=
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
- netif_wake_queue(enic->netdev);
+ netif_wake_subqueue(enic->netdev, q_number);
spin_unlock(&enic->wq_lock[q_number]);
@@ -560,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
struct enic *enic = data;
- unsigned int cq = enic_cq_wq(enic, 0);
- unsigned int intr = enic_msix_wq_intr(enic, 0);
+ unsigned int cq;
+ unsigned int intr;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int wq_work_done;
+ unsigned int wq_irq;
+
+ wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+ cq = enic_cq_wq(enic, wq_irq);
+ intr = enic_msix_wq_intr(enic, wq_irq);
wq_work_done = vnic_cq_service(&enic->cq[cq],
wq_work_to_do, enic_wq_service, NULL);
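
Rather than hard-coding WQ 0, the handler now derives the WQ index from the interrupt number: the WQ MSI-X vectors are allocated contiguously, so subtracting WQ0's vector (msix_entry[enic_msix_wq_intr(enic, 0)].vector) from irq yields the queue that fired. For instance, if WQ0's vector is 53 and the ISR runs for irq 55, wq_irq is 2 and the completion queue at enic_cq_wq(enic, 2) is serviced.
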
@@ -779,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct vnic_wq *wq = &enic->wq[0];
+ struct vnic_wq *wq;
unsigned long flags;
+ unsigned int txq_map;
if (skb->len <= 0) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map];
+
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
* which is very likely. In the off chance it's going to take
* more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -799,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- spin_lock_irqsave(&enic->wq_lock[0], flags);
+ spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
return NETDEV_TX_BUSY;
}
enic_queue_wq_skb(enic, wq, skb);
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
- spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
return NETDEV_TX_OK;
}
@@ -1293,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb_put(skb, bytes_written);
skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, q_number);
+ if (netdev->features & NETIF_F_RXHASH) {
+ skb->rxhash = rss_hash;
+ if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+ skb->l4_rxhash = true;
+ }
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
skb->csum = htons(checksum);
@@ -1637,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
enic_set_rx_mode(netdev);
- netif_wake_queue(netdev);
+ netif_tx_wake_all_queues(netdev);
for (i = 0; i < enic->rq_count; i++)
napi_enable(&enic->napi[i]);
@@ -2001,6 +1750,7 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
+ spin_lock(&enic->enic_api_lock);
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2009,6 +1759,8 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
+ spin_unlock(&enic->enic_api_lock);
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
}
@@ -2297,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* instance data is initialized to zero.
*/
- netdev = alloc_etherdev(sizeof(struct enic));
+ netdev = alloc_etherdev_mqs(sizeof(struct enic),
+ ENIC_RQ_MAX, ENIC_WQ_MAX);
if (!netdev)
return -ENOMEM;
@@ -2327,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 40-bit first, and
+ * limitation for the device. Try 64-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -2345,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 40);
+ "for consistent allocations, aborting\n", 64);
goto err_out_release_regions;
}
using_dac = 1;
@@ -2421,6 +2174,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
spin_lock_init(&enic->devcmd_lock);
+ spin_lock_init(&enic->enic_api_lock);
/*
* Set ingress vlan rewrite mode before vnic initialization
@@ -2462,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
+ netif_set_real_num_tx_queues(netdev, enic->wq_count);
+ netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
/* Setup notification timer, HW reset task, and wq locks
*/
@@ -2496,7 +2253,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
- netdev->ethtool_ops = &enic_ethtool_ops;
+ enic_set_ethtool_ops(netdev);
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (ENIC_SETTING(enic, LOOP)) {
@@ -2510,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ENIC_SETTING(enic, TSO))
netdev->hw_features |= NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+ if (ENIC_SETTING(enic, RSS))
+ netdev->hw_features |= NETIF_F_RXHASH;
if (ENIC_SETTING(enic, RXCSUM))
netdev->hw_features |= NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 25be2734c3f..69f60afd657 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
int offload_mode, int cq_entry, int sop, int eop, int loopback)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+ u8 desc_skip_cnt = 1;
+ u8 compressed_send = 0;
+ u64 wrid = 0;
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
@@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
(u16)vlan_tag,
(u8)loopback);
- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+ (u8)cq_entry, compressed_send, wrid);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
@@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
dma_addr_t dma_addr, unsigned int len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ u64 wrid = 0;
u8 type = os_buf_index ?
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
@@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
(u64)dma_addr | VNIC_PADDR_TARGET,
type, (u16)len);
- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+ vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
}
struct enic;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 97455c573db..69dd92598b7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
{
return vdev->res[type].count;
}
+EXPORT_SYMBOL(vnic_dev_get_res_count);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
return (char __iomem *)vdev->res[type].vaddr;
}
}
+EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
kfree(vdev);
}
}
+EXPORT_SYMBOL(vnic_dev_unregister);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ err_out:
vnic_dev_unregister(vdev);
return NULL;
}
+EXPORT_SYMBOL(vnic_dev_register);
+
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+ return vdev->pdev;
+}
+EXPORT_SYMBOL(vnic_dev_get_pdev);
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index f3d9b79ba77..e670029862a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_enable2(struct vnic_dev *vdev, int active);
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 23d555255cf..b9a0d78fd63 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -281,11 +281,25 @@ enum vnic_devcmd_cmd {
* 0 if no VIF-CONFIG-INFO TLV was ever received. */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /* Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
/* init_prov_info2:
* Variant of CMD_INIT_PROV_INFO, where it will not try to enable
* the vnic until CMD_ENABLE2 is issued.
* (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (u32)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
/* enable2:
@@ -339,16 +353,57 @@ enum vnic_devcmd_cmd {
CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
/*
- * cmd_set_mac_addr
- * set mac address
+ * Set the predefined mac address as default
* in:
* (u48)a0 = mac addr
- *
*/
CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info
+ */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /* Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /* Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /* Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /* Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /* Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /* Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
};
/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
#define CMD_ENABLE2_ACTIVE 0x1
/* flags for CMD_OPEN */
@@ -364,6 +419,9 @@ enum vnic_devcmd_cmd {
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
+/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
@@ -390,6 +448,7 @@ enum vnic_devcmd_error {
ERR_EMAXRES = 10,
ERR_ENOTSUPPORTED = 11,
ERR_EINPROGRESS = 12,
+ ERR_MAX
};
/*
@@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo {
u8 data[0];
};
+/* These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4) | \
+ FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __packed;
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __packed;
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __packed;
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __packed;
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_MAX
+};
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ } u;
+} __packed;
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+/* Maximum size of buffer to CMD_ADD_FILTER */
+#define FILTER_MAX_BUF_SIZE 100
+
+struct filter_tlv {
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
+};
+
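
As a rough illustration of the layout the CMD_ADD_FILTER comment above describes, the sketch below packs a filter TLV followed by an action TLV into a caller-supplied buffer. example_fill_filter_tlvs() is a hypothetical helper, not part of this patch; the caller would still DMA-map the buffer and pass its address and the returned length as a0/a1 of CMD_ADD_FILTER:

	static int example_fill_filter_tlvs(void *buf, const struct filter *flt,
					    u32 rq_idx)
	{
		struct filter_tlv *tlv = buf;
		struct filter_action *action;
		int len = 2 * sizeof(struct filter_tlv) +
			  sizeof(struct filter) + sizeof(struct filter_action);

		if (len > FILTER_MAX_BUF_SIZE)
			return -EINVAL;

		/* first TLV: the filter itself */
		tlv->type = CLSF_TLV_FILTER;
		tlv->length = sizeof(struct filter);
		memcpy(tlv->val, flt, sizeof(struct filter));

		/* second TLV: steer matching packets to rq_idx */
		tlv = (struct filter_tlv *)((char *)tlv->val +
					    sizeof(struct filter));
		tlv->type = CLSF_TLV_ACTION;
		tlv->length = sizeof(struct filter_action);
		action = (struct filter_action *)tlv->val;
		action->type = FILTER_ACTION_RQ_STEERING;
		action->u.rq_idx = rq_idx;

		return len;
	}
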
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 7e1488fc8ab..36a2ed606c9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -30,12 +30,9 @@
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
- vdev = rq->vdev;
-
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!rq->bufs[i])
@@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u32 fetch_index;
+ u32 fetch_index = 0;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 2056586f4d4..ee7bc95af27 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -72,6 +72,7 @@ struct vnic_rq_buf {
unsigned int len;
unsigned int index;
void *desc;
+ uint64_t wr_id;
};
struct vnic_rq {
@@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len)
+ dma_addr_t dma_addr, unsigned int len,
+ uint64_t wrid)
{
struct vnic_rq_buf *buf = rq->to_use;
@@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
rq->to_use = buf;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 5e0d7a2be9b..3e6b8d54daf 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -30,12 +30,9 @@
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
- vdev = wq->vdev;
-
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!wq->bufs[i])
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 7dd937ac11c..2c6c70804a3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -58,6 +58,10 @@ struct vnic_wq_buf {
unsigned int index;
int sop;
void *desc;
+ uint64_t wr_id; /* Cookie */
+ uint8_t cq_entry; /* Gets completion event from hw */
+ uint8_t desc_skip_cnt; /* Num descs to occupy */
+ uint8_t compressed_send; /* Both hdr and payload in one desc */
};
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
@@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
- unsigned int len, int sop, int eop)
+ unsigned int len, int sop, int eop,
+ uint8_t desc_skip_cnt, uint8_t cq_entry,
+ uint8_t compressed_send, uint64_t wrid)
{
struct vnic_wq_buf *buf = wq->to_use;
buf->sop = sop;
+ buf->cq_entry = cq_entry;
+ buf->compressed_send = compressed_send;
+ buf->desc_skip_cnt = desc_skip_cnt;
buf->os_buf = eop ? os_buf : NULL;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
if (eop) {
@@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
}
wq->to_use = buf;
- wq->ring.desc_avail--;
+ wq->ring.desc_avail -= desc_skip_cnt;
}
static inline void vnic_wq_service(struct vnic_wq *wq,
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a13b312b50f..5f5896e522d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1384,7 +1384,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
static int
dm9000_probe(struct platform_device *pdev)
{
- struct dm9000_plat_data *pdata = pdev->dev.platform_data;
+ struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct board_info *db; /* Point a board information structure */
struct net_device *ndev;
const unsigned char *mac_src;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4c830030fb0..263b92c00cb 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1321,7 +1321,7 @@ de4x5_open(struct net_device *dev)
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
+ if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
disable_ast(dev);
@@ -2319,7 +2319,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
struct net_device *dev;
u_long iobase;
- dev = dev_get_drvdata(&pdev->dev);
+ dev = pci_get_drvdata(pdev);
iobase = dev->base_addr;
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c94152f1c6b..4e8cfa2ac80 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct tulip_private *tp;
/* See note below on the multiport cards. */
- static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+ static unsigned char last_phys_addr[ETH_ALEN] = {
+ 0x00, 'L', 'i', 'n', 'u', 'x'
+ };
static int last_irq;
static int multiport_cnt; /* For four-port boards w/one EEPROM */
int i, irq;
@@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == 6)
- memcpy(dev->dev_addr, addr, 6);
+ if (addr && len == ETH_ALEN)
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 50d9c631593..bf3bf6f22c9 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ disable_irq(np->pci_dev->irq);
+ intr_handler(np->pci_dev->irq, dev);
+ enable_irq(np->pci_dev->irq);
+}
+#endif
+
static const struct net_device_ops netdev_ops = {
.ndo_open = netdev_open,
.ndo_stop = netdev_close,
@@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = change_mtu,
.ndo_set_mac_address = sundance_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sundance_poll_controller,
+#endif
};
static int sundance_probe1(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c827b1b6b1c..db020230bd0 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.6.62.0u"
+#define DRV_VER "4.9.134.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -88,6 +88,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
+#define BE_UMC_NUM_VLANS_SUPPORTED 15
#define BE_MAX_EQD 96u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -99,14 +100,18 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
-#define BE3_MAX_RSS_QS 8
#define BE2_MAX_RSS_QS 4
-#define MAX_RSS_QS BE3_MAX_RSS_QS
-#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define BE3_MAX_RSS_QS 16
+#define BE3_MAX_TX_QS 16
+#define BE3_MAX_EVT_QS 16
+
+#define MAX_RX_QS 32
+#define MAX_EVT_QS 32
+#define MAX_TX_QS 32
-#define MAX_TX_QS 8
#define MAX_ROCE_EQS 5
-#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
+#define MAX_MSIX_VECTORS 32
+#define MIN_MSIX_VECTORS 1
#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -189,6 +194,7 @@ struct be_eq_obj {
u32 cur_eqd; /* in usecs */
u8 idx; /* array index */
+ u8 msix_idx;
u16 tx_budget;
u16 spurious_intr;
struct napi_struct napi;
@@ -328,6 +334,7 @@ enum vf_state {
#define BE_FLAGS_LINK_STATUS_INIT 1
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
+#define BE_FLAGS_VLAN_PROMISC (1 << 4)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_UC_PMAC_COUNT 30
#define BE_VF_UC_PMAC_COUNT 2
@@ -352,6 +359,18 @@ struct phy_info {
u32 supported;
};
+struct be_resources {
+ u16 max_vfs; /* Total VFs "really" supported by FW/HW */
+ u16 max_mcast_mac;
+ u16 max_tx_qs;
+ u16 max_rss_qs;
+ u16 max_rx_qs;
+ u16 max_uc_mac; /* Max UC MACs programmable */
+ u16 max_vlans; /* Number of vlans supported */
+ u16 max_evt_qs;
+ u32 if_cap_flags;
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -369,18 +388,19 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- u32 num_msix_vec;
- u32 num_evt_qs;
- struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
+ u16 cfg_num_qs; /* configured via set-channels */
+ u16 num_evt_qs;
+ u16 num_msix_vec;
+ struct be_eq_obj eq_obj[MAX_EVT_QS];
struct msix_entry msix_entries[MAX_MSIX_VECTORS];
bool isr_registered;
/* TX Rings */
- u32 num_tx_qs;
+ u16 num_tx_qs;
struct be_tx_obj tx_obj[MAX_TX_QS];
/* Rx rings */
- u32 num_rx_qs;
+ u16 num_rx_qs;
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
@@ -430,8 +450,8 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- u32 num_vfs; /* Number of VFs provisioned by PF driver */
- u32 dev_num_vfs; /* Number of VFs supported by HW */
+ struct be_resources res; /* resources available for the func */
+ u16 num_vfs; /* Number of VFs provisioned by PF */
u8 virtfn;
struct be_vf_cfg *vf_cfg;
bool be3_native;
@@ -446,21 +466,13 @@ struct be_adapter {
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
- u16 max_mcast_mac;
- u16 max_tx_queues;
- u16 max_rss_queues;
- u16 max_rx_queues;
- u16 max_pmac_cnt;
- u16 max_vlans;
- u16 max_event_queues;
- u32 if_cap_flags;
u8 pf_number;
u64 rss_flags;
};
#define be_physfn(adapter) (!adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
-#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \
+#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
be_physfn(adapter))
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
@@ -469,6 +481,26 @@ struct be_adapter {
#define ON 1
#define OFF 0
+#define be_max_vlans(adapter) (adapter->res.max_vlans)
+#define be_max_uc(adapter) (adapter->res.max_uc_mac)
+#define be_max_mc(adapter) (adapter->res.max_mcast_mac)
+#define be_max_vfs(adapter) (adapter->res.max_vfs)
+#define be_max_rss(adapter) (adapter->res.max_rss_qs)
+#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
+#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
+#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
+#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
+#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
+
+static inline u16 be_max_qs(struct be_adapter *adapter)
+{
+ /* If no RSS, need at least the one def RXQ */
+ u16 num = max_t(u16, be_max_rss(adapter), 1);
+
+ num = min(num, be_max_eqs(adapter));
+ return min_t(u16, num, num_online_cpus());
+}
+
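+
+Worked example: with be_max_rss(adapter) = 16, be_max_eqs(adapter) = 8 and 12 online CPUs, be_max_qs() returns min(max(16, 1), 8, 12) = 8, i.e. the queue count ends up bounded by the event queues rather than by RSS capability or CPU count.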
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
@@ -672,6 +704,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);
extern bool be_pause_supported(struct be_adapter *adapter);
extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+int be_update_queues(struct be_adapter *adapter);
+int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ec5d74ad44..bd0e0c0bbcd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -180,6 +180,9 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
dev_err(&adapter->pdev->dev,
"opcode %d-%d failed:status %d-%d\n",
opcode, subsystem, compl_status, extd_status);
+
+ if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ return extd_status;
}
}
done:
@@ -258,7 +261,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
(struct be_async_event_grp5_pvid_state *)evt);
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+ dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
+ event_type);
break;
}
}
@@ -279,7 +283,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+ dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+ event_type);
break;
}
}
@@ -631,6 +636,12 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
+ unsigned long addr)
+{
+ wrb->tag0 = addr & 0xFFFFFFFF;
+ wrb->tag1 = upper_32_bits(addr);
+}
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
@@ -639,17 +650,12 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
struct be_sge *sge;
- unsigned long addr = (unsigned long)req_hdr;
- u64 req_addr = addr;
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
req_hdr->version = 0;
-
- wrb->tag0 = req_addr & 0xFFFFFFFF;
- wrb->tag1 = upper_32_bits(req_addr);
-
+ fill_wrb_tags(wrb, (ulong) req_hdr);
wrb->payload_length = cmd_len;
if (mem) {
wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -676,31 +682,6 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
}
}
-/* Converts interrupt delay in microseconds to multiplier value */
-static u32 eq_delay_to_mult(u32 usec_delay)
-{
-#define MAX_INTR_RATE 651042
- const u32 round = 10;
- u32 multiplier;
-
- if (usec_delay == 0)
- multiplier = 0;
- else {
- u32 interrupt_rate = 1000000 / usec_delay;
- /* Max delay, corresponding to the lowest interrupt rate */
- if (interrupt_rate == 0)
- multiplier = 1023;
- else {
- multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
- multiplier /= interrupt_rate;
- /* Round the multiplier to the closest value.*/
- multiplier = (multiplier + round/2) / round;
- multiplier = min(multiplier, (u32)1023);
- }
- }
- return multiplier;
-}
-
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
@@ -728,6 +709,78 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
return wrb;
}
+static bool use_mcc(struct be_adapter *adapter)
+{
+ return adapter->mcc_obj.q.created;
+}
+
+/* Must be used only in process context */
+static int be_cmd_lock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter)) {
+ spin_lock_bh(&adapter->mcc_lock);
+ return 0;
+ } else {
+ return mutex_lock_interruptible(&adapter->mbox_lock);
+ }
+}
+
+/* Must be used only in process context */
+static void be_cmd_unlock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter))
+ spin_unlock_bh(&adapter->mcc_lock);
+ else
+ return mutex_unlock(&adapter->mbox_lock);
+}
+
+static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+
+ if (use_mcc(adapter)) {
+ dest_wrb = wrb_from_mccq(adapter);
+ if (!dest_wrb)
+ return NULL;
+ } else {
+ dest_wrb = wrb_from_mbox(adapter);
+ }
+
+ memcpy(dest_wrb, wrb, sizeof(*wrb));
+ if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
+ fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
+
+ return dest_wrb;
+}
+
+/* Must be used only in process context */
+static int be_cmd_notify_wait(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+ int status;
+
+ status = be_cmd_lock(adapter);
+ if (status)
+ return status;
+
+ dest_wrb = be_cmd_copy(adapter, wrb);
+ if (!dest_wrb)
+ return -EBUSY;
+
+ if (use_mcc(adapter))
+ status = be_mcc_notify_wait(adapter);
+ else
+ status = be_mbox_notify_wait(adapter);
+
+ if (!status)
+ memcpy(wrb, dest_wrb, sizeof(*wrb));
+
+ be_cmd_unlock(adapter);
+ return status;
+}
+
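+
+Taken together, be_cmd_lock()/be_cmd_copy()/be_cmd_notify_wait() let a command be composed in a stack wrb and then routed to whichever channel exists: the MCC queue once it has been created, or the bootstrap mailbox before that (e.g. during early probe or error recovery). The completed wrb is copied back, so callers such as be_cmd_txq_create() and be_cmd_if_create() below read their embedded responses the same way on either path.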
/* Tell fw we're about to start firing cmds by writing a
* special pattern across the wrb hdr; uses mbox
*/
@@ -788,13 +841,12 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
return status;
}
-int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay)
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eq_create *req;
- struct be_dma_mem *q_mem = &eq->dma_mem;
- int status;
+ struct be_dma_mem *q_mem = &eqo->q.dma_mem;
+ int status, ver = 0;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -805,15 +857,18 @@ int be_cmd_eq_create(struct be_adapter *adapter,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ /* Support for EQ_CREATEv2 available only SH-R onwards */
+ if (!(BEx_chip(adapter) || lancer_chip(adapter)))
+ ver = 2;
+
+ req->hdr.version = ver;
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
/* 4byte eqe*/
AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
AMAP_SET_BITS(struct amap_eq_context, count, req->context,
- __ilog2_u32(eq->len/256));
- AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
- eq_delay_to_mult(eq_delay));
+ __ilog2_u32(eqo->q.len / 256));
be_dws_cpu_to_le(req->context, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -821,8 +876,10 @@ int be_cmd_eq_create(struct be_adapter *adapter,
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
- eq->id = le16_to_cpu(resp->eq_id);
- eq->created = true;
+ eqo->q.id = le16_to_cpu(resp->eq_id);
+ eqo->msix_idx =
+ (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
+ eqo->q.created = true;
}
mutex_unlock(&adapter->mbox_lock);
@@ -1010,9 +1067,9 @@ static u32 be_encoded_q_len(int q_len)
return len_encoded;
}
-int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1068,9 +1125,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
return status;
}
-int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1128,25 +1185,16 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_eth_tx_create *req;
struct be_queue_info *txq = &txo->q;
struct be_queue_info *cq = &txo->cq;
struct be_dma_mem *q_mem = &txq->dma_mem;
int status, ver = 0;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1164,12 +1212,11 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req->cq_id = cpu_to_le16(cq->id);
req->queue_size = be_encoded_q_len(txq->len);
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
ver = req->hdr.version;
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
+ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
txq->id = le16_to_cpu(resp->cid);
if (ver == 2)
txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1178,9 +1225,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
txq->created = true;
}
-err:
- spin_unlock_bh(&adapter->mcc_lock);
-
return status;
}
@@ -1309,40 +1353,32 @@ err:
}
/* Create an rx filtering policy configuration on an i/f
- * Uses MCCQ
+ * Will use MBOX only if MCCQ has not been created.
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
u32 *if_handle, u32 domain)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_if_create *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
-
req->pmac_invalid = true;
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
+ struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- }
-err:
- spin_unlock_bh(&adapter->mcc_lock);
+ /* Hack to retrieve VF's pmac-id on BE3 */
+ if (BE3_chip(adapter) && !be_physfn(adapter))
+ adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
+ }
return status;
}
@@ -1460,6 +1496,12 @@ static int be_mac_to_link_speed(int mac_speed)
return 1000;
case PHY_LINK_SPEED_10GBPS:
return 10000;
+ case PHY_LINK_SPEED_20GBPS:
+ return 20000;
+ case PHY_LINK_SPEED_25GBPS:
+ return 25000;
+ case PHY_LINK_SPEED_40GBPS:
+ return 40000;
}
return 0;
}
@@ -1520,7 +1562,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_cntl_addnl_attribs *req;
- int status;
+ int status = 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1773,6 +1815,12 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ } else if (flags & BE_FLAGS_VLAN_PROMISC) {
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
+
+ if (value == ON)
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
} else {
struct netdev_hw_addr *ha;
int i = 0;
@@ -1785,8 +1833,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
*/
req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
- adapter->if_cap_flags);
-
+ be_if_cap_flags(adapter));
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
netdev_for_each_mc_addr(ha, adapter->netdev)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
@@ -2444,6 +2491,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
le16_to_cpu(resp_phy_info->fixed_speeds_supported);
adapter->phy.misc_params =
le32_to_cpu(resp_phy_info->misc_params);
+
+ if (BE2_chip(adapter)) {
+ adapter->phy.fixed_speeds_supported =
+ BE_SUPPORTED_SPEED_10GBPS |
+ BE_SUPPORTED_SPEED_1GBPS;
+ }
}
pci_free_consistent(adapter->pdev, cmd.size,
cmd.va, cmd.dma);
@@ -2606,9 +2659,44 @@ err:
return status;
}
-/* Uses synchronous MCCQ */
+/* Set privilege(s) for a function */
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_fn_privileges *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+ req->hdr.domain = domain;
+ if (lancer_chip(adapter))
+ req->privileges_lancer = cpu_to_le32(privileges);
+ else
+ req->privileges = cpu_to_le32(privileges);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* pmac_id_valid: true => pmac_id is supplied and the MAC address for that
+ *			   pmac_id is returned.
+ * pmac_id_valid: false => the active pmac_id or the first MAC address is
+ *			    returned; if an active pmac_id is found, it is
+ *			    returned in *pmac_id and *pmac_id_valid is set
+ *			    to true.
+ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2644,12 +2732,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
- req->perm_override = 1;
+ if (*pmac_id_valid) {
+ req->mac_id = cpu_to_le32(*pmac_id);
+ req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->perm_override = 0;
+ } else {
+ req->perm_override = 1;
+ }
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_mac_list *resp =
get_mac_list_cmd.va;
+
+ if (*pmac_id_valid) {
+ memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
+ ETH_ALEN);
+ goto out;
+ }
+
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
* or one or more true or pseudo permanent mac addresses.
@@ -2667,14 +2768,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
* is 6 bytes
*/
if (mac_addr_size == sizeof(u32)) {
- *pmac_id_active = true;
+ *pmac_id_valid = true;
mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
*pmac_id = le32_to_cpu(mac_id);
goto out;
}
}
/* If no active mac_id found, return first mac addr */
- *pmac_id_active = false;
+ *pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
}
@@ -2686,6 +2787,41 @@ out:
return status;
}
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+{
+ bool active = true;
+
+ if (BEx_chip(adapter))
+ return be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, curr_pmac_id);
+ else
+ /* Fetch the MAC address using pmac_id */
+ return be_cmd_get_mac_from_list(adapter, mac, &active,
+ &curr_pmac_id, 0);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+ int status;
+ bool pmac_valid = false;
+
+ memset(mac, 0, ETH_ALEN);
+
+ if (BEx_chip(adapter)) {
+ if (be_physfn(adapter))
+ status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+ 0);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, 0);
+ } else {
+ status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+ NULL, 0);
+ }
+
+ return status;
+}
+
/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain)
@@ -2729,8 +2865,27 @@ err:
return status;
}
+/* Wrapper to delete any active MACs and provision the new MAC.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+ bool active_mac = false;
+ u8 old_mac[ETH_ALEN];
+ u32 pmac_id;
+ int status;
+
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, dom);
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+ return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id)
+ u32 domain, u16 intf_id, u16 hsw_mode)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_hsw_config *req;
@@ -2757,6 +2912,13 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
}
+ if (!BEx_chip(adapter) && hsw_mode) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
+ AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
+ ctxt, hsw_mode);
+ }
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -2768,7 +2930,7 @@ err:
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id)
+ u32 domain, u16 intf_id, u8 *mode)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_hsw_config *req;
@@ -2791,9 +2953,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
- AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
- intf_id);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, intf_id);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
+
+ if (!BEx_chip(adapter)) {
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
+ }
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -2804,7 +2972,11 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
pvid, &resp->context);
- *pvid = le16_to_cpu(vid);
+ if (pvid)
+ *pvid = le16_to_cpu(vid);
+ if (mode)
+ *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ port_fwd_type, &resp->context);
}
err:
@@ -2967,30 +3139,63 @@ err:
return status;
}
-static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
- u32 max_buf_size)
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
{
- struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
int i;
for (i = 0; i < desc_count; i++) {
- desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
- if (((void *)desc + desc->desc_len) >
- (void *)(buf + max_buf_size))
- return NULL;
+ if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
+ return (struct be_nic_res_desc *)hdr;
- if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
- desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
- return desc;
-
- desc = (void *)desc + desc->desc_len;
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
}
+ return NULL;
+}
+static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
+ u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_num == devfn)
+ return pcie;
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
return NULL;
}
+static void be_copy_nic_desc(struct be_resources *res,
+ struct be_nic_res_desc *desc)
+{
+ res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
+ res->max_vlans = le16_to_cpu(desc->vlan_count);
+ res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+ res->max_tx_qs = le16_to_cpu(desc->txq_count);
+ res->max_rss_qs = le16_to_cpu(desc->rssq_count);
+ res->max_rx_qs = le16_to_cpu(desc->rq_count);
+ res->max_evt_qs = le16_to_cpu(desc->eq_count);
+ /* Clear flags that the driver is not interested in */
+ res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
+ BE_IF_CAP_FLAGS_WANT;
+ /* Need 1 RXQ as the default RXQ */
+ if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
+ res->max_rss_qs -= 1;
+}
+
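
The struct be_resources container that be_copy_nic_desc() fills here, and that the be_max_txqs()/be_max_vfs()-style accessors read later in be_main.c, is declared in be.h and is not part of this hunk. A rough sketch inferred only from the fields touched in this patch; the exact types, ordering and any additional members are assumptions:

/* Inferred sketch of struct be_resources (authoritative definition in be.h) */
struct be_resources {
	u16 max_uc_mac;		/* unicast MAC filter entries */
	u16 max_mcast_mac;
	u16 max_vlans;
	u16 max_tx_qs;
	u16 max_rss_qs;
	u16 max_rx_qs;
	u16 max_evt_qs;
	u16 max_vfs;		/* filled from the PCIe descriptor */
	u32 if_cap_flags;	/* pruned with BE_IF_CAP_FLAGS_WANT */
};

Treat this only as a reading aid for the hunks below; the real layout and the accessors live in be.h.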
/* Uses Mbox */
-int be_cmd_get_func_config(struct be_adapter *adapter)
+int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_func_config *req;
@@ -3029,28 +3234,16 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_func_config *resp = cmd.va;
u32 desc_count = le32_to_cpu(resp->desc_count);
- struct be_nic_resource_desc *desc;
+ struct be_nic_res_desc *desc;
- desc = be_get_nic_desc(resp->func_param, desc_count,
- sizeof(resp->func_param));
+ desc = be_get_nic_desc(resp->func_param, desc_count);
if (!desc) {
status = -EINVAL;
goto err;
}
adapter->pf_number = desc->pf_num;
- adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
- adapter->max_vlans = le16_to_cpu(desc->vlan_count);
- adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
- adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
- adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
- adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
-
- adapter->max_event_queues = le16_to_cpu(desc->eq_count);
- adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
-
- /* Clear flags that driver is not interested in */
- adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT;
+ be_copy_nic_desc(res, desc);
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3060,8 +3253,8 @@ err:
}
/* Uses mbox */
-int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3088,8 +3281,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
}
/* Uses sync mcc */
-int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3121,54 +3314,51 @@ err:
}
/* Uses sync mcc, if MCCQ is already created otherwise mbox */
-int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u16 *txq_count, u8 domain)
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res, u8 domain)
{
+ struct be_cmd_resp_get_profile_config *resp;
+ struct be_pcie_res_desc *pcie;
+ struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
+ u32 desc_count;
int status;
memset(&cmd, 0, sizeof(struct be_dma_mem));
- if (!lancer_chip(adapter))
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
- else
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ if (!cmd.va)
return -ENOMEM;
- }
if (!mccq->created)
status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
else
status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
- if (!status) {
- struct be_cmd_resp_get_profile_config *resp = cmd.va;
- u32 desc_count = le32_to_cpu(resp->desc_count);
- struct be_nic_resource_desc *desc;
+ if (status)
+ goto err;
- desc = be_get_nic_desc(resp->func_param, desc_count,
- sizeof(resp->func_param));
+ resp = cmd.va;
+ desc_count = le32_to_cpu(resp->desc_count);
+
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
+ if (pcie)
+ res->max_vfs = le16_to_cpu(pcie->num_vfs);
+
+ nic = be_get_nic_desc(resp->func_param, desc_count);
+ if (nic)
+ be_copy_nic_desc(res, nic);
- if (!desc) {
- status = -EINVAL;
- goto err;
- }
- if (cap_flags)
- *cap_flags = le32_to_cpu(desc->cap_flags);
- if (txq_count)
- *txq_count = le32_to_cpu(desc->txq_count);
- }
err:
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}
-/* Uses sync mcc */
+/* Currently only Lancer uses this command, and it supports version 0 only.
+ * Uses sync mcc.
+ */
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain)
{
@@ -3189,12 +3379,10 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
wrb, NULL);
-
req->hdr.domain = domain;
req->desc_count = cpu_to_le32(1);
-
- req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
- req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+ req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+ req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
req->nic_desc.pf_num = adapter->pf_number;
req->nic_desc.vf_num = domain;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 1b3b9e88641..108ca8abf0a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -60,6 +60,8 @@ enum {
MCC_STATUS_NOT_SUPPORTED = 66
};
+#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
+
#define CQE_STATUS_COMPL_MASK 0xFFFF
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK 0xFFFF
@@ -202,6 +204,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -306,7 +309,7 @@ struct be_cmd_req_eq_create {
struct be_cmd_resp_eq_create {
struct be_cmd_resp_hdr resp_hdr;
u16 eq_id; /* sword */
- u16 rsvd0; /* sword */
+ u16 msix_idx; /* available only in v2 */
} __packed;
/******************** Mac query ***************************/
@@ -965,7 +968,10 @@ enum {
PHY_LINK_SPEED_10MBPS = 0x1,
PHY_LINK_SPEED_100MBPS = 0x2,
PHY_LINK_SPEED_1GBPS = 0x3,
- PHY_LINK_SPEED_10GBPS = 0x4
+ PHY_LINK_SPEED_10GBPS = 0x4,
+ PHY_LINK_SPEED_20GBPS = 0x5,
+ PHY_LINK_SPEED_25GBPS = 0x6,
+ PHY_LINK_SPEED_40GBPS = 0x7
};
struct be_cmd_resp_link_status {
@@ -1480,6 +1486,11 @@ struct be_cmd_resp_get_fn_privileges {
u32 privilege_mask;
};
+struct be_cmd_req_set_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 privileges; /* Used by BE3, SH-R */
+ u32 privileges_lancer; /* Used by Lancer */
+};
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
@@ -1524,12 +1535,17 @@ struct be_cmd_req_set_mac_list {
} __packed;
/*********************** HSW Config ***********************/
+#define PORT_FWD_TYPE_VEPA 0x3
+#define PORT_FWD_TYPE_VEB 0x2
+
struct amap_set_hsw_context {
u8 interface_id[16];
u8 rsvd0[14];
u8 pvid_valid;
- u8 rsvd1;
- u8 rsvd2[16];
+ u8 pport;
+ u8 rsvd1[6];
+ u8 port_fwd_type[3];
+ u8 rsvd2[7];
u8 pvid[16];
u8 rsvd3[32];
u8 rsvd4[32];
@@ -1554,7 +1570,9 @@ struct amap_get_hsw_req_context {
} __packed;
struct amap_get_hsw_resp_context {
- u8 rsvd1[16];
+ u8 rsvd0[6];
+ u8 port_fwd_type[3];
+ u8 rsvd1[7];
u8 pvid[16];
u8 rsvd2[32];
u8 rsvd3[32];
@@ -1709,11 +1727,13 @@ struct be_cmd_req_set_ext_fat_caps {
struct be_fat_conf_params set_params;
};
-#define RESOURCE_DESC_SIZE 88
+#define RESOURCE_DESC_SIZE_V0 72
+#define RESOURCE_DESC_SIZE_V1 88
+#define PCIE_RESOURCE_DESC_TYPE_V0 0x40
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
+#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
-#define MAX_RESOURCE_DESC 4
-#define MAX_RESOURCE_DESC_V1 32
+#define MAX_RESOURCE_DESC 264
/* QOS unit number */
#define QUN 4
@@ -1722,9 +1742,30 @@ struct be_cmd_req_set_ext_fat_caps {
/* No save */
#define NOSV 7
-struct be_nic_resource_desc {
+struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
+} __packed;
+
+struct be_pcie_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u16 rsvd1;
+ u8 pf_num;
+ u8 rsvd2;
+ u32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ u16 num_vfs;
+ u16 rsvd5;
+ u32 rsvd6[17];
+} __packed;
+
+struct be_nic_res_desc {
+ struct be_res_desc_hdr hdr;
u8 rsvd1;
u8 flags;
u8 vf_num;
@@ -1752,8 +1793,8 @@ struct be_nic_resource_desc {
u8 acpi_params;
u8 wol_param;
u16 rsvd7;
- u32 rsvd8[3];
-};
+ u32 rsvd8[7];
+} __packed;
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
@@ -1762,7 +1803,7 @@ struct be_cmd_req_get_func_config {
struct be_cmd_resp_get_func_config {
struct be_cmd_resp_hdr hdr;
u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
#define ACTIVE_PROFILE_TYPE 0x2
@@ -1774,26 +1815,20 @@ struct be_cmd_req_get_profile_config {
};
struct be_cmd_resp_get_profile_config {
- struct be_cmd_req_hdr hdr;
- u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
-};
-
-struct be_cmd_resp_get_profile_config_v1 {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
u32 desc_count;
- struct be_nic_resource_desc nic_desc;
+ struct be_nic_res_desc nic_desc;
};
struct be_cmd_resp_set_profile_config {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
};
struct be_cmd_enable_disable_vf {
@@ -1842,8 +1877,7 @@ extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay);
+extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
extern int be_cmd_cq_create(struct be_adapter *adapter,
struct be_queue_info *cq, struct be_queue_info *eq,
bool no_delay, int num_cqe_dma_coalesce);
@@ -1927,15 +1961,22 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
u32 *privilege, u32 domain);
+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
+ u32 privileges, u32 vf_num);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id,
u8 domain);
+extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
+ u8 *mac);
+extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
+extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
+ u32 dom);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id);
+ u32 domain, u16 intf_id, u16 hsw_mode);
extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id);
+ u32 domain, u16 intf_id, u8 *mode);
extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd);
@@ -1948,10 +1989,10 @@ extern int lancer_initiate_dump(struct be_adapter *adapter);
extern bool dump_present(struct be_adapter *adapter);
extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
-extern int be_cmd_get_func_config(struct be_adapter *adapter);
-extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u16 *txq_count, u8 domain);
-
+int be_cmd_get_func_config(struct be_adapter *adapter,
+ struct be_resources *res);
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res, u8 domain);
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
extern int be_cmd_get_if_id(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4f8c941217c..b440a1fac77 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return status;
}
+static void be_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ch->combined_count = adapter->num_evt_qs;
+ ch->max_combined = be_max_qs(adapter);
+}
+
+static int be_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (ch->rx_count || ch->tx_count || ch->other_count ||
+ !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ return -EINVAL;
+
+ adapter->cfg_num_qs = ch->combined_count;
+
+ return be_update_queues(adapter);
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_channels = be_get_channels,
+ .set_channels = be_set_channels
};
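
The new get_channels/set_channels hooks are driven from userspace through the standard ethtool channels API, e.g. "ethtool -L ethX combined N". A hedged userspace sketch of the same request issued directly via the SIOCETHTOOL ioctl; set_combined_channels(), the interface name and the count are placeholders, and error handling is trimmed:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Ask for 'count' combined channels on 'ifname'; in the kernel this lands
 * in be_set_channels() above, which then calls be_update_queues().
 */
static int set_combined_channels(const char *ifname, unsigned int count)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_SCHANNELS };
	struct ifreq ifr;
	int sock, ret;

	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ch.combined_count = count;	/* rx/tx/other stay 0: driver rejects them */
	ifr.ifr_data = (void *)&ch;

	ret = ioctl(sock, SIOCETHTOOL, &ifr);
	close(sock);
	return ret;
}

Whether the request is accepted still depends on be_max_qs(), i.e. on the per-function limits discovered by be_get_resources() in be_main.c.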
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3d91a5ec61a..2c38cc40211 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,6 +21,7 @@
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
+#include <linux/if_bridge.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -145,8 +146,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL | __GFP_ZERO);
+ mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -247,54 +248,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
struct sockaddr *addr = p;
- int status = 0;
- u8 current_mac[ETH_ALEN];
- u32 pmac_id = adapter->pmac_id[0];
- bool active_mac = true;
+ int status;
+ u8 mac[ETH_ALEN];
+ u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- /* For BE VF, MAC address is already activated by PF.
- * Hence only operation left is updating netdev->devaddr.
- * Update it if user is passing the same MAC which was used
- * during configuring VF MAC from PF(Hypervisor).
+ /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
+ * privilege or if the PF did not provision the new MAC address.
+ * On BE3, this cmd will always fail if the VF doesn't have the
+ * FILTMGMT privilege. That failure is OK only if the PF has already
+ * programmed the MAC for the VF.
*/
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = be_cmd_mac_addr_query(adapter, current_mac,
- false, adapter->if_handle, 0);
- if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
- goto done;
- else
- goto err;
- }
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle, &adapter->pmac_id[0], 0);
+ if (!status) {
+ curr_pmac_id = adapter->pmac_id[0];
- if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
- goto done;
+ /* Delete the old programmed MAC. This call may fail if the
+ * old MAC was already deleted by the PF driver.
+ */
+ if (adapter->pmac_id[0] != old_pmac_id)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ old_pmac_id, 0);
+ }
- /* For Lancer check if any MAC is active.
- * If active, get its mac id.
+ /* Decide if the new MAC is successfully activated only after
+ * querying the FW
*/
- if (lancer_chip(adapter) && !be_physfn(adapter))
- be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
- &pmac_id, 0);
-
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle,
- &adapter->pmac_id[0], 0);
-
+ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
if (status)
goto err;
- if (active_mac)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- pmac_id, 0);
-done:
+ /* The MAC change did not happen, either due to lack of privilege
+ * or because the PF didn't pre-provision the MAC.
+ */
+ if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ status = -EPERM;
+ goto err;
+ }
+
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_info(dev, "MAC address changed to %pM\n", mac);
return 0;
err:
- dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
+ dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
return status;
}
@@ -472,7 +473,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
ACCESS_ONCE(*acc) = newacc;
}
-void populate_erx_stats(struct be_adapter *adapter,
+static void populate_erx_stats(struct be_adapter *adapter,
struct be_rx_obj *rxo,
u32 erx_stat)
{
@@ -854,11 +855,11 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
unsigned int eth_hdr_len;
struct iphdr *ip;
- /* Lancer ASIC has a bug wherein packets that are 32 bytes or less
+ /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes or less
* may cause a transmit stall on that port. So the work-around is to
- * pad such packets to a 36-byte length.
+ * pad short packets (<= 32 bytes) to a 36-byte length.
*/
- if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
+ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
if (skb_padto(skb, 36))
goto tx_drop;
skb->len = 36;
@@ -1001,7 +1002,7 @@ static int be_vid_config(struct be_adapter *adapter)
if (adapter->promiscuous)
return 0;
- if (adapter->vlans_added > adapter->max_vlans)
+ if (adapter->vlans_added > be_max_vlans(adapter))
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
@@ -1012,18 +1013,40 @@ static int be_vid_config(struct be_adapter *adapter)
status = be_cmd_vlan_config(adapter, adapter->if_handle,
vids, num, 1, 0);
- /* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
- goto set_vlan_promisc;
+ /* Set to VLAN promisc mode as setting VLAN filter failed */
+ if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ goto set_vlan_promisc;
+ dev_err(&adapter->pdev->dev,
+ "Setting HW VLAN filtering failed.\n");
+ } else {
+ if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
+ /* Turn off VLAN promisc mode so HW VLAN filtering is re-enabled */
+ status = be_cmd_rx_filter(adapter,
+ BE_FLAGS_VLAN_PROMISC, OFF);
+ if (!status) {
+ dev_info(&adapter->pdev->dev,
+ "Disabling VLAN Promiscuous mode.\n");
+ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+ dev_info(&adapter->pdev->dev,
+ "Re-Enabling HW VLAN filtering\n");
+ }
+ }
}
return status;
set_vlan_promisc:
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- NULL, 0, 1, 1);
+ dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
+
+ status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
+ if (!status) {
+ dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
+ dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
+ adapter->flags |= BE_FLAGS_VLAN_PROMISC;
+ } else
+ dev_err(&adapter->pdev->dev,
+ "Failed to enable VLAN Promiscuous mode.\n");
return status;
}
@@ -1032,17 +1055,13 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = -EINVAL;
- goto ret;
- }
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
status = be_vid_config(adapter);
if (!status)
@@ -1058,17 +1077,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = -EINVAL;
- goto ret;
- }
-
/* Packets with VID 0 are always received by Lancer by default */
if (lancer_chip(adapter) && vid == 0)
goto ret;
adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= adapter->max_vlans)
+ if (adapter->vlans_added <= be_max_vlans(adapter))
status = be_vid_config(adapter);
if (!status)
@@ -1101,7 +1115,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > adapter->max_mcast_mac) {
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
goto done;
}
@@ -1115,7 +1129,7 @@ static void be_set_rx_mode(struct net_device *netdev)
adapter->pmac_id[i], 0);
}
- if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
+ if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
adapter->promiscuous = true;
goto done;
@@ -1146,9 +1160,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- bool active_mac = false;
- u32 pmac_id;
- u8 old_mac[ETH_ALEN];
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1156,20 +1167,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (lancer_chip(adapter)) {
- status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
- &pmac_id, vf + 1);
- if (!status && active_mac)
- be_cmd_pmac_del(adapter, vf_cfg->if_handle,
- pmac_id, vf + 1);
-
- status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
- } else {
- status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
- vf_cfg->pmac_id, vf + 1);
+ if (BEx_chip(adapter)) {
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
+ vf + 1);
status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
&vf_cfg->pmac_id, vf + 1);
+ } else {
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
}
if (status)
@@ -1195,8 +1201,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
vi->vf = vf;
vi->tx_rate = vf_cfg->tx_rate;
- vi->vlan = vf_cfg->vlan_tag;
- vi->qos = 0;
+ vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
+ vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
@@ -1206,28 +1212,29 @@ static int be_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status = 0;
if (!sriov_enabled(adapter))
return -EPERM;
- if (vf >= adapter->num_vfs || vlan > 4095)
+ if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
- if (vlan) {
- if (adapter->vf_cfg[vf].vlan_tag != vlan) {
+ if (vlan || qos) {
+ vlan |= qos << VLAN_PRIO_SHIFT;
+ if (vf_cfg->vlan_tag != vlan) {
/* If this is new value, program it. Else skip. */
- adapter->vf_cfg[vf].vlan_tag = vlan;
-
- status = be_cmd_set_hsw_config(adapter, vlan,
- vf + 1, adapter->vf_cfg[vf].if_handle);
+ vf_cfg->vlan_tag = vlan;
+ status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
+ vf_cfg->if_handle, 0);
}
} else {
/* Reset Transparent Vlan Tagging. */
- adapter->vf_cfg[vf].vlan_tag = 0;
- vlan = adapter->vf_cfg[vf].def_vid;
+ vf_cfg->vlan_tag = 0;
+ vlan = vf_cfg->def_vid;
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- adapter->vf_cfg[vf].if_handle);
+ vf_cfg->if_handle, 0);
}
@@ -1490,8 +1497,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
-void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
+ struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
@@ -1920,6 +1928,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
}
@@ -1931,9 +1940,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
struct be_eq_obj *eqo;
int i, rc;
- adapter->num_evt_qs = num_irqs(adapter);
+ adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
+ adapter->cfg_num_qs);
for_all_evt_queues(adapter, eqo, i) {
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
@@ -1946,7 +1958,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
if (rc)
return rc;
- rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
+ rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
}
@@ -2020,31 +2032,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
}
}
-static int be_num_txqs_want(struct be_adapter *adapter)
-{
- if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
- be_is_mc(adapter) ||
- (!lancer_chip(adapter) && !be_physfn(adapter)) ||
- BE2_chip(adapter))
- return 1;
- else
- return adapter->max_tx_queues;
-}
-
-static int be_tx_cqs_create(struct be_adapter *adapter)
+static int be_tx_qs_create(struct be_adapter *adapter)
{
struct be_queue_info *cq, *eq;
- int status;
struct be_tx_obj *txo;
- u8 i;
+ int status, i;
- adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS) {
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- rtnl_unlock();
- }
+ adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2060,16 +2054,7 @@ static int be_tx_cqs_create(struct be_adapter *adapter)
status = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (status)
return status;
- }
- return 0;
-}
-static int be_tx_qs_create(struct be_adapter *adapter)
-{
- struct be_tx_obj *txo;
- int i, status;
-
- for_all_tx_queues(adapter, txo, i) {
status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
sizeof(struct be_eth_wrb));
if (status)
@@ -2105,17 +2090,14 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int rc, i;
- /* We'll create as many RSS rings as there are irqs.
- * But when there's only one irq there's no use creating RSS rings
+ /* We can create as many RSS rings as there are EQs. */
+ adapter->num_rx_qs = adapter->num_evt_qs;
+
+ /* We'll use RSS only if at least 2 RSS rings are supported.
+ * When RSS is used, we'll need a default RXQ for non-IP traffic.
*/
- adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
- num_irqs(adapter) + 1 : 1;
- if (adapter->num_rx_qs != MAX_RX_QS) {
- rtnl_lock();
- netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_qs);
- rtnl_unlock();
- }
+ if (adapter->num_rx_qs > 1)
+ adapter->num_rx_qs++;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2379,38 +2361,24 @@ static void be_msix_disable(struct be_adapter *adapter)
if (msix_enabled(adapter)) {
pci_disable_msix(adapter->pdev);
adapter->num_msix_vec = 0;
+ adapter->num_msix_roce_vec = 0;
}
}
-static uint be_num_rss_want(struct be_adapter *adapter)
-{
- u32 num = 0;
-
- if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- (lancer_chip(adapter) ||
- (!sriov_want(adapter) && be_physfn(adapter)))) {
- num = adapter->max_rss_queues;
- num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
- }
- return num;
-}
-
static int be_msix_enable(struct be_adapter *adapter)
{
-#define BE_MIN_MSIX_VECTORS 1
- int i, status, num_vec, num_roce_vec = 0;
+ int i, status, num_vec;
struct device *dev = &adapter->pdev->dev;
- /* If RSS queues are not used, need a vec for default RX Q */
- num_vec = min(be_num_rss_want(adapter), num_online_cpus());
- if (be_roce_supported(adapter)) {
- num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
- (num_online_cpus() + 1));
- num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
- num_vec += num_roce_vec;
- num_vec = min(num_vec, MAX_MSIX_VECTORS);
- }
- num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
+ /* If RoCE is supported, program the max number of NIC vectors that
+ * may be configured via set-channels, along with vectors needed for
+ * RoCE. Else, just program the number we'll use initially.
+ */
+ if (be_roce_supported(adapter))
+ num_vec = min_t(int, 2 * be_max_eqs(adapter),
+ 2 * num_online_cpus());
+ else
+ num_vec = adapter->cfg_num_qs;
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
@@ -2418,7 +2386,7 @@ static int be_msix_enable(struct be_adapter *adapter)
status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
if (status == 0) {
goto done;
- } else if (status >= BE_MIN_MSIX_VECTORS) {
+ } else if (status >= MIN_MSIX_VECTORS) {
num_vec = status;
status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
num_vec);
@@ -2427,30 +2395,29 @@ static int be_msix_enable(struct be_adapter *adapter)
}
dev_warn(dev, "MSIx enable failed\n");
+
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
if (!be_physfn(adapter))
return status;
return 0;
done:
- if (be_roce_supported(adapter)) {
- if (num_vec > num_roce_vec) {
- adapter->num_msix_vec = num_vec - num_roce_vec;
- adapter->num_msix_roce_vec =
- num_vec - adapter->num_msix_vec;
- } else {
- adapter->num_msix_vec = num_vec;
- adapter->num_msix_roce_vec = 0;
- }
- } else
- adapter->num_msix_vec = num_vec;
- dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
+ if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
+ adapter->num_msix_roce_vec = num_vec / 2;
+ dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
+ adapter->num_msix_roce_vec);
+ }
+
+ adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
+
+ dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
+ adapter->num_msix_vec);
return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eqo)
{
- return adapter->msix_entries[eqo->idx].vector;
+ return adapter->msix_entries[eqo->msix_idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
@@ -2690,8 +2657,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
memset(mac, 0, ETH_ALEN);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (cmd.va == NULL)
return -1;
@@ -2735,13 +2702,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
be_vf_eth_addr_generate(adapter, mac);
for_all_vfs(adapter, vf_cfg, vf) {
- if (lancer_chip(adapter)) {
- status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
- } else {
+ if (BEx_chip(adapter))
status = be_cmd_pmac_add(adapter, mac,
vf_cfg->if_handle,
&vf_cfg->pmac_id, vf + 1);
- }
+ else
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
@@ -2759,7 +2726,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
int status, vf;
u8 mac[ETH_ALEN];
struct be_vf_cfg *vf_cfg;
- bool active;
+ bool active = false;
for_all_vfs(adapter, vf_cfg, vf) {
be_cmd_get_mac_from_list(adapter, mac, &active,
@@ -2788,11 +2755,12 @@ static void be_vf_clear(struct be_adapter *adapter)
pci_disable_sriov(adapter->pdev);
for_all_vfs(adapter, vf_cfg, vf) {
- if (lancer_chip(adapter))
- be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
- else
+ if (BEx_chip(adapter))
be_cmd_pmac_del(adapter, vf_cfg->if_handle,
vf_cfg->pmac_id, vf + 1);
+ else
+ be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
+ vf + 1);
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
@@ -2801,28 +2769,40 @@ done:
adapter->num_vfs = 0;
}
-static int be_clear(struct be_adapter *adapter)
+static void be_clear_queues(struct be_adapter *adapter)
{
- int i = 1;
+ be_mcc_queues_destroy(adapter);
+ be_rx_cqs_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ be_evt_queues_destroy(adapter);
+}
+static void be_cancel_worker(struct be_adapter *adapter)
+{
if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
cancel_delayed_work_sync(&adapter->work);
adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
}
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ int i;
+
+ be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
- for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+ /* Delete the primary MAC along with the uc-mac list */
+ for (i = 0; i < (adapter->uc_macs + 1); i++)
be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
- be_mcc_queues_destroy(adapter);
- be_rx_cqs_destroy(adapter);
- be_tx_queues_destroy(adapter);
- be_evt_queues_destroy(adapter);
+ be_clear_queues(adapter);
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
@@ -2833,17 +2813,21 @@ static int be_clear(struct be_adapter *adapter)
static int be_vfs_if_create(struct be_adapter *adapter)
{
+ struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
- int status;
+ int status = 0;
cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- if (!BE3_chip(adapter))
- be_cmd_get_profile_config(adapter, &cap_flags,
- NULL, vf + 1);
+ if (!BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res,
+ vf + 1);
+ if (!status)
+ cap_flags = res.if_cap_flags;
+ }
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2880,6 +2864,7 @@ static int be_vf_setup(struct be_adapter *adapter)
u16 def_vlan, lnk_speed;
int status, old_vfs, vf;
struct device *dev = &adapter->pdev->dev;
+ u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -2888,10 +2873,10 @@ static int be_vf_setup(struct be_adapter *adapter)
dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
adapter->num_vfs = old_vfs;
} else {
- if (num_vfs > adapter->dev_num_vfs)
+ if (num_vfs > be_max_vfs(adapter))
dev_info(dev, "Device supports %d VFs and not %d\n",
- adapter->dev_num_vfs, num_vfs);
- adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+ be_max_vfs(adapter), num_vfs);
+ adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
if (!adapter->num_vfs)
return 0;
}
@@ -2923,6 +2908,18 @@ static int be_vf_setup(struct be_adapter *adapter)
}
for_all_vfs(adapter, vf_cfg, vf) {
+ /* Allow VFs to program MAC/VLAN filters */
+ status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
+ if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter,
+ privileges |
+ BE_PRIV_FILTMGMT,
+ vf + 1);
+ if (!status)
+ dev_info(dev, "VF%d has FILTMGMT privilege\n",
+ vf);
+ }
+
/* BE3 FW, by default, caps VF TX-rate to 100mbps.
* Allow full available bandwidth
*/
@@ -2935,7 +2932,7 @@ static int be_vf_setup(struct be_adapter *adapter)
vf_cfg->tx_rate = lnk_speed;
status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle);
+ vf + 1, vf_cfg->if_handle, NULL);
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
@@ -2958,6 +2955,53 @@ err:
return status;
}
+/* On BE2/BE3, the FW does not suggest the supported limits */
+static void BEx_get_resources(struct be_adapter *adapter,
+ struct be_resources *res)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ bool use_sriov = false;
+
+ if (BE3_chip(adapter) && be_physfn(adapter)) {
+ int max_vfs;
+
+ max_vfs = pci_sriov_get_totalvfs(pdev);
+ res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+ use_sriov = res->max_vfs && num_vfs;
+ }
+
+ if (be_physfn(adapter))
+ res->max_uc_mac = BE_UC_PMAC_COUNT;
+ else
+ res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
+
+ if (adapter->function_mode & FLEX10_MODE)
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else if (adapter->function_mode & UMC_ENABLED)
+ res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
+ else
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ res->max_mcast_mac = BE_MAX_MC;
+
+ if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
+ !be_physfn(adapter))
+ res->max_tx_qs = 1;
+ else
+ res->max_tx_qs = BE3_MAX_TX_QS;
+
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !use_sriov && be_physfn(adapter))
+ res->max_rss_qs = (adapter->be3_native) ?
+ BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ res->max_rx_qs = res->max_rss_qs + 1;
+
+ res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+
+ res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+ if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
+ res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
+}
+
static void be_setup_init(struct be_adapter *adapter)
{
adapter->vlan_prio_bmap = 0xff;
@@ -2971,118 +3015,56 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
-static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
- bool *active_mac, u32 *pmac_id)
+static int be_get_resources(struct be_adapter *adapter)
{
- int status = 0;
-
- if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
- if (!lancer_chip(adapter) && !be_physfn(adapter))
- *active_mac = true;
- else
- *active_mac = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct be_resources res = {0};
+ int status;
- return status;
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ adapter->res = res;
}
- if (lancer_chip(adapter)) {
- status = be_cmd_get_mac_from_list(adapter, mac,
- active_mac, pmac_id, 0);
- if (*active_mac) {
- status = be_cmd_mac_addr_query(adapter, mac, false,
- if_handle, *pmac_id);
- }
- } else if (be_physfn(adapter)) {
- /* For BE3, for PF get permanent MAC */
- status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
- *active_mac = false;
- } else {
- /* For BE3, for VF get soft MAC assigned by PF*/
- status = be_cmd_mac_addr_query(adapter, mac, false,
- if_handle, 0);
- *active_mac = true;
+ /* For BE3, only check if FW suggests a different max-txqs value */
+ if (BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, 0);
+ if (!status && res.max_tx_qs)
+ adapter->res.max_tx_qs =
+ min(adapter->res.max_tx_qs, res.max_tx_qs);
}
- return status;
-}
-
-static void be_get_resources(struct be_adapter *adapter)
-{
- u16 dev_num_vfs;
- int pos, status;
- bool profile_present = false;
- u16 txq_count = 0;
+ /* For Lancer, SH etc. read per-function resource limits from FW.
+ * GET_FUNC_CONFIG returns per-function guaranteed limits.
+ * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
+ */
if (!BEx_chip(adapter)) {
- status = be_cmd_get_func_config(adapter);
- if (!status)
- profile_present = true;
- } else if (BE3_chip(adapter) && be_physfn(adapter)) {
- be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
- }
-
- if (profile_present) {
- /* Sanity fixes for Lancer */
- adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
- BE_UC_PMAC_COUNT);
- adapter->max_vlans = min_t(u16, adapter->max_vlans,
- BE_NUM_VLANS_SUPPORTED);
- adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
- BE_MAX_MC);
- adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
- MAX_TX_QS);
- adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
- BE3_MAX_RSS_QS);
- adapter->max_event_queues = min_t(u16,
- adapter->max_event_queues,
- BE3_MAX_RSS_QS);
-
- if (adapter->max_rss_queues &&
- adapter->max_rss_queues == adapter->max_rx_queues)
- adapter->max_rss_queues -= 1;
-
- if (adapter->max_event_queues < adapter->max_rss_queues)
- adapter->max_rss_queues = adapter->max_event_queues;
-
- } else {
- if (be_physfn(adapter))
- adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
- else
- adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
- if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ status = be_cmd_get_func_config(adapter, &res);
+ if (status)
+ return status;
- adapter->max_mcast_mac = BE_MAX_MC;
- adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
- adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
- MAX_TX_QS);
- adapter->max_rss_queues = (adapter->be3_native) ?
- BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- adapter->max_event_queues = BE3_MAX_RSS_QS;
+ /* If RoCE may be enabled, stash away half the EQs for RoCE */
+ if (be_roce_supported(adapter))
+ res.max_evt_qs /= 2;
+ adapter->res = res;
- adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, 0);
+ if (status)
+ return status;
+ adapter->res.max_vfs = res.max_vfs;
+ }
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
+ dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+ be_max_txqs(adapter), be_max_rxqs(adapter),
+ be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_vfs(adapter));
+ dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+ be_max_uc(adapter), be_max_mc(adapter),
+ be_max_vlans(adapter));
}
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
- &dev_num_vfs);
- if (BE3_chip(adapter))
- dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
- adapter->dev_num_vfs = dev_num_vfs;
- }
+ return 0;
}
/* Routine to query per function resource limits */
@@ -3095,100 +3077,171 @@ static int be_get_config(struct be_adapter *adapter)
&adapter->function_caps,
&adapter->asic_rev);
if (status)
- goto err;
+ return status;
- be_get_resources(adapter);
+ status = be_get_resources(adapter);
+ if (status)
+ return status;
/* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
- sizeof(u32), GFP_KERNEL);
- if (!adapter->pmac_id) {
- status = -ENOMEM;
- goto err;
- }
+ adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
+ GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
-err:
- return status;
+ /* Sanitize cfg_num_qs based on HW and platform limits */
+ adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
+
+ return 0;
}
-static int be_setup(struct be_adapter *adapter)
+static int be_mac_setup(struct be_adapter *adapter)
{
- struct device *dev = &adapter->pdev->dev;
- u32 en_flags;
- u32 tx_fc, rx_fc;
- int status;
u8 mac[ETH_ALEN];
- bool active_mac;
+ int status;
- be_setup_init(adapter);
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ status = be_cmd_get_perm_mac(adapter, mac);
+ if (status)
+ return status;
- if (!lancer_chip(adapter))
- be_cmd_req_native_mode(adapter);
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ } else {
+ /* Maybe the HW was reset; dev_addr must be re-programmed */
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ }
- status = be_get_config(adapter);
+ /* On BE3 VFs this cmd may fail due to lack of privilege.
+ * Ignore the failure since, in that case, the pmac_id is fetched
+ * in the IFACE_CREATE cmd.
+ */
+ be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ return 0;
+}
+
+static void be_schedule_worker(struct be_adapter *adapter)
+{
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+}
+
+static int be_setup_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ status = be_evt_queues_create(adapter);
if (status)
goto err;
- status = be_msix_enable(adapter);
+ status = be_tx_qs_create(adapter);
if (status)
goto err;
- status = be_evt_queues_create(adapter);
+ status = be_rx_cqs_create(adapter);
if (status)
goto err;
- status = be_tx_cqs_create(adapter);
+ status = be_mcc_queues_create(adapter);
if (status)
goto err;
- status = be_rx_cqs_create(adapter);
+ status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
if (status)
goto err;
- status = be_mcc_queues_create(adapter);
+ status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
if (status)
goto err;
- be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
- /* In UMC mode FW does not return right privileges.
- * Override with correct privilege equivalent to PF.
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev, "queue_setup failed\n");
+ return status;
+}
+
+int be_update_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (netif_running(netdev))
+ be_close(netdev);
+
+ be_cancel_worker(adapter);
+
+ /* If any vectors have been shared with RoCE we cannot re-program
+ * the MSIx table.
*/
- if (be_is_mc(adapter))
- adapter->cmd_privileges = MAX_PRIVILEGES;
+ if (!adapter->num_msix_roce_vec)
+ be_msix_disable(adapter);
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ be_clear_queues(adapter);
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- en_flags |= BE_IF_FLAGS_RSS;
+ if (!msix_enabled(adapter)) {
+ status = be_msix_enable(adapter);
+ if (status)
+ return status;
+ }
+
+ status = be_setup_queues(adapter);
+ if (status)
+ return status;
- en_flags = en_flags & adapter->if_cap_flags;
+ be_schedule_worker(adapter);
- status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
- &adapter->if_handle, 0);
- if (status != 0)
+ if (netif_running(netdev))
+ status = be_open(netdev);
+
+ return status;
+}
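The re-sizing helper above is the one place the driver tears its rings down and rebuilds them at runtime, and the ordering is the point: traffic stops and the worker is cancelled first, the MSI-X table is released only when RoCE has not borrowed vectors from it, and the worker and netdev come back only after the new queues exist. A toy, runnable outline of that sequence (the stub helpers below are illustrative, not the driver's own functions):

	#include <stdio.h>

	static int  stop_traffic(void)    { printf("close netdev\n");    return 0; }
	static void cancel_worker(void)   { printf("cancel worker\n");             }
	static void free_rings(void)      { printf("free queues\n");               }
	static int  enable_vectors(void)  { printf("enable MSI-X\n");    return 0; }
	static int  create_rings(void)    { printf("recreate queues\n"); return 0; }
	static void schedule_worker(void) { printf("restart worker\n");            }
	static int  start_traffic(void)   { printf("reopen netdev\n");   return 0; }

	static int update_queues(int shared_with_roce)
	{
		int msix_on = 1;
		int err;

		stop_traffic();
		cancel_worker();

		/* the MSI-X table cannot be re-programmed while RoCE holds vectors */
		if (!shared_with_roce) {
			printf("disable MSI-X\n");
			msix_on = 0;
		}

		free_rings();

		if (!msix_on) {
			err = enable_vectors();
			if (err)
				return err;
		}

		err = create_rings();
		if (err)
			return err;

		schedule_worker();
		return start_traffic();
	}

	int main(void)
	{
		return update_queues(0);
	}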
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 tx_fc, rx_fc, en_flags;
+ int status;
+
+ be_setup_init(adapter);
+
+ if (!lancer_chip(adapter))
+ be_cmd_req_native_mode(adapter);
+
+ status = be_get_config(adapter);
+ if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- active_mac = false;
- status = be_get_mac_addr(adapter, mac, adapter->if_handle,
- &active_mac, &adapter->pmac_id[0]);
- if (status != 0)
+ status = be_msix_enable(adapter);
+ if (status)
goto err;
- if (!active_mac) {
- status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
- if (status != 0)
- goto err;
- }
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+ en_flags |= BE_IF_FLAGS_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
+ if (status)
+ goto err;
- if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
+ status = be_setup_queues(adapter);
+ rtnl_unlock();
+ if (status)
+ goto err;
- status = be_tx_qs_create(adapter);
+ be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+ /* In UMC mode FW does not return right privileges.
+ * Override with correct privilege equivalent to PF.
+ */
+ if (be_is_mc(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+
+ status = be_mac_setup(adapter);
if (status)
goto err;
@@ -3205,8 +3258,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter)) {
- if (adapter->dev_num_vfs)
+ if (be_physfn(adapter) && num_vfs) {
+ if (be_max_vfs(adapter))
be_vf_setup(adapter);
else
dev_warn(dev, "device doesn't support SRIOV\n");
@@ -3216,8 +3269,7 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
- adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+ be_schedule_worker(adapter);
return 0;
err:
be_clear(adapter);
@@ -3241,7 +3293,7 @@ static void be_netpoll(struct net_device *netdev)
#endif
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -3298,7 +3350,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
-struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
int header_size,
const struct firmware *fw)
{
@@ -3760,6 +3812,74 @@ fw_exit:
return status;
}
+static int be_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+ int status = 0;
+ u16 mode = 0;
+
+ if (!sriov_enabled(adapter))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ mode == BRIDGE_MODE_VEPA ?
+ PORT_FWD_TYPE_VEPA :
+ PORT_FWD_TYPE_VEB);
+ if (status)
+ goto err;
+
+ dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+ }
+err:
+ dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+}
+
+static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 filter_mask)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ int status = 0;
+ u8 hsw_mode;
+
+ if (!sriov_enabled(adapter))
+ return 0;
+
+ /* BE and Lancer chips support VEB mode only */
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ hsw_mode = PORT_FWD_TYPE_VEB;
+ } else {
+ status = be_cmd_get_hsw_config(adapter, NULL, 0,
+ adapter->if_handle, &hsw_mode);
+ if (status)
+ return 0;
+ }
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ hsw_mode == PORT_FWD_TYPE_VEPA ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+}
+
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -3778,13 +3898,13 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
+ .ndo_bridge_setlink = be_ndo_bridge_setlink,
+ .ndo_bridge_getlink = be_ndo_bridge_getlink,
};
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo;
- int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
@@ -3807,9 +3927,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
-
- for_all_evt_queues(adapter, eqo, i)
- netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3916,9 +4033,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
+ rx_filter->size, &rx_filter->dma,
+ GFP_KERNEL);
if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
@@ -3964,8 +4081,8 @@ static int be_stats_init(struct be_adapter *adapter)
/* BE3 and Skyhawk */
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL | __GFP_ZERO);
+ cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
if (cmd->va == NULL)
return -1;
return 0;
@@ -4072,6 +4189,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ adapter->cfg_num_qs = netif_get_num_default_rss_queues();
return 0;
}
@@ -4164,7 +4282,8 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
be_cmd_get_die_temperature(adapter);
for_all_rx_queues(adapter, rxo, i) {
@@ -4253,7 +4372,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
status = pci_enable_pcie_error_reporting(pdev);
if (status)
- dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+ dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
status = be_ctrl_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index f3d126dcc10..9cd5415fe01 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -60,7 +60,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
*/
num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
- dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
+ dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
/* provide start index of the vector,
* so in case of linear usage,
* it can use the base as starting point.
@@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter)
}
}
-void _be_roce_dev_remove(struct be_adapter *adapter)
+static void _be_roce_dev_remove(struct be_adapter *adapter)
{
if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
ocrdma_drv->remove(adapter->ocrdma_dev);
@@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter)
}
}
-void _be_roce_dev_open(struct be_adapter *adapter)
+static void _be_roce_dev_open(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
@@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter)
}
}
-void _be_roce_dev_close(struct be_adapter *adapter)
+static void _be_roce_dev_close(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 27657299846..2cd1129e19a 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -29,7 +29,7 @@ enum be_interrupt_mode {
BE_INTERRUPT_MODE_MSI = 2,
};
-#define MAX_ROCE_MSIX_VECTORS 16
+#define MAX_MSIX_VECTORS 32
struct be_dev_info {
u8 __iomem *db;
u64 unmapped_db;
@@ -45,7 +45,7 @@ struct be_dev_info {
struct {
int num_vectors;
int start_vector;
- u32 vector_list[MAX_ROCE_MSIX_VECTORS];
+ u32 vector_list[MAX_MSIX_VECTORS];
} msix;
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index cf579fb39bc..4de8cfd149c 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1030,8 +1030,8 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (pdev->dev.platform_data) {
- struct ethoc_platform_data *pdata = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev)) {
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 934e1ae279f..212f44b3a77 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -778,10 +778,9 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev,
- sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ priv->descs = dma_zalloc_coherent(priv->dev,
+ sizeof(struct ftgmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
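The dma_zalloc_coherent() conversions in this section (benet above, ftgmac100 here, and ftmac100, e1000 and emac/mal below) drop the open-coded GFP_KERNEL | __GFP_ZERO flag in favour of the dedicated zeroing wrapper, which in this era is roughly the following inline from <linux/dma-mapping.h> (a sketch, not a verbatim quote):

	static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
						dma_addr_t *dma_handle, gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}

Either spelling yields zeroed, coherent DMA memory; the wrapper just makes the zeroing explicit at the call site and shortens the argument list.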
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 4658f4cc196..8be5b40c0a1 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,10 +732,10 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev,
- sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ priv->descs = dma_zalloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr,
+ GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ae236009f1a..0120217a16d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -296,6 +296,9 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
+
struct platform_device *pdev;
int opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index c610a2716be..b2793b91cc5 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,7 +69,6 @@ static void set_multicast_list(struct net_device *ndev);
#endif
#define DRIVER_NAME "fec"
-#define FEC_NAPI_WEIGHT 64
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
@@ -239,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
static int mii_cnt;
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
- struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
- if (is_ex)
- return (struct bufdesc *)(ex + 1);
+ struct bufdesc *new_bd = bdp + 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= fep->tx_bd_base) {
+ base = fep->tx_bd_base;
+ ring_size = fep->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ } else {
+ base = fep->rx_bd_base;
+ ring_size = fep->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+ ex_base : ex_new_bd);
else
- return bdp + 1;
+ return (new_bd >= (base + ring_size)) ?
+ base : new_bd;
}
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
- struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
- if (is_ex)
- return (struct bufdesc *)(ex - 1);
+ struct bufdesc *new_bd = bdp - 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= fep->tx_bd_base) {
+ base = fep->tx_bd_base;
+ ring_size = fep->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ } else {
+ base = fep->rx_bd_base;
+ ring_size = fep->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd < ex_base) ?
+ (ex_new_bd + ring_size) : ex_new_bd);
else
- return bdp - 1;
+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
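With the ring sizes now stored in fec_enet_private, the next/prev walkers above wrap by comparing the candidate descriptor against base + ring_size instead of testing the BD_ENET_*_WRAP bit, which is what allows the tx/rx ring lengths to become runtime values. A standalone, runnable model of the same wrap arithmetic on plain indices (illustrative only):

	#include <stdio.h>

	#define RING_SIZE 8

	static int next_index(int i)
	{
		return (i + 1 < RING_SIZE) ? i + 1 : 0;
	}

	static int prev_index(int i)
	{
		return (i - 1 >= 0) ? i - 1 : RING_SIZE - 1;
	}

	int main(void)
	{
		int i, idx = RING_SIZE - 2;

		for (i = 0; i < 4; i++) {
			printf("next of %d is %d\n", idx, next_index(idx));
			idx = next_index(idx);
		}
		printf("prev of 0 is %d\n", prev_index(0));
		return 0;
	}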
static void *swap_buffer(void *bufaddr, int len)
@@ -380,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
- bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep);
if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
fep->delay_work.trig_tx = true;
@@ -389,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
/* If this was the last BD in the ring, start at the beginning again. */
- if (status & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
fep->cur_tx = bdp;
@@ -417,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY;
else
bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
fep->cur_rx = fep->rx_bd_base;
@@ -436,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < fep->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
@@ -445,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
fep->tx_skbuff[i] = NULL;
}
bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
}
@@ -510,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
- * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
else
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
- * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -727,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
bdp = fep->dirty_tx;
/* get next bdp of dirty_tx */
- if (bdp->cbd_sc & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -800,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
- if (status & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
/* Since we have freed up a buffer, the ring is no longer full
*/
@@ -993,10 +1018,8 @@ rx_processing_done:
}
/* Update BD pointer to next entry */
- if (status & BD_ENET_RX_WRAP)
- bdp = fep->rx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
@@ -1059,7 +1082,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
unsigned char *iap, tmpaddr[ETH_ALEN];
/*
@@ -1099,10 +1122,10 @@ static void fec_get_mac(struct net_device *ndev)
* 4) FEC mac registers set by bootloader
*/
if (!is_valid_ether_addr(iap)) {
- *((unsigned long *) &tmpaddr[0]) =
- be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
- *((unsigned short *) &tmpaddr[4]) =
- be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ *((__be32 *) &tmpaddr[0]) =
+ cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+ *((__be16 *) &tmpaddr[4]) =
+ cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct bufdesc *bdp;
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
skb = fep->rx_skbuff[i];
if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++)
+ for (i = 0; i < fep->tx_ring_size; i++)
kfree(fep->tx_bounce[i]);
}
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
struct bufdesc *bdp;
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb) {
fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < fep->tx_ring_size; i++) {
fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
/* Get the Ethernet address */
fec_get_mac(ndev);
+ /* init the tx & rx ring size */
+ fep->tx_ring_size = TX_RING_SIZE;
+ fep->rx_ring_size = RX_RING_SIZE;
+
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
if (fep->bufdesc_ex)
fep->tx_bd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
else
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ fep->tx_bd_base = cbd_base + fep->rx_ring_size;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1980,7 +2007,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
/* enable hw VLAN support */
@@ -2055,10 +2082,6 @@ fec_probe(struct platform_device *pdev)
if (of_id)
pdev->id_entry = of_id->data;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
-
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
if (!ndev)
@@ -2076,6 +2099,7 @@ fec_probe(struct platform_device *pdev)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fep->hwp = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
@@ -2091,7 +2115,7 @@ fec_probe(struct platform_device *pdev)
ret = of_get_phy_mode(pdev->dev.of_node);
if (ret < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
@@ -2125,10 +2149,25 @@ fec_probe(struct platform_device *pdev)
fep->bufdesc_ex = 0;
}
- clk_prepare_enable(fep->clk_ahb);
- clk_prepare_enable(fep->clk_ipg);
- clk_prepare_enable(fep->clk_enet_out);
- clk_prepare_enable(fep->clk_ptp);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
@@ -2159,14 +2198,10 @@ fec_probe(struct platform_device *pdev)
ret = irq;
goto failed_irq;
}
- ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
- if (ret) {
- while (--i >= 0) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, ndev);
- }
+ ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+ 0, pdev->name, ndev);
+ if (ret)
goto failed_irq;
- }
}
ret = fec_enet_mii_init(pdev);
@@ -2190,19 +2225,19 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
failed_clk:
failed_ioremap:
free_netdev(ndev);
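The probe path now checks every clk_prepare_enable() and unwinds through dedicated labels, disabling only the clocks that were actually enabled and in reverse order of acquisition. The same acquire-in-order / release-in-reverse goto idiom in a standalone, runnable form (toy resources, not the driver's clocks):

	#include <stdio.h>

	static int acquire(const char *name)
	{
		printf("acquire %s\n", name);
		return 0;	/* pretend acquisition always succeeds */
	}

	static void release(const char *name)
	{
		printf("release %s\n", name);
	}

	static int setup(void)
	{
		int ret;

		ret = acquire("ahb");
		if (ret)
			goto fail_ahb;
		ret = acquire("ipg");
		if (ret)
			goto fail_ipg;
		ret = acquire("ptp");
		if (ret)
			goto fail_ptp;
		return 0;

	fail_ptp:
		release("ipg");
	fail_ipg:
		release("ahb");
	fail_ahb:
		return ret;
	}

	int main(void)
	{
		return setup();
	}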
@@ -2215,25 +2250,21 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- int i;
cancel_delayed_work_sync(&(fep->delay_work.delay_work));
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
del_timer_sync(&fep->time_keep);
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- int irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- clk_disable_unprepare(fep->clk_ptp);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ipg);
+ clk_disable_unprepare(fep->clk_ahb);
free_netdev(ndev);
return 0;
@@ -2250,9 +2281,12 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ipg);
+ clk_disable_unprepare(fep->clk_ahb);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -2273,15 +2307,44 @@ fec_resume(struct device *dev)
return ret;
}
- clk_prepare_enable(fep->clk_enet_out);
- clk_prepare_enable(fep->clk_ahb);
- clk_prepare_enable(fep->clk_ipg);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
+
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
}
return 0;
+
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 360a578c2bb..e0528900db0 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
- struct device *dev = &of->dev;
- struct mii_bus *bus = dev_get_drvdata(dev);
+ struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
- dev_set_drvdata(dev, NULL);
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8de53a14a6f..6b60582ce8c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -583,7 +583,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct sk_buff *skb)
{
struct sk_buff *new_skb;
- struct fs_enet_private *fep = netdev_priv(dev);
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
@@ -1000,6 +999,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
struct fs_enet_private *fep;
struct fs_platform_info *fpi;
const u32 *data;
+ struct clk *clk;
+ int err;
const u8 *mac_addr;
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
@@ -1037,6 +1038,20 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->use_rmii = 1;
}
+ /* make clock lookup non-fatal (the driver is shared among platforms),
+ * but require enable to succeed when a clock was specified/found,
+ * keep a reference to the clock upon successful acquisition
+ */
+ clk = devm_clk_get(&ofdev->dev, "per");
+ if (!IS_ERR(clk)) {
+ err = clk_prepare_enable(clk);
+ if (err) {
+ ret = err;
+ goto out_free_fpi;
+ }
+ fpi->clk_per = clk;
+ }
+
privsize = sizeof(*fep) +
sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring);
@@ -1108,6 +1123,8 @@ out_free_dev:
free_netdev(ndev);
out_put:
of_node_put(fpi->phy_node);
+ if (fpi->clk_per)
+ clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1124,6 +1141,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
of_node_put(fep->fpi->phy_node);
+ if (fep->fpi->clk_per)
+ clk_disable_unprepare(fep->fpi->clk_per);
free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c93a05654b4..c4f65067cf7 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
priv->regs = priv->map + data->mii_offset;
new_bus->parent = &pdev->dev;
- dev_set_drvdata(&pdev->dev, new_bus);
+ platform_set_drvdata(pdev, new_bus);
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
@@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
- dev_set_drvdata(device, NULL);
-
iounmap(priv->map);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 8d2db7b808b..c4eaadeb572 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np,
return -EINVAL;
}
- grp->grp_id = priv->num_grps;
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
@@ -1017,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* We need to delay at least 3 TX clocks */
udelay(2);
- tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval = 0;
+ if (!priv->pause_aneg_en && priv->tx_pause_en)
+ tempval |= MACCFG1_TX_FLOW;
+ if (!priv->pause_aneg_en && priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
gfar_write(&regs->maccfg1, tempval);
/* Initialize MACCFG2. */
@@ -1461,7 +1467,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
uint gigabit_support =
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- SUPPORTED_1000baseT_Full : 0;
+ GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
priv->oldlink = 0;
@@ -2052,6 +2058,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
return skip_txbd(bdp, 1, base, ring_size);
}
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+ unsigned long fcb_addr)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+ unsigned int len)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ (len > 2500));
+}
+
/* This is called by the kernel when a frame is ready for transmission.
* It is pointed to by the dev->hard_start_xmit function pointer
*/
@@ -2064,23 +2088,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct txfcb *fcb = NULL;
struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0, do_tstamp = 0;
+ int i, rq = 0;
+ int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
-
- /* TOE=1 frames larger than 2500 bytes may see excess delays
- * before start of transmission.
- */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
- int ret;
-
- ret = skb_checksum_help(skb);
- if (ret)
- return ret;
- }
+ unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
@@ -2088,21 +2100,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+ do_vlan = vlan_tx_tag_present(skb);
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
+
+ if (do_csum || do_vlan)
+ fcb_len = GMAC_FCB_LEN;
+
/* check if time stamp should be generated */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
- do_tstamp = 1;
- fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
- }
+ if (unlikely(do_tstamp))
+ fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
struct sk_buff *skb_new;
- skb_new = skb_realloc_headroom(skb, fcb_length);
+ skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
dev->stats.tx_errors++;
kfree_skb(skb);
@@ -2133,7 +2147,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- tx_queue->stats.tx_bytes += skb->len;
+ bytes_sent = skb->len;
+ tx_queue->stats.tx_bytes += bytes_sent;
+ /* keep Tx bytes on wire for BQL accounting */
+ GFAR_CB(skb)->bytes_sent = bytes_sent;
tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
@@ -2153,12 +2170,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
+ unsigned int frag_len;
/* Point at the next BD, wrapping as needed */
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
- length = skb_shinfo(skb)->frags[i].size;
+ frag_len = skb_shinfo(skb)->frags[i].size;
- lstatus = txbdp->lstatus | length |
+ lstatus = txbdp->lstatus | frag_len |
BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
@@ -2168,7 +2186,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
- length,
+ frag_len,
DMA_TO_DEVICE);
/* set the TxBD length and buffer pointer */
@@ -2185,36 +2203,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(skb->data, 0, GMAC_TXPAL_LEN);
}
- /* Set up checksumming */
- if (CHECKSUM_PARTIAL == skb->ip_summed) {
+ /* Add TxFCB if required */
+ if (fcb_len) {
fcb = gfar_add_fcb(skb);
- /* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
- ((unsigned long)fcb % 0x20) > 0x18)) {
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ /* Set up checksumming */
+ if (do_csum) {
+ gfar_tx_checksum(skb, fcb, fcb_len);
+
+ if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+ unlikely(gfar_csum_errata_76(priv, skb->len))) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
- } else {
- lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb, fcb_length);
+ if (do_vlan || do_tstamp) {
+ /* put back a new fcb for vlan/tstamp TOE */
+ fcb = gfar_add_fcb(skb);
+ } else {
+ /* Tx TOE not used */
+ lstatus &= ~(BD_LFLAG(TXBD_TOE));
+ fcb = NULL;
+ }
}
}
- if (vlan_tx_tag_present(skb)) {
- if (unlikely(NULL == fcb)) {
- fcb = gfar_add_fcb(skb);
- lstatus |= BD_LFLAG(TXBD_TOE);
- }
-
+ if (do_vlan)
gfar_tx_vlan(skb, fcb);
- }
/* Setup tx hardware time stamping if requested */
if (unlikely(do_tstamp)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (fcb == NULL)
- fcb = gfar_add_fcb(skb);
fcb->ptp = 1;
- lstatus |= BD_LFLAG(TXBD_TOE);
}
txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
@@ -2226,15 +2246,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the full frame length.
*/
if (unlikely(do_tstamp)) {
- txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_len);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
- netdev_tx_sent_queue(txq, skb->len);
+ netdev_tx_sent_queue(txq, bytes_sent);
/* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2554,7 +2574,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
}
- bytes_sent += skb->len;
+ bytes_sent += GFAR_CB(skb)->bytes_sent;
dev_kfree_skb_any(skb);
@@ -3014,6 +3034,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+ u32 val = 0;
+
+ if (!phydev->duplex)
+ return val;
+
+ if (!priv->pause_aneg_en) {
+ if (priv->tx_pause_en)
+ val |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ val |= MACCFG1_RX_FLOW;
+ } else {
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_TX)
+ val |= MACCFG1_TX_FLOW;
+ if (flowctrl & FLOW_CTRL_RX)
+ val |= MACCFG1_RX_FLOW;
+ }
+
+ return val;
+}
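When pause autonegotiation is enabled, the helper above hands the local and link-partner advertisements to mii_resolve_flowctrl_fdx() and programs MACCFG1 from the result. A standalone, runnable model of that resolution as described by 802.3 Annex 28B (the local constants below stand in for the kernel's ADVERTISE_* and FLOW_CTRL_* values):

	#include <stdio.h>

	#define ADV_PAUSE_CAP  0x1
	#define ADV_PAUSE_ASYM 0x2
	#define FC_TX 0x1
	#define FC_RX 0x2

	static unsigned int resolve_fdx(unsigned int lcl, unsigned int rmt)
	{
		unsigned int cap = 0;

		if (lcl & rmt & ADV_PAUSE_CAP) {
			cap = FC_TX | FC_RX;
		} else if (lcl & rmt & ADV_PAUSE_ASYM) {
			if (lcl & ADV_PAUSE_CAP)
				cap = FC_RX;
			else if (rmt & ADV_PAUSE_CAP)
				cap = FC_TX;
		}
		return cap;
	}

	int main(void)
	{
		/* local advertises asym only, partner advertises sym + asym:
		 * the local side may send pause frames but will not honour
		 * received ones, i.e. only FC_TX resolves.
		 */
		printf("resolved caps: 0x%x\n",
		       resolve_fdx(ADV_PAUSE_ASYM,
				   ADV_PAUSE_CAP | ADV_PAUSE_ASYM));
		return 0;
	}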
+
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the phydev structure, and this
@@ -3032,6 +3087,7 @@ static void adjust_link(struct net_device *dev)
lock_tx_qs(priv);
if (phydev->link) {
+ u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -3080,6 +3136,10 @@ static void adjust_link(struct net_device *dev)
priv->oldspeed = phydev->speed;
}
+ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+ gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04b552cd419..04112b98ff5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -146,6 +146,10 @@ extern const char gfar_driver_version[];
| SUPPORTED_Autoneg \
| SUPPORTED_MII)
+#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause)
+
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -571,7 +575,7 @@ struct rxfcb {
};
struct gianfar_skb_cb {
- int alignamount;
+ unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
};
#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
@@ -1009,7 +1013,6 @@ struct gfar_irqinfo {
* @napi: the napi poll function
* @priv: back pointer to the priv structure
* @regs: the ioremapped register space for this group
- * @grp_id: group id for this group
* @irqinfo: TX/RX/ER irq data for this group
*/
@@ -1018,11 +1021,10 @@ struct gfar_priv_grp {
struct napi_struct napi;
struct gfar_private *priv;
struct gfar __iomem *regs;
- unsigned int grp_id;
+ unsigned int rstat;
unsigned long num_rx_queues;
unsigned long rx_bit_map;
/* cacheline 3 */
- unsigned int rstat;
unsigned int tstat;
unsigned long num_tx_queues;
unsigned long tx_bit_map;
@@ -1102,7 +1104,11 @@ struct gfar_private {
/* Wake-on-LAN enabled */
wol_en:1,
/* Enable priority based Tx scheduling in Hw */
- prio_sched_en:1;
+ prio_sched_en:1,
+ /* Flow control flags */
+ pause_aneg_en:1,
+ tx_pause_en:1,
+ rx_pause_en:1;
/* The total tx and rx ring size for the enabled queues */
unsigned int total_tx_ring_size;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 21cd88124ca..d3d7ede27ef 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev,
return err;
}
+static void gfar_gpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ epause->autoneg = !!priv->pause_aneg_en;
+ epause->rx_pause = !!priv->rx_pause_en;
+ epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ priv->rx_pause_en = priv->tx_pause_en = 0;
+ if (epause->rx_pause) {
+ priv->rx_pause_en = 1;
+
+ if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTRL_RX & TX */
+ newadv = ADVERTISED_Pause;
+ } else /* FLOW_CTRL_RX */
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTRL_TX */
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ priv->pause_aneg_en = 1;
+ else
+ priv->pause_aneg_en = 0;
+
+ oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg)
+ /* inform link partner of our
+ * new flow ctrl settings
+ */
+ return phy_start_aneg(phydev);
+
+ if (!epause->autoneg) {
+ u32 tempval;
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ if (priv->tx_pause_en)
+ tempval |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ gfar_write(&regs->maccfg1, tempval);
+ }
+ }
+
+ return 0;
+}
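The rx/tx pause requests arriving from ethtool (e.g. via ethtool -A) are translated above into pause advertisement bits before autonegotiation is restarted; the mapping is the inverse of the resolution table used in adjust_link(). A minimal runnable sketch of that mapping (local constants stand in for ADVERTISED_Pause and ADVERTISED_Asym_Pause):

	#include <stdio.h>

	#define ADV_PAUSE 0x1
	#define ADV_ASYM  0x2

	static unsigned int pause_to_adv(int rx, int tx)
	{
		if (rx && tx)
			return ADV_PAUSE;		/* symmetric pause     */
		if (rx)
			return ADV_PAUSE | ADV_ASYM;	/* receive-only pause  */
		if (tx)
			return ADV_ASYM;		/* transmit-only pause */
		return 0;				/* no pause advertised */
	}

	int main(void)
	{
		printf("rx only -> 0x%x\n", pause_to_adv(1, 0));
		printf("tx only -> 0x%x\n", pause_to_adv(0, 1));
		printf("rx + tx -> 0x%x\n", pause_to_adv(1, 1));
		return 0;
	}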
+
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_coalesce = gfar_scoalesce,
.get_ringparam = gfar_gringparam,
.set_ringparam = gfar_sringparam,
+ .get_pauseparam = gfar_gpauseparam,
+ .set_pauseparam = gfar_spauseparam,
.get_strings = gfar_gstrings,
.get_sset_count = gfar_sset_count,
.get_ethtool_stats = gfar_fill_stats,
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 098f133908a..e006a09ba89 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -452,7 +452,9 @@ static int gianfar_ptp_probe(struct platform_device *dev)
err = -ENODEV;
etsects->caps = ptp_gianfar_caps;
- etsects->cksel = DEFAULT_CKSEL;
+
+ if (get_of_u32(node, "fsl,cksel", &etsects->cksel))
+ etsects->cksel = DEFAULT_CKSEL;
if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) ||
get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3c43dac894e..5930c39672d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3911,14 +3911,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
static int ucc_geth_remove(struct platform_device* ofdev)
{
- struct device *device = &ofdev->dev;
- struct net_device *dev = dev_get_drvdata(device);
+ struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
- dev_set_drvdata(device, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index e3c7c697fc4..91227d03274 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1097,7 +1097,7 @@ static int hp100_open(struct net_device *dev)
/* New: if bus is PCI or EISA, interrupts might be shared interrupts */
if (request_irq(dev->irq, hp100_interrupt,
lp->bus == HP100_BUS_PCI || lp->bus ==
- HP100_BUS_EISA ? IRQF_SHARED : IRQF_DISABLED,
+ HP100_BUS_EISA ? IRQF_SHARED : 0,
"hp100", dev)) {
printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
return -EAGAIN;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 93346f00486..79aef681ac8 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -133,8 +133,8 @@ struct rfd_struct
unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
unsigned short next; /* linkoffset to next RFD */
unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
- unsigned char dest[6]; /* ethernet-address, destination */
- unsigned char source[6]; /* ethernet-address, source */
+ unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */
+ unsigned char source[ETH_ALEN]; /* ethernet-address, source */
unsigned short length; /* 802.3 frame-length */
unsigned short zero_dummy; /* dummy */
};
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 35853b43d66..2d1c6bdd361 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -102,6 +102,19 @@ static int ehea_probe_adapter(struct platform_device *dev);
static int ehea_remove(struct platform_device *dev);
+static struct of_device_id ehea_module_device_table[] = {
+ {
+ .name = "lhea",
+ .compatible = "IBM,lhea",
+ },
+ {
+ .type = "network",
+ .compatible = "IBM,lhea-ethernet",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ehea_module_device_table);
+
static struct of_device_id ehea_device_table[] = {
{
.name = "lhea",
@@ -109,7 +122,6 @@ static struct of_device_id ehea_device_table[] = {
},
{},
};
-MODULE_DEVICE_TABLE(of, ehea_device_table);
static struct platform_driver ehea_driver = {
.driver = {
@@ -1285,7 +1297,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
ehea_qp_aff_irq_handler,
- IRQF_DISABLED, port->int_aff_name, port);
+ 0, port->int_aff_name, port);
if (ret) {
netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
port->qp_eq->attr.ist1);
@@ -1303,8 +1315,7 @@ static int ehea_reg_interrupts(struct net_device *dev)
"%s-queue%d", dev->name, i);
ret = ibmebus_request_irq(pr->eq->attr.ist1,
ehea_recv_irq_handler,
- IRQF_DISABLED, pr->int_send_name,
- pr);
+ 0, pr->int_send_name, pr);
if (ret) {
netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
i, pr->eq->attr.ist1);
@@ -3320,7 +3331,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
}
ret = ibmebus_request_irq(adapter->neq->attr.ist1,
- ehea_interrupt_neq, IRQF_DISABLED,
+ ehea_interrupt_neq, 0,
"ehea_neq", adapter);
if (ret) {
dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index d300a0c0eaf..6b5c7222342 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2312,7 +2312,7 @@ static int emac_check_deps(struct emac_instance *dev,
if (deps[i].ofdev == NULL)
continue;
if (deps[i].drvdata == NULL)
- deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+ deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
if (deps[i].drvdata != NULL)
there++;
}
@@ -2799,9 +2799,9 @@ static int emac_probe(struct platform_device *ofdev)
/* display more info about what's missing ? */
goto err_reg_unmap;
}
- dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+ dev->mal = platform_get_drvdata(dev->mal_dev);
if (dev->mdio_dev != NULL)
- dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+ dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
/* Register with MAL */
dev->commac.ops = &emac_commac_ops;
@@ -2892,7 +2892,7 @@ static int emac_probe(struct platform_device *ofdev)
* fully initialized
*/
wmb();
- dev_set_drvdata(&ofdev->dev, dev);
+ platform_set_drvdata(ofdev, dev);
/* There's a new kid in town ! Let's tell everybody */
wake_up_all(&emac_probe_wait);
@@ -2951,12 +2951,10 @@ static int emac_probe(struct platform_device *ofdev)
static int emac_remove(struct platform_device *ofdev)
{
- struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct emac_instance *dev = platform_get_drvdata(ofdev);
DBG(dev, "remove" NL);
- dev_set_drvdata(&ofdev->dev, NULL);
-
unregister_netdev(dev->ndev);
cancel_work_sync(&dev->reset_work);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 856ea66c922..dac564c2544 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,8 +637,8 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL | __GFP_ZERO);
+ mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
goto fail_unmap;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 70fd5596884..5d41aee69d1 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -106,7 +106,7 @@ struct ibmveth_stat ibmveth_stats[] = {
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
- return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
@@ -132,7 +132,7 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
- return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}
static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 43a794fab9f..84066bafe05 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -164,14 +164,26 @@ struct ibmveth_adapter {
u64 tx_send_failed;
};
+/*
+ * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
+ * so we don't need to byteswap the two elements. However since we use
+ * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
+ * do end up with endian specific ordering of the elements and that
+ * needs correcting.
+ */
struct ibmveth_buf_desc_fields {
+#ifdef __BIG_ENDIAN
+ u32 flags_len;
+ u32 address;
+#else
+ u32 address;
u32 flags_len;
+#endif
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
- u32 address;
};
union ibmveth_buf_desc {
@@ -180,7 +192,7 @@ union ibmveth_buf_desc {
};
struct ibmveth_rx_q_entry {
- u32 flags_off;
+ __be32 flags_off;
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
@@ -188,7 +200,8 @@ struct ibmveth_rx_q_entry {
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
- u32 length;
+ __be32 length;
+ /* correlator is only used by the OS, no need to byte swap */
u64 correlator;
};
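Two different fixes are in play here: the rx queue entries written by the hypervisor are declared __be32 and converted with be32_to_cpu() in the accessors, while ibmveth_buf_desc_fields keeps native u32 members whose in-memory order flips under #ifdef __BIG_ENDIAN, because the same pair is also read back through the union as one u64 and passed in a register. A standalone, runnable demonstration of why that order matters (generic field names, not the driver's):

	#include <inttypes.h>
	#include <stdio.h>

	union pair {
		uint64_t whole;
		struct {
			uint32_t lo_addr_field;	/* field at the lower address  */
			uint32_t hi_addr_field;	/* field at the higher address */
		} parts;
	};

	int main(void)
	{
		union pair p;

		p.parts.lo_addr_field = 0x11111111u;
		p.parts.hi_addr_field = 0x22222222u;

		/* little-endian reads 0x2222222211111111,
		 * big-endian reads 0x1111111122222222
		 */
		printf("as u64: 0x%016" PRIx64 "\n", p.whole);
		return 0;
	}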
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1fde90b9668..bdf5023724e 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
/* Check to see if the NIC has been initialized via nic_open,
* before trying to read statistic registers.
*/
- if (!test_bit(__LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
return &sp->stats;
sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index f0e7ed20a75..149ac85b5f9 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,4 +241,22 @@ config IXGBEVF
will be called ixgbevf. MSI-X interrupt support is required
for this driver to work correctly.
+config I40E
+ tristate "Intel(R) Ethernet Controller XL710 Family support"
+ depends on PCI
+ ---help---
+ This driver supports Intel(R) Ethernet Controller XL710 Family of
+ devices. For more information on how to identify your adapter, go
+ to the Adapter & Driver ID Guide at:
+
+ <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ To compile this driver as a module, choose M here. The module
+ will be called i40e.
+
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index c8210e68866..5bae933efc7 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
+obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5115ae76a5d..ada6e210279 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
}
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
+ c + 0);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
+ c + 8);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
+ c + 16);
return 0;
}
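The three multi-argument hex dumps collapse into one specifier each because %*ph is a kernel printf extension (lib/vsprintf.c) that hex-dumps a small buffer, space-separated, from a single pointer argument. A short illustrative fragment (netdev_dbg and the cfg buffer are hypothetical, not taken from e100):

	u8 cfg[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };

	/* "%8ph" dumps 8 bytes: "de ad be ef 00 11 22 33" */
	netdev_dbg(netdev, "cfg[00-07]=%8ph\n", cfg);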
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 82a967c9559..73a8aeefb92 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1019,8 +1019,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1077,8 +1077,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
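dma_zalloc_coherent() wraps dma_alloc_coherent() and zeroes the allocation, so the zeroing no longer has to be requested by OR-ing __GFP_ZERO into the gfp flags. A minimal sketch of the allocate/free pairing (dma_zalloc_coherent() and dma_free_coherent() come from <linux/dma-mapping.h>; the ring_* wrappers are hypothetical):

	static void *ring_alloc(struct device *dev, size_t size, dma_addr_t *dma)
	{
		/* zeroed, DMA-coherent memory, or NULL on failure */
		return dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	}

	static void ring_free(struct device *dev, size_t size, void *desc,
			      dma_addr_t dma)
	{
		dma_free_coherent(dev, size, desc, dma);
	}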
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 4c303e2a7cb..8fed74e3fa5 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1011,6 +1011,11 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
+ case e1000_82573:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
/* Release mutex only if the hw semaphore is acquired */
@@ -2057,6 +2062,7 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ffbc08f56c4..ad0edd11015 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -90,9 +90,6 @@ struct e1000_info;
#define E1000_MNG_VLAN_NONE (-1)
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
-
#define DEFAULT_JUMBO 9234
/* Time to wait before putting the device into D3 if there's no link (in ms). */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59c22bf1870..d14c8f53384 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev,
speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex - 1;
}
- } else {
+ } else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
@@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int ret_val = 0;
+
+ pm_runtime_get_sync(netdev->dev.parent);
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
@@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev,
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev,
* duplex is forced.
*/
if (ecmd->eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper)
- return -EOPNOTSUPP;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = -EOPNOTSUPP;
+ goto out;
+ }
if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(ecmd->autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
}
@@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev,
u32 speed = ethtool_cmd_speed(ecmd);
/* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
- clear_bit(__E1000_RESETTING, &adapter->state);
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
}
@@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev,
e1000e_reset(adapter);
}
+out:
+ pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
- return 0;
+ return ret_val;
}
static void e1000_get_pauseparam(struct net_device *netdev,
@@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
+ pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
+ pm_runtime_get_sync(netdev->dev.parent);
+
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
@@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+
+ pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
+ pm_runtime_put_sync(netdev->dev.parent);
+
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
+ pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
e1000e_down(adapter);
/* We can't just free everything and then setup again, because the
@@ -739,6 +764,7 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -896,6 +922,14 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
else
mask &= ~(1 << 30);
}
+ if (mac->type == e1000_pch2lan) {
+ /* SHRAH[0,1,2] different than previous */
+ if (i == 7)
+ mask &= 0xFFF4FFFF;
+ /* SHRAH[3] different than SHRAH[0,1,2] */
+ if (i == 10)
+ mask |= (1 << 30);
+ }
REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
0xFFFFFFFF);
@@ -1639,7 +1673,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
- if (jiffies >= (time + 20)) {
+ if (time_after(jiffies, time + 20)) {
ret_val = 14; /* error code for time out error */
break;
}
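time_after() compares jiffies values with wraparound-safe arithmetic; a plain ">=" comparison gives the wrong answer once the jiffies counter wraps. The pattern, sketched with a hypothetical done() predicate:

	unsigned long timeout = jiffies + msecs_to_jiffies(20);

	while (!done()) {
		if (time_after(jiffies, timeout)) {
			/* deadline passed, even if jiffies wrapped meanwhile */
			break;
		}
		cpu_relax();
	}

The sketch times out after 20 ms; the loopback test above uses a raw count of 20 jiffies, which is timer-frequency dependent.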
@@ -1732,6 +1766,8 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
+ pm_runtime_get_sync(netdev->dev.parent);
+
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1817,6 +1853,8 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
+
+ pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -1891,6 +1929,8 @@ static int e1000_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (!hw->mac.ops.blink_led)
return 2; /* cycle on/off twice per second */
@@ -1902,6 +1942,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
hw->mac.ops.led_off(hw);
hw->mac.ops.cleanup_led(hw);
+ pm_runtime_put_sync(netdev->dev.parent);
break;
case ETHTOOL_ID_ON:
@@ -1912,6 +1953,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
hw->mac.ops.led_off(hw);
break;
}
+
return 0;
}
@@ -1950,11 +1992,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
+ pm_runtime_put_sync(netdev->dev.parent);
+
return 0;
}
@@ -1968,7 +2014,9 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
+ pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -1982,7 +2030,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
+ pm_runtime_get_sync(netdev->dev.parent);
+
e1000e_get_stats64(netdev, &net_stats);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2033,7 +2086,11 @@ static int e1000_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXFH: {
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 mrqc = er32(MRQC);
+ u32 mrqc;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+ mrqc = er32(MRQC);
+ pm_runtime_put_sync(netdev->dev.parent);
if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
@@ -2096,9 +2153,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
+ if (ret_val) {
+ pm_runtime_put_sync(netdev->dev.parent);
return -EBUSY;
+ }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2117,14 +2178,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (ret_val)
+ goto release;
if (hw->phy.type == e1000_phy_82579)
phy_data <<= 8;
-release:
- hw->phy.ops.release(hw);
- if (ret_val)
- return -ENODATA;
-
/* Result of the EEE auto negotiation - there is no register that
* has the status of the EEE negotiation so do a best-guess based
* on whether Tx or Rx LPI indications have been received.
@@ -2136,7 +2194,14 @@ release:
edata->tx_lpi_enabled = true;
edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
- return 0;
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ ret_val = -ENODATA;
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return ret_val;
}
static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
@@ -2169,12 +2234,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+ pm_runtime_get_sync(netdev->dev.parent);
+
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
return 0;
}
@@ -2212,19 +2281,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
-static int e1000e_ethtool_begin(struct net_device *netdev)
-{
- return pm_runtime_get_sync(netdev->dev.parent);
-}
-
-static void e1000e_ethtool_complete(struct net_device *netdev)
-{
- pm_runtime_put_sync(netdev->dev.parent);
-}
-
static const struct ethtool_ops e1000_ethtool_ops = {
- .begin = e1000e_ethtool_begin,
- .complete = e1000e_ethtool_complete,
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
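Removing the ethtool .begin/.complete hooks means every handler that touches the hardware now brackets its own register access with a runtime-PM reference, so the device is resumed only for the duration of the access. The recurring pattern, sketched as a hypothetical handler (er32() is the driver's register-read macro; pm_runtime_get_sync()/pm_runtime_put_sync() come from <linux/pm_runtime.h>):

	static int example_read_status(struct net_device *netdev, u32 *val)
	{
		struct e1000_adapter *adapter = netdev_priv(netdev);
		struct e1000_hw *hw = &adapter->hw;

		pm_runtime_get_sync(netdev->dev.parent);	/* resume if suspended */
		*val = er32(STATUS);				/* device is now in D0 */
		pm_runtime_put_sync(netdev->dev.parent);	/* drop the reference */

		return 0;
	}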
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index a6f903a9b77..b7f38435d1f 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -90,6 +90,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
#define E1000_REVISION_4 4
@@ -227,6 +231,10 @@ union e1000_rx_desc_extended {
};
#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
@@ -251,7 +259,8 @@ union e1000_rx_desc_packet_split {
} middle;
struct {
__le16 header_status;
- __le16 length[3]; /* length of buffers 1-3 */
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
} upper;
__le64 reserved;
} wb; /* writeback */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9dde390f7e7..42f0f671751 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
u32 phy_id = 0;
s32 ret_val;
u16 retry_count;
+ u32 mac_reg = 0;
for (retry_count = 0; retry_count < 2; retry_count++) {
ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
@@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (hw->phy.id) {
if (hw->phy.id == phy_id)
- return true;
+ goto out;
} else if (phy_id) {
hw->phy.id = phy_id;
hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
- return true;
+ goto out;
}
/* In case the PHY needs to be in mdio slow mode,
@@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
ret_val = e1000e_get_phy_id(hw);
hw->phy.ops.acquire(hw);
- return !ret_val;
+ if (ret_val)
+ return false;
+out:
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Unforce SMBus mode in PHY */
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
+ return true;
}
/**
@@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
- u16 phy_reg;
/* Gate automatic PHY configuration by hardware on managed and
* non-managed 82579 and newer adapters.
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msleep(50);
+
/* fall-through */
case e1000_pch2lan:
- if (e1000_phy_is_accessible_pchlan(hw)) {
- if (hw->mac.type == e1000_pch_lpt) {
- /* Unforce SMBus mode in PHY */
- e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
- phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Unforce SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
- }
+ if (e1000_phy_is_accessible_pchlan(hw))
break;
- }
/* fall-through */
case e1000_pchlan:
@@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
if (hw->phy.ops.check_reset_block(hw)) {
e_dbg("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
break;
}
@@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
- if (hw->mac.type == e1000_pch_lpt) {
- /* Toggling LANPHYPC brings the PHY out of SMBus mode
- * So ensure that the MAC is also out of SMBus mode
- */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
- }
-
/* Toggle LANPHYPC Value bit */
mac_reg = er32(CTRL);
mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
usleep_range(5000, 10000);
} while (!(er32(CTRL_EXT) &
E1000_CTRL_EXT_LPCD) && count--);
+ usleep_range(30000, 60000);
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
}
break;
default:
@@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
}
hw->phy.ops.release(hw);
-
- /* Reset the PHY before any access to it. Doing so, ensures
- * that the PHY is in a known good state before we read/write
- * PHY registers. The generic reset is sufficient here,
- * because we haven't determined the PHY type yet.
- */
- ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (!ret_val) {
+ /* Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ }
out:
/* Ungate automatic PHY configuration on non-managed 82579 */
@@ -793,29 +810,31 @@ release:
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
* preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
**/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
u32 fextnvm6 = er32(FEXTNVM6);
+ u32 status = er32(STATUS);
s32 ret_val = 0;
+ u16 reg;
- if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
- u16 kmrn_reg;
-
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val =
e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
- &kmrn_reg);
+ &reg);
if (ret_val)
goto release;
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg &
+ reg &
~E1000_KMRNCTRLSTA_K1_ENABLE);
if (ret_val)
goto release;
@@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg);
+ reg);
release:
hw->phy.ops.release(hw);
} else {
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
- ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if (!link || ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ ew32(FEXTNVM6, fextnvm6);
}
return ret_val;
@@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
@@ -1317,7 +1371,10 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
return;
}
- if (index < hw->mac.rar_entry_count) {
+ /* RAR[1-6] are owned by manageability. Skip those and program the
+ * next address into the SHRA register array.
+ */
+ if (index < (u32)(hw->mac.rar_entry_count - 6)) {
s32 ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1908,8 +1965,8 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
- /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
+ for (i = 0; i < (hw->mac.rar_entry_count); i++) {
mac_reg = er32(RAL(i));
hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
(u16)(mac_reg & 0xFFFF));
@@ -1953,10 +2010,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
if (enable) {
- /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ /* Write Rx addresses (rar_entry_count for RAL/H, and
* SHRAL/H) and initial CRC values to the MAC
*/
- for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+ for (i = 0; i < hw->mac.rar_entry_count; i++) {
u8 mac_addr[ETH_ALEN] = { 0 };
u32 addr_high, addr_low;
@@ -4168,7 +4225,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
u16 phy_reg, device_id = hw->adapter->pdev->device;
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 80034a2b297..217090df33e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -93,11 +93,12 @@
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
-#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES 11 /* RAR[0-6], SHRA[0-3] */
#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
#define PHY_PAGE_SHIFT 5
@@ -197,6 +198,11 @@
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+/* Inband Control */
+#define I217_INBAND_CTRL PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
+
/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_100_ENABLE 0x2000
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 77f81cbb601..4ef786775ac 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -64,8 +64,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
@@ -2979,17 +2977,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 pages = 0;
/* Workaround Si errata on PCHx - configure jumbo frame flow */
- if (hw->mac.type >= e1000_pch2lan) {
- s32 ret_val;
-
- if (adapter->netdev->mtu > ETH_DATA_LEN)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-
- if (ret_val)
- e_dbg("failed to enable jumbo frame workaround mode\n");
- }
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, true))
+ e_dbg("failed to enable jumbo frame workaround mode\n");
/* Program MC offset vector base */
rctl = er32(RCTL);
@@ -3826,6 +3817,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
}
+ pba = 14;
+ ew32(PBA, pba);
fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
break;
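With the PBA forced to 14 above, and treating pba as KiB (the << 10 converts it to bytes), the flow-control watermarks work out as follows before masking to the register fields:

	u32 pba  = 14;				/* packet buffer allocation, KiB */
	u32 high = (pba << 10) * 9 / 10;	/* 14336 * 9 / 10 = 12902 bytes  */
	u32 low  = (pba << 10) * 8 / 10;	/* 14336 * 8 / 10 = 11468 bytes  */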
@@ -4034,6 +4027,12 @@ void e1000e_down(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
+ /* Disable Si errata workaround on PCHx for jumbo frame flow */
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, false))
+ e_dbg("failed to disable jumbo frame workaround mode\n");
+
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
@@ -4683,11 +4682,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- if ((er32(STATUS) & E1000_STATUS_LU) &&
+ if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
+ (er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
- pm_runtime_get_sync(&adapter->pdev->dev);
ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4698,7 +4697,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
- pm_runtime_put_sync(&adapter->pdev->dev);
} else {
/* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
@@ -4870,7 +4868,7 @@ static void e1000_watchdog_task(struct work_struct *work)
*/
if ((hw->phy.type == e1000_phy_igp_3 ||
hw->phy.type == e1000_phy_bm) &&
- (hw->mac.autoneg == true) &&
+ hw->mac.autoneg &&
(adapter->link_speed == SPEED_10 ||
adapter->link_speed == SPEED_100) &&
(adapter->link_duplex == HALF_DUPLEX)) {
@@ -5995,15 +5993,24 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
*/
e1000e_release_hw_control(adapter);
+ pci_clear_master(pdev);
+
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
+ *
+	 * The upstream bridge may not be visible when the PCI device has
+	 * been assigned to a guest; KVM on Power is one such case.
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
u16 devctl;
+ if (!us_dev)
+ return 0;
+
pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
@@ -6017,38 +6024,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return 0;
}
-#ifdef CONFIG_PCIEASPM
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+/**
+ * e1000e_disable_aspm - Disable ASPM states
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_dis_mask = 0;
+ u16 pdev_aspmc, parent_aspmc;
+
+ switch (state) {
+ case PCIE_LINK_STATE_L0S:
+ case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+ /* fall-through - can't have L1 without L0s */
+ case PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+ break;
+ default:
+ return;
+ }
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspmc);
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ /* Nothing to do if the ASPM states to be disabled already are */
+ if (!(pdev_aspmc & aspm_dis_mask) &&
+ (!parent || !(parent_aspmc & aspm_dis_mask)))
+ return;
+
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
+ "L0s" : "",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
+ "L1" : "");
+
+#ifdef CONFIG_PCIEASPM
pci_disable_link_state_locked(pdev, state);
-}
-#else
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- u16 aspm_ctl = 0;
- if (state & PCIE_LINK_STATE_L0S)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & PCIE_LINK_STATE_L1)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+ /* Double-check ASPM control. If not disabled by the above, the
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+ * not enabled); override by writing PCI config space directly.
+ */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (!(aspm_dis_mask & pdev_aspmc))
+ return;
+#endif
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
- if (pdev->bus->self)
- pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- aspm_ctl);
-}
-#endif
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
- (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
- (state & PCIE_LINK_STATE_L1) ? "L1" : "");
-
- __e1000e_disable_aspm(pdev, state);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_dis_mask);
}
#ifdef CONFIG_PM
@@ -6723,10 +6765,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
- /* ring size defaults */
- adapter->rx_ring->count = E1000_DEFAULT_RXD;
- adapter->tx_ring->count = E1000_DEFAULT_TXD;
-
/* Initial Wake on LAN setting - If APM wake is enabled in
* the EEPROM, enable the ACPI Magic Packet filter
*/
@@ -6976,6 +7014,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
new file mode 100644
index 00000000000..479b2c4e552
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -0,0 +1,44 @@
+################################################################################
+#
+# Intel Ethernet Controller XL710 Family Linux Driver
+# Copyright(c) 2013 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) Ethernet Connection XL710 (i40e.ko) driver
+#
+
+obj-$(CONFIG_I40E) += i40e.o
+
+i40e-objs := i40e_main.o \
+ i40e_ethtool.o \
+ i40e_adminq.o \
+ i40e_common.o \
+ i40e_hmc.o \
+ i40e_lan_hmc.o \
+ i40e_nvm.o \
+ i40e_debugfs.o \
+ i40e_diag.o \
+ i40e_txrx.o \
+ i40e_virtchnl_pf.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
new file mode 100644
index 00000000000..b5252eb8a6c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -0,0 +1,558 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_H_
+#define _I40E_H_
+
+#include <net/tcp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/pkt_sched.h>
+#include <linux/ipv6.h>
+#include <linux/version.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+#include "i40e_virtchnl_pf.h"
+#include "i40e_txrx.h"
+
+/* Useful i40e defaults */
+#define I40E_BASE_PF_SEID 16
+#define I40E_BASE_VSI_SEID 512
+#define I40E_BASE_VEB_SEID 288
+#define I40E_MAX_VEB 16
+
+#define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_REGISTER 0x0038FFFF
+#define I40E_DEFAULT_NUM_DESCRIPTORS 512
+#define I40E_REQ_DESCRIPTOR_MULTIPLE 32
+#define I40E_MIN_NUM_DESCRIPTORS 64
+#define I40E_MIN_MSIX 2
+#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
+#define I40E_DEFAULT_QUEUES_PER_VF 4
+#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
+#define I40E_FDIR_RING 0
+#define I40E_FDIR_RING_COUNT 32
+#define I40E_MAX_AQ_BUF_SIZE 4096
+#define I40E_AQ_LEN 32
+#define I40E_AQ_WORK_LIMIT 16
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DEFAULT_MSG_ENABLE 4
+
+#define I40E_NVM_VERSION_LO_SHIFT 0
+#define I40E_NVM_VERSION_LO_MASK (0xf << I40E_NVM_VERSION_LO_SHIFT)
+#define I40E_NVM_VERSION_MID_SHIFT 4
+#define I40E_NVM_VERSION_MID_MASK (0xff << I40E_NVM_VERSION_MID_SHIFT)
+#define I40E_NVM_VERSION_HI_SHIFT 12
+#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
+
+/* magic for getting defines into strings */
+#define STRINGIFY(foo) #foo
+#define XSTRINGIFY(bar) STRINGIFY(bar)
+
+#ifndef ARCH_HAS_PREFETCH
+#define prefetch(X)
+#endif
+
+#define I40E_RX_DESC(R, i) \
+ ((ring_is_16byte_desc_enabled(R)) \
+ ? (union i40e_32byte_rx_desc *) \
+ (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
+ : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+#define I40E_TX_DESC(R, i) \
+ (&(((struct i40e_tx_desc *)((R)->desc))[i]))
+#define I40E_TX_CTXTDESC(R, i) \
+ (&(((struct i40e_tx_context_desc *)((R)->desc))[i]))
+#define I40E_TX_FDIRDESC(R, i) \
+ (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
+
+/* default to trying for four seconds */
+#define I40E_TRY_LINK_TIMEOUT (4 * HZ)
+
+/* driver state flags */
+enum i40e_state_t {
+ __I40E_TESTING,
+ __I40E_CONFIG_BUSY,
+ __I40E_CONFIG_DONE,
+ __I40E_DOWN,
+ __I40E_NEEDS_RESTART,
+ __I40E_SERVICE_SCHED,
+ __I40E_ADMINQ_EVENT_PENDING,
+ __I40E_MDD_EVENT_PENDING,
+ __I40E_VFLR_EVENT_PENDING,
+ __I40E_RESET_RECOVERY_PENDING,
+ __I40E_RESET_INTR_RECEIVED,
+ __I40E_REINIT_REQUESTED,
+ __I40E_PF_RESET_REQUESTED,
+ __I40E_CORE_RESET_REQUESTED,
+ __I40E_GLOBAL_RESET_REQUESTED,
+ __I40E_FILTER_OVERFLOW_PROMISC,
+};
+
+enum i40e_interrupt_policy {
+ I40E_INTERRUPT_BEST_CASE,
+ I40E_INTERRUPT_MEDIUM,
+ I40E_INTERRUPT_LOWEST
+};
+
+struct i40e_lump_tracking {
+ u16 num_entries;
+ u16 search_hint;
+ u16 list[0];
+#define I40E_PILE_VALID_BIT 0x8000
+};
+
+#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
+#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512
+struct i40e_fdir_data {
+ u16 q_index;
+ u8 flex_off;
+ u8 pctype;
+ u16 dest_vsi;
+ u8 dest_ctl;
+ u8 fd_status;
+ u16 cnt_index;
+ u32 fd_id;
+ u8 *raw_packet;
+};
+
+#define I40E_DCB_PRIO_TYPE_STRICT 0
+#define I40E_DCB_PRIO_TYPE_ETS 1
+#define I40E_DCB_STRICT_PRIO_CREDITS 127
+#define I40E_MAX_USER_PRIORITY 8
+/* DCB per TC information data structure */
+struct i40e_tc_info {
+ u16 qoffset; /* Queue offset from base queue */
+ u16 qcount; /* Total Queues */
+ u8 netdev_tc; /* Netdev TC index if netdev associated */
+};
+
+/* TC configuration data structure */
+struct i40e_tc_configuration {
+ u8 numtc; /* Total number of enabled TCs */
+ u8 enabled_tc; /* TC map */
+ struct i40e_tc_info tc_info[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* struct that defines the Ethernet device */
+struct i40e_pf {
+ struct pci_dev *pdev;
+ struct i40e_hw hw;
+ unsigned long state;
+ unsigned long link_check_timeout;
+ struct msix_entry *msix_entries;
+ u16 num_msix_entries;
+ bool fc_autoneg_status;
+
+ u16 eeprom_version;
+ u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */
+ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
+ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
+	u16 num_req_vfs;	   /* num vfs requested for this pf */
+ u16 num_vf_qps; /* num queue pairs per vf */
+ u16 num_tc_qps; /* num queue pairs per TC */
+ u16 num_lan_qps; /* num lan queues this pf has set up */
+ u16 num_lan_msix; /* num queue vectors for the base pf vsi */
+ u16 rss_size; /* num queues in the RSS array */
+ u16 rss_size_max; /* HW defined max RSS queues */
+ u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u8 atr_sample_rate;
+
+ enum i40e_interrupt_policy int_policy;
+ u16 rx_itr_default;
+ u16 tx_itr_default;
+ u16 msg_enable;
+ char misc_int_name[IFNAMSIZ + 9];
+ u16 adminq_work_limit; /* num of admin receive queue desc to process */
+ int service_timer_period;
+ struct timer_list service_timer;
+ struct work_struct service_task;
+
+ u64 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED (u64)(1 << 1)
+#define I40E_FLAG_MSI_ENABLED (u64)(1 << 2)
+#define I40E_FLAG_MSIX_ENABLED (u64)(1 << 3)
+#define I40E_FLAG_RX_1BUF_ENABLED (u64)(1 << 4)
+#define I40E_FLAG_RX_PS_ENABLED (u64)(1 << 5)
+#define I40E_FLAG_RSS_ENABLED (u64)(1 << 6)
+#define I40E_FLAG_MQ_ENABLED (u64)(1 << 7)
+#define I40E_FLAG_VMDQ_ENABLED (u64)(1 << 8)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT (u64)(1 << 9)
+#define I40E_FLAG_NEED_LINK_UPDATE (u64)(1 << 10)
+#define I40E_FLAG_IN_NETPOLL (u64)(1 << 13)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED (u64)(1 << 14)
+#define I40E_FLAG_CLEAN_ADMINQ (u64)(1 << 15)
+#define I40E_FLAG_FILTER_SYNC (u64)(1 << 16)
+#define I40E_FLAG_PROCESS_MDD_EVENT (u64)(1 << 18)
+#define I40E_FLAG_PROCESS_VFLR_EVENT (u64)(1 << 19)
+#define I40E_FLAG_SRIOV_ENABLED (u64)(1 << 20)
+#define I40E_FLAG_DCB_ENABLED (u64)(1 << 21)
+#define I40E_FLAG_FDIR_ENABLED (u64)(1 << 22)
+#define I40E_FLAG_FDIR_ATR_ENABLED (u64)(1 << 23)
+#define I40E_FLAG_MFP_ENABLED (u64)(1 << 27)
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ bool stat_offsets_loaded;
+ struct i40e_hw_port_stats stats;
+ struct i40e_hw_port_stats stats_offsets;
+ u32 tx_timeout_count;
+ u32 tx_timeout_recovery_level;
+ unsigned long tx_timeout_last_recovery;
+ u32 hw_csum_rx_error;
+ u32 led_status;
+ u16 corer_count; /* Core reset count */
+ u16 globr_count; /* Global reset count */
+ u16 empr_count; /* EMP reset count */
+ u16 pfr_count; /* PF reset count */
+
+ struct mutex switch_mutex;
+ u16 lan_vsi; /* our default LAN VSI */
+ u16 lan_veb; /* initial relay, if exists */
+#define I40E_NO_VEB 0xffff
+#define I40E_NO_VSI 0xffff
+ u16 next_vsi; /* Next unallocated VSI - 0-based! */
+ struct i40e_vsi **vsi;
+ struct i40e_veb *veb[I40E_MAX_VEB];
+
+ struct i40e_lump_tracking *qp_pile;
+ struct i40e_lump_tracking *irq_pile;
+
+ /* switch config info */
+ u16 pf_seid;
+ u16 main_vsi_seid;
+ u16 mac_seid;
+ struct i40e_aqc_get_switch_config_data *sw_config;
+ struct kobject *switch_kobj;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *i40e_dbg_pf;
+#endif /* CONFIG_DEBUG_FS */
+
+ /* sr-iov config info */
+ struct i40e_vf *vf;
+ int num_alloc_vfs; /* actual number of VFs allocated */
+ u32 vf_aq_requests;
+
+ /* DCBx/DCBNL capability for PF that indicates
+ * whether DCBx is managed by firmware or host
+ * based agent (LLDPAD). Also, indicates what
+ * flavor of DCBx protocol (IEEE/CEE) is supported
+ * by the device. For now we're supporting IEEE
+ * mode only.
+ */
+ u16 dcbx_cap;
+
+ u32 fcoe_hmc_filt_num;
+ u32 fcoe_hmc_cntx_num;
+ struct i40e_filter_control_settings filter_settings;
+};
+
+struct i40e_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+#define I40E_VLAN_ANY -1
+ s16 vlan;
+ u8 counter; /* number of instances of this filter */
+ bool is_vf; /* filter belongs to a VF */
+ bool is_netdev; /* filter belongs to a netdev */
+ bool changed; /* filter needs to be sync'd to the HW */
+};
+
+struct i40e_veb {
+ struct i40e_pf *pf;
+ u16 idx;
+ u16 veb_idx; /* index of VEB parent */
+ u16 seid;
+ u16 uplink_seid;
+ u16 stats_idx; /* index of VEB parent */
+ u8 enabled_tc;
+ u16 flags;
+ u16 bw_limit;
+ u8 bw_max_quanta;
+ bool is_abs_credits;
+ u8 bw_tc_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ u16 bw_tc_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ u8 bw_tc_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+ struct kobject *kobj;
+ bool stat_offsets_loaded;
+ struct i40e_eth_stats stats;
+ struct i40e_eth_stats stats_offsets;
+};
+
+/* struct that defines a VSI, associated with a dev */
+struct i40e_vsi {
+ struct net_device *netdev;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ bool netdev_registered;
+ bool stat_offsets_loaded;
+
+ u32 current_netdev_flags;
+ unsigned long state;
+#define I40E_VSI_FLAG_FILTER_CHANGED (1<<0)
+#define I40E_VSI_FLAG_VEB_OWNER (1<<1)
+ unsigned long flags;
+
+ struct list_head mac_filter_list;
+
+ /* VSI stats */
+ struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_offsets;
+ struct i40e_eth_stats eth_stats;
+ struct i40e_eth_stats eth_stats_offsets;
+ u32 tx_restart;
+ u32 tx_busy;
+ u32 rx_buf_failed;
+ u32 rx_page_failed;
+
+ /* These are arrays of rings, allocated at run-time */
+ struct i40e_ring *rx_rings;
+ struct i40e_ring *tx_rings;
+
+ u16 work_limit;
+ /* high bit set means dynamic, use accessor routines to read/write.
+ * hardware only supports 2us resolution for the ITR registers.
+ * these values always store the USER setting, and must be converted
+ * before programming to a register.
+ */
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ u16 max_frame;
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+
+ /* List of q_vectors allocated to this VSI */
+ struct i40e_q_vector *q_vectors;
+ int num_q_vectors;
+ int base_vector;
+
+ u16 seid; /* HW index of this VSI (absolute index) */
+ u16 id; /* VSI number */
+ u16 uplink_seid;
+
+ u16 base_queue; /* vsi's first queue in hw array */
+ u16 alloc_queue_pairs; /* Allocated Tx/Rx queues */
+ u16 num_queue_pairs; /* Used tx and rx pairs */
+ u16 num_desc;
+ enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
+ u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+
+ struct i40e_tc_configuration tc_config;
+ struct i40e_aqc_vsi_properties_data info;
+
+ /* VSI BW limit (absolute across all TCs) */
+ u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 bw_max_quanta; /* Max Quanta when BW limit is enabled */
+
+ /* Relative TC credits across VSIs */
+ u8 bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit credits within VSI */
+ u16 bw_ets_limit_credits[I40E_MAX_TRAFFIC_CLASS];
+ /* TC BW limit max quanta within VSI */
+ u8 bw_ets_max_quanta[I40E_MAX_TRAFFIC_CLASS];
+
+ struct i40e_pf *back; /* Backreference to associated PF */
+ u16 idx; /* index in pf->vsi[] */
+ u16 veb_idx; /* index of VEB parent */
+ struct kobject *kobj; /* sysfs object */
+
+ /* VSI specific handlers */
+ irqreturn_t (*irq_handler)(int irq, void *data);
+} ____cacheline_internodealigned_in_smp;
+
+struct i40e_netdev_priv {
+ struct i40e_vsi *vsi;
+};
+
+/* struct that defines an interrupt vector */
+struct i40e_q_vector {
+ struct i40e_vsi *vsi;
+
+ u16 v_idx; /* index in the vsi->q_vector array. */
+ u16 reg_idx; /* register index of the interrupt */
+
+ struct napi_struct napi;
+
+ struct i40e_ring_container rx;
+ struct i40e_ring_container tx;
+
+ u8 num_ringpairs; /* total number of ring pairs in vector */
+
+ char name[IFNAMSIZ + 9];
+ cpumask_t affinity_mask;
+} ____cacheline_internodealigned_in_smp;
+
+/* lan device */
+struct i40e_device {
+ struct list_head list;
+ struct i40e_pf *pf;
+};
+
+/**
+ * i40e_fw_version_str - format the FW and NVM version strings
+ * @hw: ptr to the hardware info
+ **/
+static inline char *i40e_fw_version_str(struct i40e_hw *hw)
+{
+ static char buf[32];
+
+ snprintf(buf, sizeof(buf),
+ "f%d.%d a%d.%d n%02d.%02d.%02d e%08x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
+ >> I40E_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_MID_MASK)
+ >> I40E_NVM_VERSION_MID_SHIFT,
+ (hw->nvm.version & I40E_NVM_VERSION_LO_MASK)
+ >> I40E_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack);
+
+ return buf;
+}
+
+/**
+ * i40e_netdev_to_pf: Retrieve the PF struct for given netdev
+ * @netdev: the corresponding netdev
+ *
+ * Return the PF struct for the given netdev
+ **/
+static inline struct i40e_pf *i40e_netdev_to_pf(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ return vsi->back;
+}
+
+static inline void i40e_vsi_setup_irqhandler(struct i40e_vsi *vsi,
+ irqreturn_t (*irq_handler)(int, void *))
+{
+ vsi->irq_handler = irq_handler;
+}
+
+/**
+ * i40e_rx_is_programming_status - check for programming status descriptor
+ * @qw: the first quad word of the program status descriptor
+ *
+ * The value in the descriptor length field indicates whether this is a
+ * programming status descriptor (for flow director or FCoE) when it equals
+ * I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise it is a packet descriptor.
+ **/
+static inline bool i40e_rx_is_programming_status(u64 qw)
+{
+ return I40E_RX_PROG_STATUS_DESC_LENGTH ==
+ (qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
+}
+
+/* needed by i40e_ethtool.c */
+int i40e_up(struct i40e_vsi *vsi);
+void i40e_down(struct i40e_vsi *vsi);
+extern const char i40e_driver_name[];
+extern const char i40e_driver_version_str[];
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
+void i40e_update_stats(struct i40e_vsi *vsi);
+void i40e_update_eth_stats(struct i40e_vsi *vsi);
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
+int i40e_fetch_switch_configuration(struct i40e_pf *pf,
+ bool printconfig);
+
+/* needed by i40e_main.c */
+void i40e_add_fdir_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+void i40e_add_remove_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+void i40e_update_fdir_filter(struct i40e_fdir_data fdir_data,
+ struct i40e_ring *tx_ring);
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+ struct i40e_pf *pf, bool add);
+
+void i40e_set_ethtool_ops(struct net_device *netdev);
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev);
+void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink, u32 param1);
+int i40e_vsi_release(struct i40e_vsi *vsi);
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
+ struct i40e_vsi *start_vsi);
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc);
+void i40e_veb_release(struct i40e_veb *veb);
+
+i40e_status i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid);
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi);
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi);
+void i40e_pf_reset_stats(struct i40e_pf *pf);
+#ifdef CONFIG_DEBUG_FS
+void i40e_dbg_pf_init(struct i40e_pf *pf);
+void i40e_dbg_pf_exit(struct i40e_pf *pf);
+void i40e_dbg_init(void);
+void i40e_dbg_exit(void);
+#else
+static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {}
+static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
+static inline void i40e_dbg_init(void) {}
+static inline void i40e_dbg_exit(void) {}
+#endif /* CONFIG_DEBUG_FS*/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev);
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+
+#endif /* _I40E_H_ */
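i40e_fw_version_str() above decodes hw->nvm.version with the HI/MID/LO shift-and-mask defines from earlier in this header. A worked example with a hypothetical version word of 0x1234:

	u16 version = 0x1234;	/* example value only */

	u16 hi  = (version & I40E_NVM_VERSION_HI_MASK)  >> I40E_NVM_VERSION_HI_SHIFT;	/* 0x1  */
	u16 mid = (version & I40E_NVM_VERSION_MID_MASK) >> I40E_NVM_VERSION_MID_SHIFT;	/* 0x23 */
	u16 lo  = (version & I40E_NVM_VERSION_LO_MASK)  >> I40E_NVM_VERSION_LO_SHIFT;	/* 0x4  */

	/* the "%02d.%02d.%02d" part of the snprintf then renders this as "01.35.04" */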
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
new file mode 100644
index 00000000000..cfef7fc32cd
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -0,0 +1,982 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ if (hw->mac.type == I40E_MAC_VF) {
+ hw->aq.asq.tail = I40E_VF_ATQT1;
+ hw->aq.asq.head = I40E_VF_ATQH1;
+ hw->aq.arq.tail = I40E_VF_ARQT1;
+ hw->aq.arq.head = I40E_VF_ARQH1;
+ } else {
+ hw->aq.asq.tail = I40E_PF_ATQT;
+ hw->aq.asq.head = I40E_PF_ATQH;
+ hw->aq.arq.tail = I40E_PF_ARQT;
+ hw->aq.arq.head = I40E_PF_ARQH;
+ }
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_virt_mem mem;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq_mem,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ hw->aq.asq.desc = hw->aq.asq_mem.va;
+ hw->aq.asq.dma_addr = hw->aq.asq_mem.pa;
+
+ ret_code = i40e_allocate_virt_mem(hw, &mem,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+ hw->aq.asq_mem.va = NULL;
+ hw->aq.asq_mem.pa = 0;
+ return ret_code;
+ }
+
+ hw->aq.asq.details = mem.va;
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+
+ ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq_mem,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ hw->aq.arq.desc = hw->aq.arq_mem.va;
+ hw->aq.arq.dma_addr = hw->aq.arq_mem.pa;
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+
+ i40e_free_dma_mem(hw, &hw->aq.asq_mem);
+ hw->aq.asq_mem.va = NULL;
+ hw->aq.asq_mem.pa = 0;
+ mem.va = hw->aq.asq.details;
+ i40e_free_virt_mem(hw, &mem);
+ hw->aq.asq.details = NULL;
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+ i40e_free_dma_mem(hw, &hw->aq.arq_mem);
+ hw->aq.arq_mem.va = NULL;
+ hw->aq.arq_mem.pa = 0;
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_aq_desc *desc;
+ struct i40e_virt_mem mem;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_arq_entries *
+ sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)mem.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ mem.va = hw->aq.arq.r.arq_bi;
+ i40e_free_virt_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ struct i40e_virt_mem mem;
+ struct i40e_dma_mem *bi;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = i40e_allocate_virt_mem(hw, &mem, (hw->aq.num_asq_entries *
+ sizeof(struct i40e_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)mem.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = i40e_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ I40E_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ mem.va = hw->aq.asq.r.asq_bi;
+ i40e_free_virt_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+ int i;
+
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ mem.va = hw->aq.arq.r.arq_bi;
+ i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+ struct i40e_virt_mem mem;
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* now free the buffer info list */
+ mem.va = hw->aq.asq.r.asq_bi;
+ i40e_free_virt_mem(hw, &mem);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static void i40e_config_asq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the transmit queue */
+ wr32(hw, I40E_VF_ATQBAH1, upper_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQBAL1, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
+ I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ } else {
+ /* configure the transmit queue */
+ wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.dma_addr));
+ wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
+ I40E_PF_ATQLEN_ATQENABLE_MASK));
+ }
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive queue (event queue)
+ **/
+static void i40e_config_arq_regs(struct i40e_hw *hw)
+{
+ if (hw->mac.type == I40E_MAC_VF) {
+ /* configure the receive queue */
+ wr32(hw, I40E_VF_ARQBAH1, upper_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQBAL1, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
+ I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ } else {
+ /* configure the receive queue */
+ wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.dma_addr));
+ wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
+ I40E_PF_ARQLEN_ARQENABLE_MASK));
+ }
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_asq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static i40e_status i40e_init_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ i40e_config_arq_regs(hw);
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.asq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ if (hw->mac.type == I40E_MAC_VF)
+ wr32(hw, I40E_VF_ATQLEN1, 0);
+ else
+ wr32(hw, I40E_PF_ATQLEN, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.asq_mutex);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+ /* free the ring descriptors */
+ i40e_free_adminq_asq(hw);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ if (hw->aq.arq.count == 0)
+ return I40E_ERR_NOT_READY;
+
+ /* Stop firmware AdminQ processing */
+ if (hw->mac.type == I40E_MAC_VF)
+ wr32(hw, I40E_VF_ARQLEN1, 0);
+ else
+ wr32(hw, I40E_PF_ARQLEN, 0);
+
+ /* make sure lock is available */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+ /* free the ring descriptors */
+ i40e_free_adminq_arq(hw);
+
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+i40e_status i40e_init_adminq(struct i40e_hw *hw)
+{
+ u16 eetrack_lo, eetrack_hi;
+ i40e_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* initialize locks */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ ret_code = i40e_aq_get_firmware_version(hw,
+ &hw->aq.fw_maj_ver, &hw->aq.fw_min_ver,
+ &hw->aq.api_maj_ver, &hw->aq.api_min_ver,
+ NULL);
+ if (ret_code)
+ goto init_adminq_free_arq;
+
+ if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
+ hw->aq.api_min_ver != I40E_FW_API_VERSION_MINOR) {
+ ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+ goto init_adminq_free_arq;
+ }
+ i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+ i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+ hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ /* setting the HMC resource profile is best-effort at this point,
+ * so the return code from this call is intentionally discarded
+ */
+ ret_code = i40e_aq_set_hmc_resource_profile(hw,
+ I40E_HMC_PROFILE_DEFAULT,
+ 0,
+ NULL);
+ ret_code = 0;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_arq:
+ i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
+ mutex_destroy(&hw->aq.asq_mutex);
+ mutex_destroy(&hw->aq.arq_mutex);
+
+init_adminq_exit:
+ return ret_code;
+}
+
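+/* A probe-time caller is expected to size the queues before calling
+ * i40e_init_adminq() above; a minimal sketch (the entry counts and buffer
+ * sizes here are hypothetical):
+ *
+ * hw->aq.num_asq_entries = 128;
+ * hw->aq.num_arq_entries = 128;
+ * hw->aq.asq_buf_size = 512;
+ * hw->aq.arq_buf_size = 512;
+ * ret_code = i40e_init_adminq(hw);
+ */
+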
+/**
+ * i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ /* destroy the locks */
+ mutex_destroy(&hw->aq.asq_mutex);
+ mutex_destroy(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * Returns the number of free descriptors in the send queue
+ **/
+static u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+ struct i40e_adminq_ring *asq = &(hw->aq.asq);
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = I40E_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ * i40e_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return (rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use);
+}
+
+/**
+ * i40e_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure used in async cleanup
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It runs the queue, cleans the queue, etc
+ **/
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status = 0;
+ struct i40e_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ u16 retval = 0;
+
+ if (hw->aq.asq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_exit;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff != NULL) {
+ dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+ u32 delay_len = 10;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (i40e_asq_done(hw))
+ break;
+ /* ugh! delay while spin_lock */
+ udelay(delay_len);
+ total_delay += delay_len;
+ } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (i40e_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff != NULL)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+asq_send_command_exit:
+ return status;
+}
+
+/**
+ * i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_EI | I40E_AQ_FLAG_SI);
+}
+
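+/* A minimal sketch of the direct-command flow a caller might follow using
+ * the two helpers above; the opcode here is only an example, and real
+ * command wrappers add their own command-specific fields:
+ *
+ * struct i40e_aq_desc desc;
+ * i40e_status status;
+ *
+ * i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+ * status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ * (on success the completed descriptor is copied back into desc)
+ */
+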
+/**
+ * i40e_clean_arq_element
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ i40e_status ret_code = 0;
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ struct i40e_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ /* set next_to_use to head */
+ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Queue is empty.\n");
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+ i40e_debug_aq(hw,
+ I40E_DEBUG_AQ_COMMAND,
+ (void *)desc,
+ hw->aq.arq.r.arq_bi[desc_idx].va);
+
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ i40e_debug(hw,
+ I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ } else {
+ memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc));
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_size = min(datalen, e->msg_size);
+ if (e->msg_buf != NULL && (e->msg_size != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_size);
+ }
+
+ /* Restore the original datalen and buffer address in the desc,
+ * FW updates datalen to indicate the event message
+ * size
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
+
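+/* A minimal sketch of how a caller might drain the ARQ with the function
+ * above (the buffer size is hypothetical; msg_size must be reset before
+ * each call because the function overwrites it with the received length):
+ *
+ * struct i40e_arq_event_info event;
+ * u16 pending;
+ *
+ * event.msg_buf = kzalloc(512, GFP_KERNEL);
+ * do {
+ * event.msg_size = 512;
+ * if (i40e_clean_arq_element(hw, &event, &pending))
+ * break;
+ * (handle le16_to_cpu(event.desc.opcode) here)
+ * } while (pending);
+ * kfree(event.msg_buf);
+ */
+
+/**
+ * i40e_resume_aq - bring the AQ back up after a PF reset
+ * @hw: pointer to the hardware structure
+ *
+ * Re-initializes the ring indices and re-programs the base/length
+ * registers, since the hardware registers are reset by a PF reset.
+ **/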
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+ u32 reg = 0;
+
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+ reg = hw->aq.num_asq_entries;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+ reg |= I40E_VF_ATQLEN1_ATQENABLE_MASK;
+ wr32(hw, I40E_VF_ATQLEN1, reg);
+ } else {
+ reg |= I40E_PF_ATQLEN_ATQENABLE_MASK;
+ wr32(hw, I40E_PF_ATQLEN, reg);
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+ reg = hw->aq.num_arq_entries;
+
+ if (hw->mac.type == I40E_MAC_VF) {
+ reg |= I40E_VF_ARQLEN1_ARQENABLE_MASK;
+ wr32(hw, I40E_VF_ARQLEN1, reg);
+ } else {
+ reg |= I40E_PF_ARQLEN_ARQENABLE_MASK;
+ wr32(hw, I40E_PF_ARQLEN, reg);
+ }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
new file mode 100644
index 00000000000..22e5ed683e4
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+ void *desc; /* Descriptor ring memory */
+ void *details; /* ASQ details */
+
+ union {
+ struct i40e_dma_mem *asq_bi;
+ struct i40e_dma_mem *arq_bi;
+ } r;
+
+ u64 dma_addr; /* Physical address of the ring */
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+};
+
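+/* The callback pointer is cast to I40E_ADMINQ_CALLBACK and invoked from
+ * i40e_clean_asq() once firmware consumes the descriptor, and a non-zero
+ * cookie is copied into the descriptor's cookie_high/cookie_low words by
+ * i40e_asq_send_command().
+ */
+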
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).details))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_size;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+ struct i40e_adminq_ring arq; /* receive queue */
+ struct i40e_adminq_ring asq; /* send queue */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ struct i40e_dma_mem asq_mem; /* send queue dynamic memory */
+ struct i40e_dma_mem arq_mem; /* receive queue dynamic memory */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 100000 /* usecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+ u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
new file mode 100644
index 00000000000..e61ebdd5a5f
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0000
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
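+/* As a sketch of how these flags combine in practice: a descriptor that
+ * carries an external buffer sets I40E_AQ_FLAG_BUF, and additionally
+ * I40E_AQ_FLAG_LB when that buffer is larger than I40E_AQ_LARGE_BUF, as in
+ * the receive-descriptor setup in i40e_alloc_arq_bufs().
+ */
+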
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed because of prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
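+/* A note on usage: i40e_asq_send_command() masks the firmware return value
+ * down to the low byte and records it in hw->aq.asq_last_status as one of
+ * these codes, while receive-side errors land in hw->aq.arq_last_status.
+ */
+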
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ i40e_aqc_opc_set_cppm_configuration = 0x0103,
+ i40e_aqc_opc_set_arp_proxy_entry = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mng_laa = 0x0106,
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ i40e_aqc_opc_set_storm_control_config = 0x0280,
+ i40e_aqc_opc_get_storm_control_config = 0x0281,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands*/
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_tunnel_key_structure = 0x0B10,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_get_deviceid = 0xFF00,
+ i40e_aqc_opc_debug_set_mode = 0xFF01,
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
+ i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+ i40e_aqc_opc_debug_modify_internals = 0xFF09,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both sent and returned uses _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
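+
+/* For example, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) below declares
+ * an enum whose initializer divides by zero unless
+ * sizeof(struct i40e_aqc_get_version) == 16, so any size drift in a command
+ * structure becomes a compile-time failure instead of a runtime surprise.
+ */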
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+ __le32 rom_ver;
+ __le32 fw_build;
+ __le16 fw_major;
+ __le16 fw_minor;
+ __le16 api_major;
+ __le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (direct 0x0002) */
+struct i40e_aqc_driver_version {
+ u8 driver_major_ver;
+ u8 driver_minor_ver;
+ u8 driver_build_ver;
+ u8 driver_subbuild_ver;
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM 1
+#define I40E_AQ_RESOURCE_SDP 2
+#define I40E_AQ_RESOURCE_ACCESS_READ 1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE 2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000
+
+struct i40e_aqc_request_resource {
+ __le16 resource_id;
+ __le16 access_type;
+ __le32 timeout;
+ __le32 resource_number;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+ u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+ __le16 id;
+ u8 major_rev;
+ u8 minor_rev;
+ __le32 number;
+ __le32 logical_id;
+ __le32 phys_id;
+ u8 reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE 0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006
+#define I40E_AQ_CAP_ID_SRIOV 0x0012
+#define I40E_AQ_CAP_ID_VF 0x0013
+#define I40E_AQ_CAP_ID_VMDQ 0x0014
+#define I40E_AQ_CAP_ID_8021QBG 0x0015
+#define I40E_AQ_CAP_ID_8021QBR 0x0016
+#define I40E_AQ_CAP_ID_VSI 0x0017
+#define I40E_AQ_CAP_ID_DCB 0x0018
+#define I40E_AQ_CAP_ID_FCOE 0x0021
+#define I40E_AQ_CAP_ID_RSS 0x0040
+#define I40E_AQ_CAP_ID_RXQ 0x0041
+#define I40E_AQ_CAP_ID_TXQ 0x0042
+#define I40E_AQ_CAP_ID_MSIX 0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX 0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045
+#define I40E_AQ_CAP_ID_1588 0x0046
+#define I40E_AQ_CAP_ID_IWARP 0x0051
+#define I40E_AQ_CAP_ID_LED 0x0061
+#define I40E_AQ_CAP_ID_SDP 0x0062
+#define I40E_AQ_CAP_ID_MDIO 0x0063
+#define I40E_AQ_CAP_ID_FLEX10 0x00F1
+#define I40E_AQ_CAP_ID_CEM 0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+ __le16 command_flags;
+#define I40E_AQ_CPPM_EN_LTRC 0x0800
+#define I40E_AQ_CPPM_EN_DMCTH 0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
+#define I40E_AQ_CPPM_EN_HPTC 0x4000
+#define I40E_AQ_CPPM_EN_DMARC 0x8000
+ __le16 ttlx;
+ __le32 dmacr;
+ __le16 dmcth;
+ u8 hptc;
+ u8 reserved;
+ __le32 pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+ __le16 command_flags;
+#define I40E_AQ_ARP_INIT_IPV4 0x0008
+#define I40E_AQ_ARP_UNSUP_CTL 0x0010
+#define I40E_AQ_ARP_ENA 0x0020
+#define I40E_AQ_ARP_ADD_IPV4 0x0040
+#define I40E_AQ_ARP_DEL_IPV4 0x0080
+ __le16 table_id;
+ __le32 pfpm_proxyfc;
+ __le32 ip_addr;
+ u8 mac_addr[6];
+};
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+ __le16 table_idx_mac_addr_0;
+ __le16 table_idx_mac_addr_1;
+ __le16 table_idx_ipv6_0;
+ __le16 table_idx_ipv6_1;
+ __le16 control;
+#define I40E_AQ_NS_PROXY_ADD_0 0x0100
+#define I40E_AQ_NS_PROXY_DEL_0 0x0200
+#define I40E_AQ_NS_PROXY_ADD_1 0x0400
+#define I40E_AQ_NS_PROXY_DEL_1 0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+ u8 mac_addr_0[6];
+ u8 mac_addr_1[6];
+ u8 local_mac_addr[6];
+ u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+ u8 ipv6_addr_1[16];
+};
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+ __le16 command_flags;
+#define I40E_AQ_LAA_FLAG_WR 0x8000
+ u8 reserved[2];
+ __le32 sal;
+ __le16 sah;
+ u8 reserved2[6];
+};
+
+/* Manage MAC Address Read Command (0x0107) */
+struct i40e_aqc_mac_address_read {
+ __le16 command_flags;
+#define I40E_AQC_LAN_ADDR_VALID 0x10
+#define I40E_AQC_SAN_ADDR_VALID 0x20
+#define I40E_AQC_PORT_ADDR_VALID 0x40
+#define I40E_AQC_WOL_ADDR_VALID 0x80
+#define I40E_AQC_ADDR_VALID_MASK 0xf0
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+ u8 pf_lan_mac[6];
+ u8 pf_san_mac[6];
+ u8 port_mac[6];
+ u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+ __le16 command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
+#define I40E_AQC_WRITE_TYPE_PORT 0x8000
+#define I40E_AQC_WRITE_TYPE_MASK 0xc000
+ __le16 mac_sah;
+ __le32 mac_sal;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass an SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+ __le16 seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+ __le16 num_reported;
+ __le16 num_total;
+ u8 reserved[12];
+};
+
+struct i40e_aqc_switch_config_element_resp {
+ u8 element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC 1
+#define I40E_AQ_SW_ELEM_TYPE_PF 2
+#define I40E_AQ_SW_ELEM_TYPE_VF 3
+#define I40E_AQ_SW_ELEM_TYPE_EMP 4
+#define I40E_AQ_SW_ELEM_TYPE_BMC 5
+#define I40E_AQ_SW_ELEM_TYPE_PV 16
+#define I40E_AQ_SW_ELEM_TYPE_VEB 17
+#define I40E_AQ_SW_ELEM_TYPE_PA 18
+#define I40E_AQ_SW_ELEM_TYPE_VSI 19
+ u8 revision;
+#define I40E_AQ_SW_ELEM_REV_1 1
+ __le16 seid;
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ u8 reserved[3];
+ u8 connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR 0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_CONN_TYPE_CASCADED 0x3
+ __le16 scheduler_id;
+ __le16 element_info;
+};
+
+/* Get Switch Configuration (indirect 0x0200)
+ * an array of elements is returned in the response buffer
+ * the first in the array is the header, the remainder are elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+ struct i40e_aqc_get_switch_config_header_resp header;
+ struct i40e_aqc_switch_config_element_resp element[1];
+};
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+ __le16 seid;
+ __le16 vlan;
+ __le16 stat_index;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+ __le16 command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
+ __le16 bad_frame_vsi;
+ __le16 default_seid; /* reserved for command */
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+ u8 num_entries; /* reserved for command */
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+ u8 resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB 0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI 0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG 0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13
+ u8 reserved1;
+ __le16 guaranteed;
+ __le16 total;
+ __le16 used;
+ __le16 total_unalloced;
+ u8 reserved2[6];
+};
+
+/* Add VSI (indirect 0x210)
+ * this indirect command uses struct i40e_aqc_vsi_properties_data
+ * as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ * Get VSI (indirect 0x0212)
+ * use the generic i40e_aqc_switch_seid descriptor format
+ * use the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+ __le16 uplink_seid;
+ u8 connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
+ u8 reserved1;
+ u8 vf_id;
+ u8 reserved2;
+ __le16 vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT 0x0
+#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF 0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2 0x1
+#define I40E_AQ_VSI_TYPE_PF 0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
+#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+ __le16 seid;
+ __le16 vsi_number;
+ __le16 vsi_used;
+ __le16 vsi_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress table */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+ __le16 command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE 0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8
+ __le16 uplink_seid;
+ __le16 connected_seid;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+ /* reserved for update; for add also encodes error if rc == ENOSPC */
+ __le16 pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+ __le16 seid;
+ __le16 default_stag;
+ __le16 pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE 0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4
+ u8 reserved[8];
+ __le16 default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+ __le16 uplink_seid;
+ __le16 downlink_seid;
+ __le16 veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING 0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8
+ u8 enable_tcs;
+ u8 reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+ u8 reserved[6];
+ __le16 switch_seid;
+ /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+ __le16 veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used for the command for most vlan commands */
+struct i40e_aqc_macvlan {
+ __le16 num_addresses;
+ __le16 seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ __le16 flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008
+ __le16 queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \
+ I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+ /* response section */
+ u8 match_method;
+#define I40E_AQC_MM_PERFECT_MATCH 0x01
+#define I40E_AQC_MM_HASH_MATCH 0x02
+#define I40E_AQC_MM_ERR_NO_RES 0xFF
+ u8 reserved1[3];
+};
+
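+/* Illustrative sketch, not part of the interface: a caller adding a single
+ * perfect-match filter that ignores the VLAN tag might fill one element
+ * roughly like this before handing the element array to the 0x0250 command:
+ *
+ *	struct i40e_aqc_add_macvlan_element_data e = {};
+ *
+ *	memcpy(e.mac_addr, mac, 6);
+ *	e.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ *	                      I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ */
+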
+struct i40e_aqc_add_remove_macvlan_completion {
+ __le16 perfect_mac_used;
+ __le16 perfect_mac_free;
+ __le16 unicast_hash_free;
+ __le16 multicast_hash_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+ u8 mac_addr[6];
+ __le16 vlan_tag;
+ u8 flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10
+ u8 reserved[3];
+ /* reply section */
+ u8 error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF
+ u8 reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+ __le16 vlan_tag;
+ u8 vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL 0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << \
+ I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT 3
+#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL 0x1
+ u8 reserved;
+ u8 result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS 0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF
+ u8 reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+ u8 reserved[4];
+ __le16 vlans_used;
+ __le16 vlans_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+ __le16 promiscuous_flags;
+ __le16 valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
+#define I40E_AQC_SET_VSI_DEFAULT 0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+ __le16 seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+ __le16 flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001
+ __le16 seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ __le16 queue_number;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+ __le16 seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 tag;
+ u8 reserved[12];
+};
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+ __le16 pv_seid;
+ __le16 etag;
+ u8 num_unicast_etags;
+ u8 reserved[3];
+ __le32 addr_high; /* address of array of 2-byte s-tags */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+ u8 reserved[4];
+ __le16 mcast_etags_used;
+ __le16 mcast_etags_free;
+ __le32 addr_high;
+ __le32 addr_low;
+
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+ __le16 seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+ __le16 old_tag;
+ __le16 new_tag;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+ u8 reserved[12];
+ __le16 tags_used;
+ __le16 tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter structure below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+ u8 mac[6];
+ __le16 etype;
+ __le16 flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
+ __le16 seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+ __le16 queue;
+ u8 reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+ __le16 mac_etype_used;
+ __le16 etype_used;
+ __le16 mac_etype_free;
+ __le16 etype_free;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+ u8 num_filters;
+ u8 reserved;
+ __le16 seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
+ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+ u8 outer_mac[6];
+ u8 inner_mac[6];
+ __le16 inner_vlan;
+ union {
+ struct {
+ u8 reserved[12];
+ u8 data[4];
+ } v4;
+ struct {
+ u8 data[16];
+ } v6;
+ } ipaddr;
+ __le16 flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP_GRE 0x0002
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_GRE 0x0004
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_VNL 0x0007
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100
+ __le32 key_low;
+ __le32 key_high;
+ __le16 queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x3F << \
+ I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+ u8 reserved[14];
+ /* response section */
+ u8 allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF
+ u8 response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+ __le16 perfect_ovlan_used;
+ __le16 perfect_ovlan_free;
+ __le16 vlan_used;
+ __le16 vlan_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ * take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+ __le16 seid;
+ __le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \
+ I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5
+ __le16 num_entries;
+ __le16 destination; /* VSI for add, rule id for delete */
+ __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+ u8 reserved[2];
+ __le16 rule_id; /* only used on add */
+ __le16 mirror_rules_used;
+ __le16 mirror_rules_free;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
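+/* Illustrative sketch (assumption, not defined by this file): a VLAN mirror
+ * rule (type 3) sends an indirect buffer that is simply an array of 2-byte
+ * VLAN ids, e.g.
+ *
+ *	__le16 vlans[2] = { cpu_to_le16(100), cpu_to_le16(200) };
+ *
+ * with num_entries set to 2 and addr_high/addr_low pointing at the array;
+ * rule types 4 and 5 send no buffer at all.
+ */
+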
+/* Set Storm Control Configuration (direct 0x0280)
+ * Get Storm Control Configuration (direct 0x0281)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_set_get_storm_control_config {
+ __le32 broadcast_threshold;
+ __le32 multicast_threshold;
+ __le32 control_flags;
+#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
+#define I40E_AQC_STORM_CONTROL_MDICW 0x02
+#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
+#define I40E_AQC_STORM_CONTROL_BDICW 0x08
+#define I40E_AQC_STORM_CONTROL_BIDU 0x10
+#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
+#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
+ I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
+
+/* DCB 0x03xx*/
+
+/* PFC Ignore (direct 0x0301)
+ * the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+ u8 tc_bitmap;
+ u8 command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET 0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR 0x0
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+ __le16 vsi_seid;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+ __le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+ __le16 vsi_seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_credit; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credits[8]; /* FW writes back QS handles here */
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ * responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 tc_bw_credits[8];
+ u8 reserved1[4];
+ __le16 qs_handles[8];
+};
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 tc_suspended_bits;
+ u8 reserved[14];
+ __le16 qs_handles[8];
+ u8 reserved1[4];
+ __le16 port_bw_limit;
+ u8 reserved2[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved3[23];
+};
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ u8 share_credits[8];
+ __le16 credits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+ __le16 seid;
+ u8 reserved[2];
+ __le16 credit;
+ u8 reserved1[2];
+ u8 max_bw; /* 0-3, limit = 2^max */
+ u8 reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable Physical Port ETS (indirect 0x0413)
+ * Modify Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_flags;
+ u8 reserved2[17];
+ u8 tc_bw_share_credits[8];
+ u8 reserved3[96];
+};
+
+/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+ u8 tc_valid_bits;
+ u8 reserved[15];
+ __le16 tc_bw_credit[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved1[28];
+};
+
+/* Configure Switching Component Bandwidth Allocation per Tc
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits; /* bool */
+ u8 tc_bw_share_credits[8];
+ u8 reserved1[20];
+};
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[35];
+ __le16 port_bw_limit;
+ u8 reserved1[2];
+ u8 tc_bw_max; /* 0-3, limit = 2^max */
+ u8 reserved2[23];
+};
+
+/* Query PhysicalPort ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+ u8 reserved[4];
+ u8 tc_valid_bits;
+ u8 reserved1;
+ u8 tc_strict_priority_bits;
+ u8 reserved2;
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+ u8 reserved3[32];
+};
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+ u8 tc_valid_bits;
+ u8 reserved[2];
+ u8 absolute_credits_enable; /* bool */
+ u8 tc_bw_share_credits[8];
+ __le16 tc_bw_limits[8];
+
+ /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+ __le16 tc_bw_max[2];
+};
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002
+
+enum i40e_aq_phy_type {
+ I40E_PHY_TYPE_SGMII = 0x0,
+ I40E_PHY_TYPE_1000BASE_KX = 0x1,
+ I40E_PHY_TYPE_10GBASE_KX4 = 0x2,
+ I40E_PHY_TYPE_10GBASE_KR = 0x3,
+ I40E_PHY_TYPE_40GBASE_KR4 = 0x4,
+ I40E_PHY_TYPE_XAUI = 0x5,
+ I40E_PHY_TYPE_XFI = 0x6,
+ I40E_PHY_TYPE_SFI = 0x7,
+ I40E_PHY_TYPE_XLAUI = 0x8,
+ I40E_PHY_TYPE_XLPPI = 0x9,
+ I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
+ I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_100BASE_TX = 0x11,
+ I40E_PHY_TYPE_1000BASE_T = 0x12,
+ I40E_PHY_TYPE_10GBASE_T = 0x13,
+ I40E_PHY_TYPE_10GBASE_SR = 0x14,
+ I40E_PHY_TYPE_10GBASE_LR = 0x15,
+ I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16,
+ I40E_PHY_TYPE_10GBASE_CR1 = 0x17,
+ I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
+ I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
+ I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
+
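+/* Note: the link_speed fields below are bitmaps of the enum values above,
+ * so a port capable of 1G and 10G would report, for example,
+ * (I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_10GB).
+ */
+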
+struct i40e_aqc_module_desc {
+ u8 oui[3];
+ u8 reserved1;
+ u8 part_number[16];
+ u8 revision[4];
+ u8 reserved2[8];
+};
+
+struct i40e_aq_get_phy_abilities_resp {
+ __le32 phy_type; /* bitmap using the above enum for offsets */
+ u8 link_speed; /* bitmap using the above enum */
+ u8 abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
+#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
+#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
+#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
+#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
+#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+ __le16 eee_capability;
+#define I40E_AQ_EEE_100BASE_TX 0x0002
+#define I40E_AQ_EEE_1000BASE_T 0x0004
+#define I40E_AQ_EEE_10GBASE_T 0x0008
+#define I40E_AQ_EEE_1000BASE_KX 0x0010
+#define I40E_AQ_EEE_10GBASE_KX4 0x0020
+#define I40E_AQ_EEE_10GBASE_KR 0x0040
+ __le32 eeer_val;
+ u8 d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
+ u8 reserved[3];
+ u8 phy_id[4];
+ u8 module_type[3];
+ u8 qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS 16
+ struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as above in all */
+ __le32 phy_type;
+ u8 link_speed;
+ u8 abilities;
+ __le16 eee_capability;
+ __le32 eeer;
+ u8 low_power_ctrl;
+ u8 reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+ __le16 max_frame_size;
+ u8 params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+ u8 tx_timer_priority; /* bitmap */
+ __le16 tx_timer_value;
+ __le16 fc_refresh_threshold;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x605) */
+struct i40e_aqc_set_link_restart_an {
+ u8 command;
+#define I40E_AQ_PHY_RESTART_AN 0x02
+#define I40E_AQ_PHY_LINK_ENABLE 0x04
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+ __le16 command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK 0x3
+#define I40E_AQ_LSE_NOP 0x0
+#define I40E_AQ_LSE_DISABLE 0x2
+#define I40E_AQ_LSE_ENABLE 0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED 0x1
+ u8 phy_type; /* i40e_aq_phy_type */
+ u8 link_speed; /* i40e_aq_link_speed */
+ u8 link_info;
+#define I40E_AQ_LINK_UP 0x01
+#define I40E_AQ_LINK_FAULT 0x02
+#define I40E_AQ_LINK_FAULT_TX 0x04
+#define I40E_AQ_LINK_FAULT_RX 0x08
+#define I40E_AQ_LINK_FAULT_REMOTE 0x10
+#define I40E_AQ_MEDIA_AVAILABLE 0x40
+#define I40E_AQ_SIGNAL_DETECT 0x80
+ u8 an_info;
+#define I40E_AQ_AN_COMPLETED 0x01
+#define I40E_AQ_LP_AN_ABILITY 0x02
+#define I40E_AQ_PD_FAULT 0x04
+#define I40E_AQ_FEC_EN 0x08
+#define I40E_AQ_PHY_LOW_POWER 0x10
+#define I40E_AQ_LINK_PAUSE_TX 0x20
+#define I40E_AQ_LINK_PAUSE_RX 0x40
+#define I40E_AQ_QUALIFIED_MODULE 0x80
+ u8 ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02
+#define I40E_AQ_LINK_TX_SHIFT 0x02
+#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE 0x00
+#define I40E_AQ_LINK_TX_DRAINED 0x01
+#define I40E_AQ_LINK_TX_FLUSHED 0x03
+ u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
+ __le16 max_frame_size;
+ u8 config;
+#define I40E_AQ_CONFIG_CRC_ENA 0x04
+#define I40E_AQ_CONFIG_PACING_MASK 0x78
+ u8 reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
+
+/* Set event mask command (direct 0x613) */
+struct i40e_aqc_set_phy_int_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002
+#define I40E_AQ_EVENT_MEDIA_NA 0x0004
+#define I40E_AQ_EVENT_LINK_FAULT 0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED 0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200
+ u8 reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
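+/* Illustrative sketch: a driver interested only in link transitions would
+ * set event_mask = cpu_to_le16(I40E_AQ_EVENT_LINK_UPDOWN) and leave the
+ * other event bits clear.
+ */
+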
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+ __le32 local_an_reg0;
+ __le16 local_an_reg1;
+ u8 reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+ __le16 lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL 0x01
+#define I40E_AQ_LB_PHY_REMOTE 0x02
+#define I40E_AQ_LB_MAC_LOCAL 0x04
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Reset command (0x0622) */
+struct i40e_aqc_set_phy_reset {
+ u8 reset_flags;
+#define I40E_AQ_PHY_RESET_REQUEST 0x02
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+
+enum i40e_aq_phy_reg_type {
+ I40E_AQC_PHY_REG_INTERNAL = 0x1,
+ I40E_AQC_PHY_REG_EXTERNAL_BASET = 0x2,
+ I40E_AQC_PHY_REG_EXTERNAL_MODULE = 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+ u8 command_flags;
+#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_FLASH_ONLY 0x80
+ u8 module_pointer;
+ __le16 length;
+ __le32 offset;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+ __le32 address0;
+ __le32 data0;
+ __le32 address1;
+ __le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+ __le32 address;
+ __le32 length;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+ __le16 cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED 2
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+ __le32 mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE 0
+#define I40E_AQ_ALTERNATE_MODE_OEM 1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+ __le32 prtdcb_rupto;
+ __le32 otx_ctl;
+ u8 reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+ u8 type;
+ u8 reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3
+#define I40E_AQ_LLDP_MIB_LOCAL 0x0
+#define I40E_AQ_LLDP_MIB_REMOTE 0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1
+#define I40E_AQ_LLDP_TX_SHIFT 0x4
+#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+ u8 command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved1[1];
+ __le16 len;
+ u8 reserved2[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+ u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */
+ u8 reserved;
+ __le16 old_len;
+ __le16 new_offset;
+ __le16 new_len;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_START 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Apply MIB changes (0x0A07)
+ * uses the generic struct as it contains no data
+ */
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+ __le16 udp_port;
+ u8 header_len; /* in DWords, 1 to 15 */
+ u8 protocol_index;
+#define I40E_AQC_TUNNEL_TYPE_MAC 0x0
+#define I40E_AQC_TUNNEL_TYPE_UDP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
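+/* Illustrative sketch (assumption): registering the IANA-assigned VXLAN port
+ * would set udp_port = cpu_to_le16(4789); the filter is later torn down by
+ * the 0x0B01 remove command, which identifies it by index (0 to 15).
+ */
+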
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+ u8 reserved[2];
+ u8 index; /* 0 to 15 */
+ u8 pf_filters;
+ u8 total_filters;
+ u8 reserved2[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+ __le16 udp_port;
+ u8 index; /* 0 to 15 */
+ u8 multiple_entries;
+ u8 tunnels_used;
+ u8 reserved;
+ u8 tunnels_free;
+ u8 reserved1[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+
+/* tunnel key structure 0x0B10 */
+struct i40e_aqc_tunnel_key_structure {
+ __le16 key1_off;
+ __le16 key1_len;
+ __le16 key2_off;
+ __le16 key2_len;
+ __le16 flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03
+ u8 reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+ __le32 param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1
+#define I40E_AQ_OEM_PARAM_MAC 2
+ __le32 param_value1;
+ u8 param_value2[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+ __le32 state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0
+#define I40E_AQ_OEM_STATE_LINK_UP 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+ u8 mode;
+#define I40E_AQ_TEST_PARTIAL 0
+#define I40E_AQ_TEST_FULL 1
+#define I40E_AQ_TEST_NVM 2
+ u8 reserved[3];
+ u8 command;
+#define I40E_AQ_TEST_OPEN 0
+#define I40E_AQ_TEST_CLOSE 1
+#define I40E_AQ_TEST_INC 2
+ u8 reserved2[3];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+ __le32 reserved;
+ __le32 address;
+ __le32 value_high;
+ __le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+ __le32 address;
+ __le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+ __le32 address;
+ __le32 value;
+ __le32 clear_mask;
+ __le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX 0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1
+#define I40E_AQ_CLUSTER_ID_TXSCHED 2
+#define I40E_AQ_CLUSTER_ID_HMC 3
+#define I40E_AQ_CLUSTER_ID_MAC0 4
+#define I40E_AQ_CLUSTER_ID_MAC1 5
+#define I40E_AQ_CLUSTER_ID_MAC2 6
+#define I40E_AQ_CLUSTER_ID_MAC3 7
+#define I40E_AQ_CLUSTER_ID_DCB 8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM 9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF 10
+
+struct i40e_aqc_debug_dump_internals {
+ u8 cluster_id;
+ u8 table_id;
+ __le16 data_size;
+ __le32 idx;
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+ u8 cluster_id;
+ u8 cluster_specific_params[7];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
new file mode 100644
index 00000000000..3b1cc214f9d
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -0,0 +1,59 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+ i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
+ i40e_mem_asq_buf = 1,
+ i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
+ i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
+ i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
+ i40e_mem_pd = 5, /* Page Descriptor */
+ i40e_mem_bp = 6, /* Backing Page - 4KB */
+ i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
+ i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
new file mode 100644
index 00000000000..1e4ea134975
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -0,0 +1,2041 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+
+ if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (hw->device_id) {
+ case I40E_SFP_XL710_DEVICE_ID:
+ case I40E_SFP_X710_DEVICE_ID:
+ case I40E_QEMU_DEVICE_ID:
+ case I40E_KX_A_DEVICE_ID:
+ case I40E_KX_B_DEVICE_ID:
+ case I40E_KX_C_DEVICE_ID:
+ case I40E_KX_D_DEVICE_ID:
+ case I40E_QSFP_A_DEVICE_ID:
+ case I40E_QSFP_B_DEVICE_ID:
+ case I40E_QSFP_C_DEVICE_ID:
+ hw->mac.type = I40E_MAC_XL710;
+ break;
+ case I40E_VF_DEVICE_ID:
+ case I40E_VF_HV_DEVICE_ID:
+ hw->mac.type = I40E_MAC_VF;
+ break;
+ default:
+ hw->mac.type = I40E_MAC_GENERIC;
+ break;
+ }
+ } else {
+ status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
+ hw->mac.type, status);
+ return status;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask related to admin queue
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+ void *buffer)
+{
+ struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u8 *aq_buffer = (u8 *)buffer;
+ u32 data[4];
+ u32 i = 0;
+
+ if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ return;
+
+ i40e_debug(hw, mask,
+ "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+ aq_desc->opcode, aq_desc->flags, aq_desc->datalen,
+ aq_desc->retval);
+ i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ aq_desc->cookie_high, aq_desc->cookie_low);
+ i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ aq_desc->params.internal.param0,
+ aq_desc->params.internal.param1);
+ i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ aq_desc->params.external.addr_high,
+ aq_desc->params.external.addr_low);
+
+ if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ memset(data, 0, sizeof(data));
+ i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+ for (i = 0; i < le16_to_cpu(aq_desc->datalen); i++) {
+ data[((i % 16) / 4)] |=
+ ((u32)aq_buffer[i]) << (8 * (i % 4));
+ if ((i % 16) == 15) {
+ i40e_debug(hw, mask,
+ "\t0x%04X %08X %08X %08X %08X\n",
+ i - 15, data[0], data[1], data[2],
+ data[3]);
+ memset(data, 0, sizeof(data));
+ }
+ }
+ if ((i % 16) != 0)
+ i40e_debug(hw, mask, "\t0x%04X %08X %08X %08X %08X\n",
+ i - (i % 16), data[0], data[1], data[2],
+ data[3]);
+ }
+}
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function. The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+ u32 reg;
+
+ hw->phy.get_link_info = true;
+
+ /* Determine port number */
+ reg = rd32(hw, I40E_PFGEN_PORTNUM);
+ reg = ((reg & I40E_PFGEN_PORTNUM_PORT_NUM_MASK) >>
+ I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT);
+ hw->port = (u8)reg;
+
+ i40e_set_mac_type(hw);
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ break;
+ default:
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ status = i40e_init_nvm(hw);
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_read *cmd_data =
+ (struct i40e_aqc_mac_address_read *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
+
+ status = i40e_asq_send_command(hw, &desc, addrs,
+ sizeof(*addrs), cmd_details);
+ *flags = le16_to_cpu(cmd_data->command_flags);
+
+ return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_mac_address_write *cmd_data =
+ (struct i40e_aqc_mac_address_write *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_mac_address_write);
+ cmd_data->command_flags = cpu_to_le16(flags);
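+ /* the 6-byte MAC is split across SAL (low 4 bytes) and SAH (high 2 bytes) */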
+ memcpy(&cmd_data->mac_sal, &mac_addr[0], 4);
+ memcpy(&cmd_data->mac_sah, &mac_addr[4], 2);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ i40e_status status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+ if (flags & I40E_AQC_LAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+ return status;
+}
+
+/**
+ * i40e_validate_mac_addr - Validate MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+i40e_status i40e_validate_mac_addr(u8 *mac_addr)
+{
+ i40e_status status = 0;
+
+ /* Make sure it is not a multicast address */
+ if (I40E_IS_MULTICAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is multicast\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ /* Not a broadcast address */
+ } else if (I40E_IS_BROADCAST(mac_addr)) {
+ hw_dbg(hw, "MAC address is broadcast\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ /* Reject the zero address */
+ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+ hw_dbg(hw, "MAC address is all zeros\n");
+ status = I40E_ERR_INVALID_MAC_ADDR;
+ }
+ return status;
+}
+
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+i40e_status i40e_pf_reset(struct i40e_hw *hw)
+{
+ u32 wait_cnt = 0;
+ u32 reg = 0;
+ u32 grst_del;
+
+ /* Poll for Global Reset steady state in case of recent GRST.
+ * The grst delay value is in 100ms units, and we'll wait a
+ * couple counts longer to be sure we don't just miss the end.
+ */
+ grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK)
+ >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+ for (wait_cnt = 0; wait_cnt < grst_del + 2; wait_cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ break;
+ msleep(100);
+ }
+ if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Global reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+
+ /* Determine the PF number based on the PCI fn */
+ hw->pf_id = (u8)hw->bus.func;
+
+ /* If there was a Global Reset in progress when we got here,
+ * we don't need to do the PF Reset
+ */
+ if (!wait_cnt) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ wr32(hw, I40E_PFGEN_CTRL,
+ (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ for (wait_cnt = 0; wait_cnt < 10; wait_cnt++) {
+ reg = rd32(hw, I40E_PFGEN_CTRL);
+ if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ hw_dbg(hw, "PF reset polling failed to complete.\n");
+ return I40E_ERR_RESET_FAILED;
+ }
+ }
+
+ i40e_clear_pxe_mode(hw);
+ return 0;
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ /* Clear single descriptor fetch/write-back mode */
+ reg = rd32(hw, I40E_GLLAN_RCTL_0);
+ wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
+}
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+ u32 gpio_val = 0;
+ u32 mode = 0;
+ u32 port;
+ int i;
+
+ for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+ if (!hw->func_caps.led[i])
+ continue;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+ >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ if (port != hw->port)
+ continue;
+
+ mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+ >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+ break;
+ }
+
+ return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, else on (see EAS for mode details)
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode)
+{
+ u32 gpio_val = 0;
+ u32 led_mode = 0;
+ u32 port;
+ int i;
+
+ for (i = 0; i < I40E_HW_CAP_MAX_GPIO; i++) {
+ if (!hw->func_caps.led[i])
+ continue;
+
+ gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(i));
+ port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK)
+ >> I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+ if (port != hw->port)
+ continue;
+
+ led_mode = (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+ I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+ gpio_val |= led_mode;
+ wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+ }
+}
+
+/* Admin command wrappers */
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_queue_shutdown *cmd =
+ (struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_queue_shutdown);
+
+ if (unloading)
+ cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_link_restart_an *cmd =
+ (struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_link_restart_an);
+
+ cmd->command = I40E_AQ_PHY_RESTART_AN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_link_status *resp =
+ (struct i40e_aqc_get_link_status *)&desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ i40e_status status;
+ u16 command_flags;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+ if (enable_lse)
+ command_flags = I40E_AQ_LSE_ENABLE;
+ else
+ command_flags = I40E_AQ_LSE_DISABLE;
+ resp->command_flags = cpu_to_le16(command_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status)
+ goto aq_get_link_info_exit;
+
+ /* save off old link status information */
+ memcpy(&hw->phy.link_info_old, hw_link_info,
+ sizeof(struct i40e_link_status));
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+ hw_link_info->link_info = resp->link_info;
+ hw_link_info->an_info = resp->an_info;
+ hw_link_info->ext_info = resp->ext_info;
+
+ if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
+ hw_link_info->lse_enable = true;
+ else
+ hw_link_info->lse_enable = false;
+
+ /* save link status information */
+ if (link)
+ *link = *hw_link_info;
+
+ /* flag cleared so helper functions don't call AQ again */
+ hw->phy.get_link_info = false;
+
+aq_get_link_info_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+**/
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_get_update_vsi *cmd =
+ (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_vsi);
+
+ cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid);
+ cmd->connection_type = vsi_ctx->connection_type;
+ cmd->vf_id = vsi_ctx->vf_num;
+ cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
+
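+ /* the VSI context is sent to FW in the indirect buffer, so set both the buffer and read flags */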
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ if (status)
+ goto aq_add_vsi_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_add_vsi_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: true to set filter, false to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set_filter)
+ cmd->promiscuous_flags
+ |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ else
+ cmd->promiscuous_flags
+ &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_vsi_parameters);
+
+ cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), NULL);
+
+ if (status)
+ goto aq_get_vsi_params_exit;
+
+ vsi_ctx->seid = le16_to_cpu(resp->seid);
+ vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_update_vsi_parameters);
+ cmd->seid = cpu_to_le16(vsi_ctx->seid);
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info), cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *scfg =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_switch_config);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ scfg->seid = cpu_to_le16(*start_seid);
+
+ status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+ *start_seid = le16_to_cpu(scfg->seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_version *resp =
+ (struct i40e_aqc_get_version *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status) {
+ if (fw_major_version != NULL)
+ *fw_major_version = le16_to_cpu(resp->fw_major);
+ if (fw_minor_version != NULL)
+ *fw_minor_version = le16_to_cpu(resp->fw_minor);
+ if (api_major_version != NULL)
+ *api_major_version = le16_to_cpu(resp->api_major);
+ if (api_minor_version != NULL)
+ *api_minor_version = le16_to_cpu(resp->api_minor);
+ }
+
+ return status;
+}
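+
+/* Example (illustrative sketch, not called anywhere in the driver): a caller
+ * holding an initialized struct i40e_hw could log the firmware and AQ API
+ * versions like this; the 'hw' and 'dev' variables are assumed to exist.
+ *
+ *	u16 fw_maj, fw_min, api_maj, api_min;
+ *
+ *	if (!i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min,
+ *					  &api_maj, &api_min, NULL))
+ *		dev_info(dev, "fw %d.%d, aq api %d.%d\n",
+ *			 fw_maj, fw_min, api_maj, api_min);
+ */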
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor, build and subbuild versions
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_driver_version *cmd =
+ (struct i40e_aqc_driver_version *)&desc.params.raw;
+ i40e_status status;
+
+ if (dv == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+ desc.flags |= cpu_to_le16(I40E_AQ_FLAG_SI);
+ cmd->driver_major_ver = dv->major_version;
+ cmd->driver_minor_ver = dv->minor_version;
+ cmd->driver_build_ver = dv->build_version;
+ cmd->driver_subbuild_ver = dv->subbuild_version;
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if link is up, false if link is down.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+bool i40e_get_link_status(struct i40e_hw *hw)
+{
+ i40e_status status = 0;
+ bool link_status = false;
+
+ if (hw->phy.get_link_info) {
+ status = i40e_aq_get_link_info(hw, true, NULL, NULL);
+
+ if (status)
+ goto i40e_get_link_status_exit;
+ }
+
+ link_status = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+i40e_get_link_status_exit:
+ return link_status;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: true for default port VSI, false for data port
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements. If the uplink SEID is 0, this will be a floating VEB.
+ **/
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_veb *cmd =
+ (struct i40e_aqc_add_veb *)&desc.params.raw;
+ struct i40e_aqc_add_veb_completion *resp =
+ (struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+ i40e_status status;
+ u16 veb_flags = 0;
+
+ /* SEIDs need to either both be set or both be 0 for floating VEB */
+ if (!!uplink_seid != !!downlink_seid)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+ cmd->uplink_seid = cpu_to_le16(uplink_seid);
+ cmd->downlink_seid = cpu_to_le16(downlink_seid);
+ cmd->enable_tcs = enabled_tc;
+ if (!uplink_seid)
+ veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+ if (default_port)
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+ else
+ veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+ cmd->veb_flags = cpu_to_le16(veb_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (!status && veb_seid)
+ *veb_seid = le16_to_cpu(resp->veb_seid);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to true if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by this function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * veb_seid, and returns them to the caller.
+ **/
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+ (struct i40e_aqc_get_veb_parameters_completion *)
+ &desc.params.raw;
+ i40e_status status;
+
+ if (veb_seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_veb_parameters);
+ cmd_resp->seid = cpu_to_le16(veb_seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ if (status)
+ goto get_veb_exit;
+
+ if (switch_id)
+ *switch_id = le16_to_cpu(cmd_resp->switch_id);
+ if (statistic_index)
+ *statistic_index = le16_to_cpu(cmd_resp->statistic_index);
+ if (vebs_used)
+ *vebs_used = le16_to_cpu(cmd_resp->vebs_used);
+ if (vebs_free)
+ *vebs_free = le16_to_cpu(cmd_resp->vebs_free);
+ if (floating) {
+ u16 flags = le16_to_cpu(cmd_resp->veb_flags);
+ if (flags & I40E_AQC_ADD_VEB_FLOATING)
+ *floating = true;
+ else
+ *floating = false;
+ }
+
+get_veb_exit:
+ return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
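+
+/* Example (illustrative sketch, not called anywhere in the driver): adding a
+ * single perfect-match filter to a VSI.  The element field and flag names
+ * are assumed from the admin queue command definitions.
+ *
+ *	struct i40e_aqc_add_macvlan_element_data elem = {};
+ *
+ *	memcpy(elem.mac_addr, mac, ETH_ALEN);
+ *	elem.vlan_tag = cpu_to_le16(vlan_id);
+ *	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ *	status = i40e_aq_add_macvlan(hw, vsi_seid, &elem, 1, NULL);
+ */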
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_remove_macvlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_macvlan *cmd =
+ (struct i40e_aqc_macvlan *)&desc.params.raw;
+ i40e_status status;
+ u16 buf_size;
+
+ if (count == 0 || !v_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(struct i40e_aqc_add_remove_vlan_element_data);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF id to send the message to
+ * @v_opcode: opcode carried in the descriptor's high cookie
+ * @v_retval: return value carried in the descriptor's low cookie
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * send msg to vf
+ **/
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_pf_vf_message *cmd =
+ (struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+ cmd->id = cpu_to_le32(vfid);
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI);
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ }
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aq_get_set_hmc_resource_profile *cmd =
+ (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_hmc_resource_profile);
+
+ cmd->pm_profile = (u8)profile;
+ cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * requests common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd_resp =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+ cmd_resp->resource_id = cpu_to_le16(resource);
+ cmd_resp->access_type = cpu_to_le16(access);
+ cmd_resp->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by someone else, the command completes with
+ * busy return value and the timeout field indicates the maximum time
+ * the current owner of the resource has to free it.
+ */
+ if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+ *timeout = le32_to_cpu(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * release common resource using the admin queue commands
+ **/
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_request_resource *cmd =
+ (struct i40e_aqc_request_resource *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+ cmd->resource_id = cpu_to_le16(resource);
+ cmd->resource_number = cpu_to_le32(sdp_number);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
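+
+/* Example (illustrative sketch, not called anywhere in the driver): taking
+ * and releasing the shared NVM resource.  On an EBUSY completion the
+ * returned timeout tells how long the current owner may still hold it.
+ * The resource id and access-type enum names used here are assumptions.
+ *
+ *	u64 timeout = 0;
+ *
+ *	status = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ *					  I40E_RESOURCE_READ, 0,
+ *					  &timeout, NULL);
+ *	if (!status) {
+ *		... access the NVM ...
+ *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ *	}
+ */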
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_nvm_update *cmd =
+ (struct i40e_aqc_nvm_update *)&desc.params.raw;
+ i40e_status status;
+
+ /* In offset the highest byte must be zeroed. */
+ if (offset & 0xFF000000) {
+ status = I40E_ERR_PARAM;
+ goto i40e_aq_read_nvm_exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+ cmd->module_pointer = module_pointer;
+ cmd->offset = cpu_to_le32(offset);
+ cmd->length = cpu_to_le16(length);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (length > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+ return status;
+}
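+
+/* Example (illustrative sketch, not called anywhere in the driver): reading
+ * an NVM region in chunks, marking the final chunk with last_command.  The
+ * chunk size and variable names here are assumptions.
+ *
+ *	u32 off;
+ *
+ *	for (off = 0; off < total; off += chunk) {
+ *		u16 len = min_t(u32, chunk, total - off);
+ *		bool last = (off + len >= total);
+ *
+ *		status = i40e_aq_read_nvm(hw, 0, off, len, buf + off,
+ *					  last, NULL);
+ *		if (status)
+ *			break;
+ *	}
+ */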
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02
+#define I40E_DEV_FUNC_CAP_NPAR 0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC 0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12
+#define I40E_DEV_FUNC_CAP_VF 0x13
+#define I40E_DEV_FUNC_CAP_VMDQ 0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16
+#define I40E_DEV_FUNC_CAP_VSI 0x17
+#define I40E_DEV_FUNC_CAP_DCB 0x18
+#define I40E_DEV_FUNC_CAP_FCOE 0x21
+#define I40E_DEV_FUNC_CAP_RSS 0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42
+#define I40E_DEV_FUNC_CAP_MSIX 0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46
+#define I40E_DEV_FUNC_CAP_MFP_MODE_1 0xF1
+#define I40E_DEV_FUNC_CAP_CEM 0xF2
+#define I40E_DEV_FUNC_CAP_IWARP 0x51
+#define I40E_DEV_FUNC_CAP_LED 0x61
+#define I40E_DEV_FUNC_CAP_SDP 0x62
+#define I40E_DEV_FUNC_CAP_MDIO 0x63
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+ u32 cap_count,
+ enum i40e_admin_queue_opc list_type_opc)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap;
+ u32 number, logical_id, phys_id;
+ struct i40e_hw_capabilities *p;
+ u32 reg_val;
+ u32 i = 0;
+ u16 id;
+
+ cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+ if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+ else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+ p = (struct i40e_hw_capabilities *)&hw->func_caps;
+ else
+ return;
+
+ for (i = 0; i < cap_count; i++, cap++) {
+ id = le16_to_cpu(cap->id);
+ number = le32_to_cpu(cap->number);
+ logical_id = le32_to_cpu(cap->logical_id);
+ phys_id = le32_to_cpu(cap->phys_id);
+
+ switch (id) {
+ case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+ p->switch_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MGMT_MODE:
+ p->management_mode = number;
+ break;
+ case I40E_DEV_FUNC_CAP_NPAR:
+ p->npar_enable = number;
+ break;
+ case I40E_DEV_FUNC_CAP_OS2BMC:
+ p->os2bmc = number;
+ break;
+ case I40E_DEV_FUNC_CAP_VALID_FUNC:
+ p->valid_functions = number;
+ break;
+ case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+ if (number == 1)
+ p->sr_iov_1_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VF:
+ p->num_vfs = number;
+ p->vf_base_id = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_VMDQ:
+ if (number == 1)
+ p->vmdq = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBG:
+ if (number == 1)
+ p->evb_802_1_qbg = true;
+ break;
+ case I40E_DEV_FUNC_CAP_802_1_QBH:
+ if (number == 1)
+ p->evb_802_1_qbh = true;
+ break;
+ case I40E_DEV_FUNC_CAP_VSI:
+ p->num_vsis = number;
+ break;
+ case I40E_DEV_FUNC_CAP_DCB:
+ if (number == 1) {
+ p->dcb = true;
+ p->enabled_tcmap = logical_id;
+ p->maxtc = phys_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_FCOE:
+ if (number == 1)
+ p->fcoe = true;
+ break;
+ case I40E_DEV_FUNC_CAP_RSS:
+ p->rss = true;
+ reg_val = rd32(hw, I40E_PFQF_CTL_0);
+ if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
+ p->rss_table_size = number;
+ else
+ p->rss_table_size = 128;
+ p->rss_table_entry_width = logical_id;
+ break;
+ case I40E_DEV_FUNC_CAP_RX_QUEUES:
+ p->num_rx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_TX_QUEUES:
+ p->num_tx_qp = number;
+ p->base_queue = phys_id;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX:
+ p->num_msix_vectors = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MSIX_VF:
+ p->num_msix_vectors_vf = number;
+ break;
+ case I40E_DEV_FUNC_CAP_MFP_MODE_1:
+ if (number == 1)
+ p->mfp_mode_1 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_CEM:
+ if (number == 1)
+ p->mgmt_cem = true;
+ break;
+ case I40E_DEV_FUNC_CAP_IWARP:
+ if (number == 1)
+ p->iwarp = true;
+ break;
+ case I40E_DEV_FUNC_CAP_LED:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->led[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_SDP:
+ if (phys_id < I40E_HW_CAP_MAX_GPIO)
+ p->sdp[phys_id] = true;
+ break;
+ case I40E_DEV_FUNC_CAP_MDIO:
+ if (number == 1) {
+ p->mdio_port_num = phys_id;
+ p->mdio_port_mode = logical_id;
+ }
+ break;
+ case I40E_DEV_FUNC_CAP_IEEE_1588:
+ if (number == 1)
+ p->ieee_1588 = true;
+ break;
+ case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+ p->fd = true;
+ p->fd_filters_guaranteed = number;
+ p->fd_filters_best_effort = logical_id;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* additional HW specific goodies that might
+ * someday be HW version specific
+ */
+ p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_list_capabilites *cmd;
+ i40e_status status = 0;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+ if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+ list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+ status = I40E_ERR_PARAM;
+ goto exit;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ *data_size = le16_to_cpu(desc.datalen);
+
+ if (status)
+ goto exit;
+
+ i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count),
+ list_type_opc);
+
+exit:
+ return status;
+}
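+
+/* Example (illustrative sketch, not called anywhere in the driver): when the
+ * firmware answers with ENOMEM, data_size is left holding the buffer size it
+ * actually needs, so a caller can re-allocate and retry.  The initial size
+ * and variable names are assumptions.
+ *
+ *	u16 needed = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+ *
+ *	do {
+ *		buf = kzalloc(needed, GFP_KERNEL);
+ *		if (!buf)
+ *			break;
+ *		status = i40e_aq_discover_capabilities(hw, buf, needed, &needed,
+ *					i40e_aqc_opc_list_func_capabilities,
+ *					NULL);
+ *		kfree(buf);
+ *	} while (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM);
+ */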
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_get_mib *cmd =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ struct i40e_aqc_lldp_get_mib *resp =
+ (struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+ i40e_status status;
+
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+
+ cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+ cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+ I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ if (local_len != NULL)
+ *local_len = le16_to_cpu(resp->local_len);
+ if (remote_len != NULL)
+ *remote_len = le16_to_cpu(resp->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_update_mib *cmd =
+ (struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+ if (!enable_update)
+ cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_stop *cmd =
+ (struct i40e_aqc_lldp_stop *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+ if (shutdown_agent)
+ cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_start *cmd =
+ (struct i40e_aqc_lldp_start *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+ cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_switch_seid *cmd =
+ (struct i40e_aqc_switch_seid *)&desc.params.raw;
+ i40e_status status;
+
+ if (seid == 0)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+ void *buff, u16 buff_size,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_tx_sched_ind *cmd =
+ (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ i40e_status status;
+ bool cmd_param_flag = false;
+
+ switch (opcode) {
+ case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+ case i40e_aqc_opc_configure_vsi_tc_bw:
+ case i40e_aqc_opc_enable_switching_comp_ets:
+ case i40e_aqc_opc_modify_switching_comp_ets:
+ case i40e_aqc_opc_disable_switching_comp_ets:
+ case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+ case i40e_aqc_opc_configure_switching_comp_bw_config:
+ cmd_param_flag = true;
+ break;
+ case i40e_aqc_opc_query_vsi_bw_config:
+ case i40e_aqc_opc_query_vsi_ets_sla_config:
+ case i40e_aqc_opc_query_switching_comp_ets_config:
+ case i40e_aqc_opc_query_port_ets_config:
+ case i40e_aqc_opc_query_switching_comp_bw_config:
+ cmd_param_flag = false;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+ /* Indirect command */
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (cmd_param_flag)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_configure_vsi_tc_bw,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_vsi_ets_sla_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_port_ets_config,
+ cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+ i40e_aqc_opc_query_switching_comp_bw_config,
+ cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Check and validate the filter control settings passed.
+ * The function checks for the valid filter/context sizes being
+ * passed for FCoE and PE.
+ *
+ * Returns 0 if the values passed are valid and within
+ * range else returns an error.
+ **/
+static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ u32 fcoe_cntx_size, fcoe_filt_size;
+ u32 pe_cntx_size, pe_filt_size;
+ u32 fcoe_fmax, pe_fmax;
+ u32 val;
+
+ /* Validate FCoE settings passed */
+ switch (settings->fcoe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->fcoe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* Validate PE settings passed */
+ switch (settings->pe_filt_num) {
+ case I40E_HASH_FILTER_SIZE_1K:
+ case I40E_HASH_FILTER_SIZE_2K:
+ case I40E_HASH_FILTER_SIZE_4K:
+ case I40E_HASH_FILTER_SIZE_8K:
+ case I40E_HASH_FILTER_SIZE_16K:
+ case I40E_HASH_FILTER_SIZE_32K:
+ case I40E_HASH_FILTER_SIZE_64K:
+ case I40E_HASH_FILTER_SIZE_128K:
+ case I40E_HASH_FILTER_SIZE_256K:
+ case I40E_HASH_FILTER_SIZE_512K:
+ case I40E_HASH_FILTER_SIZE_1M:
+ pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+ pe_filt_size <<= (u32)settings->pe_filt_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ switch (settings->pe_cntx_num) {
+ case I40E_DMA_CNTX_SIZE_512:
+ case I40E_DMA_CNTX_SIZE_1K:
+ case I40E_DMA_CNTX_SIZE_2K:
+ case I40E_DMA_CNTX_SIZE_4K:
+ case I40E_DMA_CNTX_SIZE_8K:
+ case I40E_DMA_CNTX_SIZE_16K:
+ case I40E_DMA_CNTX_SIZE_32K:
+ case I40E_DMA_CNTX_SIZE_64K:
+ case I40E_DMA_CNTX_SIZE_128K:
+ case I40E_DMA_CNTX_SIZE_256K:
+ pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+ pe_cntx_size <<= (u32)settings->pe_cntx_num;
+ break;
+ default:
+ return I40E_ERR_PARAM;
+ }
+
+ /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+ val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+ >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+ if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
+ val = rd32(hw, I40E_GLHMC_PEXFMAX);
+ pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
+ >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
+ if (pe_filt_size + pe_cntx_size > pe_fmax)
+ return I40E_ERR_INVALID_SIZE;
+
+ return 0;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
+{
+ i40e_status ret = 0;
+ u32 hash_lut_size = 0;
+ u32 val;
+
+ if (!settings)
+ return I40E_ERR_PARAM;
+
+ /* Validate the input settings */
+ ret = i40e_validate_filter_settings(hw, settings);
+ if (ret)
+ return ret;
+
+ /* Read the PF Queue Filter control register */
+ val = rd32(hw, I40E_PFQF_CTL_0);
+
+ /* Program required PE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEHSIZE_MASK;
+ /* Program required PE contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+ val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+ /* Program required FCoE hash buckets for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ val |= ((u32)settings->fcoe_filt_num <<
+ I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+ /* Program required FCoE DDP contexts for the PF */
+ val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+ val |= ((u32)settings->fcoe_cntx_num <<
+ I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+ /* Program Hash LUT size for the PF */
+ val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+ if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+ hash_lut_size = 1;
+ val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+ I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+ /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+ if (settings->enable_fdir)
+ val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+ if (settings->enable_ethtype)
+ val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+ if (settings->enable_macvlan)
+ val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+ wr32(hw, I40E_PFQF_CTL_0, val);
+
+ return 0;
+}
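+
+/* Example (illustrative sketch, not called anywhere in the driver):
+ * programming the PF queue filters once during driver initialization.  The
+ * size constants chosen here are arbitrary examples.
+ *
+ *	struct i40e_filter_control_settings settings = {};
+ *
+ *	settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	settings.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ *	settings.enable_fdir = true;
+ *	settings.enable_ethtype = true;
+ *	settings.enable_macvlan = true;
+ *	ret = i40e_set_filter_control(hw, &settings);
+ */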
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
new file mode 100644
index 00000000000..8dbd91f64b7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -0,0 +1,2076 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+#include "i40e.h"
+
+static struct dentry *i40e_dbg_root;
+
+/**
+ * i40e_dbg_find_vsi - searches for the vsi with the given seid
+ * @pf: the pf structure to search for the vsi
+ * @seid: seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if (seid < 0)
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
+ return pf->vsi[i];
+
+ return NULL;
+}
+
+/**
+ * i40e_dbg_find_veb - searches for the veb with the given seid
+ * @pf: the pf structure to search for the veb
+ * @seid: seid of the veb it is searching for
+ **/
+static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
+{
+ int i;
+
+ if ((seid < I40E_BASE_VEB_SEID) ||
+ (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ else
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == seid)
+ return pf->veb[i];
+ return NULL;
+}
+
+/**************************************************************
+ * dump
+ * The dump entry in debugfs is for getting a data snapshot of
+ * the driver's current configuration and runtime details.
+ * When the filesystem entry is written, a snapshot is taken.
+ * When the entry is read, the most recent snapshot data is dumped.
+ **************************************************************/
+static char *i40e_dbg_dump_buf;
+static ssize_t i40e_dbg_dump_data_len;
+static ssize_t i40e_dbg_dump_buffer_len;
+
+/**
+ * i40e_dbg_dump_read - read the dump data
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int bytes_not_copied;
+ int len;
+
+ /* is *ppos bigger than the available data? */
+ if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
+ return 0;
+
+ /* be sure to not read beyond the end of available data */
+ len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
+
+ bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+
+ *ppos += len;
+ return len;
+}
+
+/**
+ * i40e_dbg_prep_dump_buf
+ * @pf: the pf we're working with
+ * @buflen: the desired buffer length
+ *
+ * Return the usable buffer length (positive) on success, 0 on failure
+ **/
+static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
+{
+ /* if not already big enough, prep for re-alloc */
+ if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buffer_len = 0;
+ i40e_dbg_dump_buf = NULL;
+ }
+
+ /* get a new buffer if needed */
+ if (!i40e_dbg_dump_buf) {
+ i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
+ if (i40e_dbg_dump_buf != NULL)
+ i40e_dbg_dump_buffer_len = buflen;
+ }
+
+ return i40e_dbg_dump_buffer_len;
+}
+
+/**
+ * i40e_dbg_dump_write - trigger a datadump snapshot
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ *
+ * Writing a SEID triggers a fresh snapshot of that element;
+ * writing 0 frees the dump buffer
+ **/
+static ssize_t i40e_dbg_dump_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ char dump_request_buf[16];
+ bool seid_found = false;
+ int bytes_not_copied;
+ long seid = -1;
+ int buflen = 0;
+ int i, ret;
+ int len;
+ u8 *p;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(dump_request_buf))
+ return -ENOSPC;
+
+ bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+ if (bytes_not_copied > 0)
+ count -= bytes_not_copied;
+ dump_request_buf[count] = '\0';
+
+ /* decode the SEID given to be dumped */
+ ret = kstrtol(dump_request_buf, 0, &seid);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
+ dump_request_buf);
+ } else if (seid == 0) {
+ seid_found = true;
+
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buffer_len = 0;
+ i40e_dbg_dump_data_len = 0;
+ i40e_dbg_dump_buf = NULL;
+ dev_info(&pf->pdev->dev, "debug buffer freed\n");
+
+ } else if (seid == pf->pf_seid || seid == 1) {
+ seid_found = true;
+
+ buflen = sizeof(struct i40e_pf);
+ buflen += (sizeof(struct i40e_aq_desc)
+ * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
+
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ p = i40e_dbg_dump_buf;
+
+ len = sizeof(struct i40e_pf);
+ memcpy(p, pf, len);
+ p += len;
+
+ len = (sizeof(struct i40e_aq_desc)
+ * pf->hw.aq.num_asq_entries);
+ memcpy(p, pf->hw.aq.asq.desc, len);
+ p += len;
+
+ len = (sizeof(struct i40e_aq_desc)
+ * pf->hw.aq.num_arq_entries);
+ memcpy(p, pf->hw.aq.arq.desc, len);
+ p += len;
+
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "PF seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ } else if (seid >= I40E_BASE_VSI_SEID) {
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_mac_filter *f;
+ int filter_count = 0;
+
+ mutex_lock(&pf->switch_mutex);
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ mutex_unlock(&pf->switch_mutex);
+ goto write_exit;
+ }
+
+ buflen = sizeof(struct i40e_vsi);
+ buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
+ buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
+ buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
+ buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ filter_count++;
+ buflen += sizeof(struct i40e_mac_filter) * filter_count;
+
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ p = i40e_dbg_dump_buf;
+ seid_found = true;
+
+ len = sizeof(struct i40e_vsi);
+ memcpy(p, vsi, len);
+ p += len;
+
+ len = (sizeof(struct i40e_q_vector)
+ * vsi->num_q_vectors);
+ memcpy(p, vsi->q_vectors, len);
+ p += len;
+
+ len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
+ memcpy(p, vsi->tx_rings, len);
+ p += len;
+ memcpy(p, vsi->rx_rings, len);
+ p += len;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ len = sizeof(struct i40e_tx_buffer);
+ memcpy(p, vsi->tx_rings[i].tx_bi, len);
+ p += len;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ len = sizeof(struct i40e_rx_buffer);
+ memcpy(p, vsi->rx_rings[i].rx_bi, len);
+ p += len;
+ }
+
+ /* macvlan filter list */
+ len = sizeof(struct i40e_mac_filter);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ memcpy(p, f, len);
+ p += len;
+ }
+
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "VSI seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ mutex_unlock(&pf->switch_mutex);
+ } else if (seid >= I40E_BASE_VEB_SEID) {
+ struct i40e_veb *veb = NULL;
+
+ mutex_lock(&pf->switch_mutex);
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ mutex_unlock(&pf->switch_mutex);
+ goto write_exit;
+ }
+
+ buflen = sizeof(struct i40e_veb);
+ if (i40e_dbg_prep_dump_buf(pf, buflen)) {
+ seid_found = true;
+ memcpy(i40e_dbg_dump_buf, veb, buflen);
+ i40e_dbg_dump_data_len = buflen;
+ dev_info(&pf->pdev->dev,
+ "VEB seid %ld dumped %d bytes\n",
+ seid, (int)i40e_dbg_dump_data_len);
+ }
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+write_exit:
+ if (!seid_found)
+ dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
+
+ return count;
+}
+
+static const struct file_operations i40e_dbg_dump_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_dump_read,
+ .write = i40e_dbg_dump_write,
+};
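+
+/* Example (illustrative sketch): from user space the dump file is driven by
+ * writing a SEID and then reading the snapshot back; the debugfs path shown
+ * below is assumed.
+ *
+ *	echo 1 > /sys/kernel/debug/i40e/<pci-id>/dump      (snapshot the PF)
+ *	cat /sys/kernel/debug/i40e/<pci-id>/dump > pf.bin  (read it back)
+ *	echo 0 > /sys/kernel/debug/i40e/<pci-id>/dump      (free the buffer)
+ */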
+
+/**************************************************************
+ * command
+ * The command entry in debugfs is for giving the driver commands
+ * to be executed - these may be for changing the internal switch
+ * setup, adding or removing filters, or other things. Many of
+ * these will be useful for some forms of unit testing.
+ **************************************************************/
+static char i40e_dbg_command_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_command_read - read for command datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_command_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_dump_vsi_seid - dump the details of the vsi with the given seid
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
+{
+ struct rtnl_link_stats64 *nstat;
+ struct i40e_mac_filter *f;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "dump %d: seid not found\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev, "vsi seid %d\n", seid);
+ if (vsi->netdev)
+ dev_info(&pf->pdev->dev,
+ " netdev: name = %s\n",
+ vsi->netdev->name);
+ if (vsi->active_vlans)
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n",
+ vsi->netdev_registered,
+ vsi->current_netdev_flags, vsi->state, vsi->flags);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ dev_info(&pf->pdev->dev,
+ " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+ f->macaddr, f->vlan, f->is_netdev, f->is_vf,
+ f->counter);
+ }
+ nstat = i40e_get_vsi_stats_struct(vsi);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (long unsigned int)nstat->rx_packets,
+ (long unsigned int)nstat->rx_bytes,
+ (long unsigned int)nstat->rx_errors,
+ (long unsigned int)nstat->rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (long unsigned int)nstat->tx_packets,
+ (long unsigned int)nstat->tx_bytes,
+ (long unsigned int)nstat->tx_errors,
+ (long unsigned int)nstat->tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats: multicast = %lu, collisions = %lu\n",
+ (long unsigned int)nstat->multicast,
+ (long unsigned int)nstat->collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (long unsigned int)nstat->rx_length_errors,
+ (long unsigned int)nstat->rx_over_errors,
+ (long unsigned int)nstat->rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (long unsigned int)nstat->rx_frame_errors,
+ (long unsigned int)nstat->rx_fifo_errors,
+ (long unsigned int)nstat->rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (long unsigned int)nstat->tx_aborted_errors,
+ (long unsigned int)nstat->tx_carrier_errors,
+ (long unsigned int)nstat->tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (long unsigned int)nstat->tx_heartbeat_errors,
+ (long unsigned int)nstat->tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
+ (long unsigned int)nstat->rx_compressed,
+ (long unsigned int)nstat->tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_packets,
+ (long unsigned int)vsi->net_stats_offsets.rx_bytes,
+ (long unsigned int)vsi->net_stats_offsets.rx_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_packets,
+ (long unsigned int)vsi->net_stats_offsets.tx_bytes,
+ (long unsigned int)vsi->net_stats_offsets.tx_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_dropped);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: multicast = %lu, collisions = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.multicast,
+ (long unsigned int)vsi->net_stats_offsets.collisions);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_length_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_over_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
+ (long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
+ (long unsigned int)vsi->net_stats_offsets.tx_window_errors);
+ dev_info(&pf->pdev->dev,
+ " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
+ (long unsigned int)vsi->net_stats_offsets.rx_compressed,
+ (long unsigned int)vsi->net_stats_offsets.tx_compressed);
+ dev_info(&pf->pdev->dev,
+ " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ vsi->tx_restart, vsi->tx_busy,
+ vsi->rx_buf_failed, vsi->rx_page_failed);
+ if (vsi->rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: desc = %p\n",
+ i, vsi->rx_rings[i].desc);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+ i, vsi->rx_rings[i].dev,
+ vsi->rx_rings[i].netdev,
+ vsi->rx_rings[i].rx_bi);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, vsi->rx_rings[i].state,
+ vsi->rx_rings[i].queue_index,
+ vsi->rx_rings[i].reg_idx);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+ i, vsi->rx_rings[i].rx_hdr_len,
+ vsi->rx_rings[i].rx_buf_len,
+ vsi->rx_rings[i].dtype);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, vsi->rx_rings[i].hsplit,
+ vsi->rx_rings[i].next_to_use,
+ vsi->rx_rings[i].next_to_clean,
+ vsi->rx_rings[i].ring_active);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+ i, vsi->rx_rings[i].rx_stats.packets,
+ vsi->rx_rings[i].rx_stats.bytes,
+ vsi->rx_rings[i].rx_stats.non_eop_descs);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+ i,
+ vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
+ vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, vsi->rx_rings[i].size,
+ (long unsigned int)vsi->rx_rings[i].dma);
+ dev_info(&pf->pdev->dev,
+ " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, vsi->rx_rings[i].vsi,
+ vsi->rx_rings[i].q_vector);
+ }
+ }
+ if (vsi->tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: desc = %p\n",
+ i, vsi->tx_rings[i].desc);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+ i, vsi->tx_rings[i].dev,
+ vsi->tx_rings[i].netdev,
+ vsi->tx_rings[i].tx_bi);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+ i, vsi->tx_rings[i].state,
+ vsi->tx_rings[i].queue_index,
+ vsi->tx_rings[i].reg_idx);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: dtype = %d\n",
+ i, vsi->tx_rings[i].dtype);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i, vsi->tx_rings[i].hsplit,
+ vsi->tx_rings[i].next_to_use,
+ vsi->tx_rings[i].next_to_clean,
+ vsi->tx_rings[i].ring_active);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+ i, vsi->tx_rings[i].tx_stats.packets,
+ vsi->tx_rings[i].tx_stats.bytes,
+ vsi->tx_rings[i].tx_stats.restart_queue);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
+ i,
+ vsi->tx_rings[i].tx_stats.tx_busy,
+ vsi->tx_rings[i].tx_stats.completed,
+ vsi->tx_rings[i].tx_stats.tx_done_old);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+ i, vsi->tx_rings[i].size,
+ (long unsigned int)vsi->tx_rings[i].dma);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+ i, vsi->tx_rings[i].vsi,
+ vsi->tx_rings[i].q_vector);
+ dev_info(&pf->pdev->dev,
+ " tx_rings[%i]: DCB tc = %d\n",
+ i, vsi->tx_rings[i].dcb_tc);
+ }
+ }
+ dev_info(&pf->pdev->dev,
+ " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
+ vsi->work_limit, vsi->rx_itr_setting,
+ ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed",
+ vsi->tx_itr_setting,
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed");
+ dev_info(&pf->pdev->dev,
+ " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ if (vsi->q_vectors) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ dev_info(&pf->pdev->dev,
+ " q_vectors[%i]: base index = %ld\n",
+ i, ((long int)*vsi->q_vectors[i].rx.ring-
+ (long int)*vsi->q_vectors[0].rx.ring)/
+ sizeof(struct i40e_ring));
+ }
+ }
+ dev_info(&pf->pdev->dev,
+ " num_q_vectors = %i, base_vector = %i\n",
+ vsi->num_q_vectors, vsi->base_vector);
+ dev_info(&pf->pdev->dev,
+ " seid = %d, id = %d, uplink_seid = %d\n",
+ vsi->seid, vsi->id, vsi->uplink_seid);
+ dev_info(&pf->pdev->dev,
+ " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n",
+ vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc);
+ dev_info(&pf->pdev->dev, " type = %i\n", vsi->type);
+ dev_info(&pf->pdev->dev,
+ " info: valid_sections = 0x%04x, switch_id = 0x%04x\n",
+ vsi->info.valid_sections, vsi->info.switch_id);
+ dev_info(&pf->pdev->dev,
+ " info: sw_reserved[] = 0x%02x 0x%02x\n",
+ vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]);
+ dev_info(&pf->pdev->dev,
+ " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n",
+ vsi->info.sec_flags, vsi->info.sec_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n",
+ vsi->info.pvid, vsi->info.fcoe_pvid,
+ vsi->info.port_vlan_flags);
+ dev_info(&pf->pdev->dev,
+ " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1],
+ vsi->info.pvlan_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: ingress_table = 0x%08x, egress_table = 0x%08x\n",
+ vsi->info.ingress_table, vsi->info.egress_table);
+ dev_info(&pf->pdev->dev,
+ " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n",
+ vsi->info.cas_pv_tag, vsi->info.cas_pv_flags,
+ vsi->info.cas_pv_reserved);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[0], vsi->info.queue_mapping[1],
+ vsi->info.queue_mapping[2], vsi->info.queue_mapping[3],
+ vsi->info.queue_mapping[4], vsi->info.queue_mapping[5],
+ vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.queue_mapping[8], vsi->info.queue_mapping[9],
+ vsi->info.queue_mapping[10], vsi->info.queue_mapping[11],
+ vsi->info.queue_mapping[12], vsi->info.queue_mapping[13],
+ vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]);
+ dev_info(&pf->pdev->dev,
+ " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.tc_mapping[0], vsi->info.tc_mapping[1],
+ vsi->info.tc_mapping[2], vsi->info.tc_mapping[3],
+ vsi->info.tc_mapping[4], vsi->info.tc_mapping[5],
+ vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]);
+ dev_info(&pf->pdev->dev,
+ " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.queueing_opt_flags,
+ vsi->info.queueing_opt_reserved[0],
+ vsi->info.queueing_opt_reserved[1],
+ vsi->info.queueing_opt_reserved[2]);
+ dev_info(&pf->pdev->dev,
+ " info: up_enable_bits = 0x%02x\n",
+ vsi->info.up_enable_bits);
+ dev_info(&pf->pdev->dev,
+ " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n",
+ vsi->info.sched_reserved, vsi->info.outer_up_table);
+ dev_info(&pf->pdev->dev,
+ " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 0x%02x 0x%02x 0x%02x 0x0%02x\n",
+ vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1],
+ vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3],
+ vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5],
+ vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]);
+ dev_info(&pf->pdev->dev,
+ " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ vsi->info.qs_handle[0], vsi->info.qs_handle[1],
+ vsi->info.qs_handle[2], vsi->info.qs_handle[3],
+ vsi->info.qs_handle[4], vsi->info.qs_handle[5],
+ vsi->info.qs_handle[6], vsi->info.qs_handle[7]);
+ dev_info(&pf->pdev->dev,
+ " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n",
+ vsi->info.stat_counter_idx, vsi->info.sched_id);
+ dev_info(&pf->pdev->dev,
+ " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ vsi->info.resp_reserved[0], vsi->info.resp_reserved[1],
+ vsi->info.resp_reserved[2], vsi->info.resp_reserved[3],
+ vsi->info.resp_reserved[4], vsi->info.resp_reserved[5],
+ vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
+ vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
+ vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
+ if (vsi->back)
+ dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back);
+ dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
+ dev_info(&pf->pdev->dev,
+ " tc_config: numtc = %d, enabled_tc = 0x%x\n",
+ vsi->tc_config.numtc, vsi->tc_config.enabled_tc);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n",
+ i, vsi->tc_config.tc_info[i].qoffset,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].netdev_tc);
+ }
+ dev_info(&pf->pdev->dev,
+ " bw: bw_limit = %d, bw_max_quanta = %d\n",
+ vsi->bw_limit, vsi->bw_max_quanta);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev,
+ " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n",
+ i, vsi->bw_ets_share_credits[i],
+ vsi->bw_ets_limit_credits[i],
+ vsi->bw_ets_max_quanta[i]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
+{
+ struct i40e_adminq_ring *ring;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* first the send (command) ring, then the receive (event) ring */
+ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n");
+ ring = &(hw->aq.asq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ dev_info(&pf->pdev->dev,
+ " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ dev_info(&pf->pdev->dev,
+ " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ d->params.raw[0], d->params.raw[1], d->params.raw[2],
+ d->params.raw[3], d->params.raw[4], d->params.raw[5],
+ d->params.raw[6], d->params.raw[7], d->params.raw[8],
+ d->params.raw[9], d->params.raw[10], d->params.raw[11],
+ d->params.raw[12], d->params.raw[13],
+ d->params.raw[14], d->params.raw[15]);
+ }
+
+ dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n");
+ ring = &(hw->aq.arq);
+ for (i = 0; i < ring->count; i++) {
+ struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
+ dev_info(&pf->pdev->dev,
+ " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
+ i, d->flags, d->opcode, d->datalen, d->retval,
+ d->cookie_high, d->cookie_low);
+ dev_info(&pf->pdev->dev,
+ " %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ d->params.raw[0], d->params.raw[1], d->params.raw[2],
+ d->params.raw[3], d->params.raw[4], d->params.raw[5],
+ d->params.raw[6], d->params.raw[7], d->params.raw[8],
+ d->params.raw[9], d->params.raw[10], d->params.raw[11],
+ d->params.raw[12], d->params.raw[13],
+ d->params.raw[14], d->params.raw[15]);
+ }
+}
+
+/**
+ * i40e_dbg_dump_desc - handles dump desc write into command datum
+ * @cnt: number of arguments that the user supplied
+ * @vsi_seid: vsi id entered by user
+ * @ring_id: ring id entered by user
+ * @desc_n: descriptor number entered by user
+ * @pf: the i40e_pf created in command write
+ * @is_rx_ring: true if rx, false if tx
+ **/
+static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
+ struct i40e_pf *pf, bool is_rx_ring)
+{
+ union i40e_rx_desc *ds;
+ struct i40e_ring ring;
+ struct i40e_vsi *vsi;
+ int i;
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "vsi %d not found\n", vsi_seid);
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
+ dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ return;
+ }
+ if (is_rx_ring)
+ ring = vsi->rx_rings[ring_id];
+ else
+ ring = vsi->tx_rings[ring_id];
+ if (cnt == 2) {
+ dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
+ vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+ for (i = 0; i < ring.count; i++) {
+ if (is_rx_ring)
+ ds = I40E_RX_DESC(&ring, i);
+ else
+ ds = (union i40e_rx_desc *)
+ I40E_TX_DESC(&ring, i);
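+			/* tx descriptors and 16-byte rx descriptors only
+			 * carry two quadwords; the larger rx descriptor
+			 * layout also has rsvd1/rsvd2, so dump all four
+			 */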
+ if ((sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx\n", i,
+ ds->read.pkt_addr, ds->read.hdr_addr);
+ else
+ dev_info(&pf->pdev->dev,
+ " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ i, ds->read.pkt_addr,
+ ds->read.hdr_addr,
+ ds->read.rsvd1, ds->read.rsvd2);
+ }
+ } else if (cnt == 3) {
+ if (desc_n >= ring.count || desc_n < 0) {
+ dev_info(&pf->pdev->dev,
+ "descriptor %d not found\n", desc_n);
+ return;
+ }
+ if (is_rx_ring)
+ ds = I40E_RX_DESC(&ring, desc_n);
+ else
+ ds = (union i40e_rx_desc *)I40E_TX_DESC(&ring, desc_n);
+ if ((sizeof(union i40e_rx_desc) ==
+ sizeof(union i40e_16byte_rx_desc)) || (!is_rx_ring))
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i %s ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+ vsi_seid, is_rx_ring ? "rx" : "tx", ring_id,
+ desc_n, ds->read.pkt_addr, ds->read.hdr_addr);
+ else
+ dev_info(&pf->pdev->dev,
+ "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ vsi_seid, ring_id,
+ desc_n, ds->read.pkt_addr, ds->read.hdr_addr,
+ ds->read.rsvd1, ds->read.rsvd2);
+ } else {
+ if (is_rx_ring)
+ dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ else
+ dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ }
+}
+
+/**
+ * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i])
+ dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
+ i, pf->vsi[i]->seid);
+}
+
+/**
+ * i40e_dbg_dump_eth_stats - handles dump of an eth stats structure
+ * @pf: the i40e_pf created in command write
+ * @estats: the eth stats structure to be dumped
+ **/
+static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
+ struct i40e_eth_stats *estats)
+{
+ dev_info(&pf->pdev->dev, " ethstats:\n");
+ dev_info(&pf->pdev->dev,
+ " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
+ estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
+ dev_info(&pf->pdev->dev,
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
+ estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ dev_info(&pf->pdev->dev,
+ " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_missed, estats->rx_unknown_protocol,
+ estats->tx_bytes);
+ dev_info(&pf->pdev->dev,
+ " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
+ estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
+ dev_info(&pf->pdev->dev,
+ " tx_discards = \t%lld \ttx_errors = \t\t%lld\n",
+ estats->tx_discards, estats->tx_errors);
+}
+
+/**
+ * i40e_dbg_dump_stats - handles dump stats write into command datum
+ * @pf: the i40e_pf created in command write
+ * @stats: the stats structure to be dumped
+ **/
+static void i40e_dbg_dump_stats(struct i40e_pf *pf,
+ struct i40e_hw_port_stats *stats)
+{
+ int i;
+
+ dev_info(&pf->pdev->dev, " stats:\n");
+ dev_info(&pf->pdev->dev,
+ " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n",
+ stats->crc_errors, stats->illegal_bytes, stats->error_bytes);
+ dev_info(&pf->pdev->dev,
+ " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n",
+ stats->mac_local_faults, stats->mac_remote_faults,
+ stats->rx_length_errors);
+ dev_info(&pf->pdev->dev,
+ " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n",
+ stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx);
+ dev_info(&pf->pdev->dev,
+ " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n",
+ stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127);
+ dev_info(&pf->pdev->dev,
+ " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n",
+ stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023);
+ dev_info(&pf->pdev->dev,
+ " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n",
+ stats->rx_size_big, stats->rx_undersize, stats->rx_jabber);
+ dev_info(&pf->pdev->dev,
+ " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n",
+ stats->rx_fragments, stats->rx_oversize, stats->tx_size_64);
+ dev_info(&pf->pdev->dev,
+ " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n",
+ stats->tx_size_127, stats->tx_size_255, stats->tx_size_511);
+ dev_info(&pf->pdev->dev,
+ " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n",
+ stats->tx_size_1023, stats->tx_size_big,
+ stats->mac_short_packet_dropped);
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_rx[i],
+ i+1, stats->priority_xon_rx[i+1],
+ i+2, stats->priority_xon_rx[i+2],
+ i+3, stats->priority_xon_rx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xoff_rx[i],
+ i+1, stats->priority_xoff_rx[i+1],
+ i+2, stats->priority_xoff_rx[i+2],
+ i+3, stats->priority_xoff_rx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_tx[i],
+ i+1, stats->priority_xon_tx[i+1],
+ i+2, stats->priority_xon_tx[i+2],
+			 i+3, stats->priority_xon_tx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xoff_tx[i],
+ i+1, stats->priority_xoff_tx[i+1],
+ i+2, stats->priority_xoff_tx[i+2],
+ i+3, stats->priority_xoff_tx[i+3]);
+ }
+ for (i = 0; i < 8; i += 4) {
+ dev_info(&pf->pdev->dev,
+ " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n",
+ i, stats->priority_xon_2_xoff[i],
+ i+1, stats->priority_xon_2_xoff[i+1],
+ i+2, stats->priority_xon_2_xoff[i+2],
+ i+3, stats->priority_xon_2_xoff[i+3]);
+ }
+
+ i40e_dbg_dump_eth_stats(pf, &stats->eth);
+}
+
+/**
+ * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb
+ * @pf: the i40e_pf created in command write
+ * @seid: the seid the user put in
+ **/
+static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
+{
+ struct i40e_veb *veb;
+
+ if ((seid < I40E_BASE_VEB_SEID) ||
+ (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
+ dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
+ return;
+ }
+
+ veb = i40e_dbg_find_veb(pf, seid);
+ if (!veb) {
+ dev_info(&pf->pdev->dev,
+ "%d: can't find veb\n", seid);
+ return;
+ }
+ dev_info(&pf->pdev->dev,
+ "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n",
+ veb->idx, veb->veb_idx, veb->stats_idx, veb->seid,
+ veb->uplink_seid);
+ i40e_dbg_dump_eth_stats(pf, &veb->stats);
+}
+
+/**
+ * i40e_dbg_dump_veb_all - dumps stats for all known vebs
+ * @pf: the i40e_pf created in command write
+ **/
+static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
+{
+ struct i40e_veb *veb;
+ int i;
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ veb = pf->veb[i];
+ if (veb)
+ i40e_dbg_dump_veb_seid(pf, veb->seid);
+ }
+}
+
+#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
+/**
+ * i40e_dbg_command_write - write into command datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_command_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ u8 *print_buf_start;
+ u8 *print_buf;
+ char *cmd_buf;
+ int vsi_seid;
+ int veb_seid;
+ int cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+ bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+ if (bytes_not_copied > 0)
+ count -= bytes_not_copied;
+ cmd_buf[count] = '\0';
+
+ print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
+ if (!print_buf_start)
+ goto command_write_done;
+ print_buf = print_buf_start;
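+	/* print_buf accumulates the multi-line hex dumps below (fd_filter
+	 * packets, LLDP MIBs, NVM words) so each dump can be emitted with
+	 * a single dev_info call
+	 */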
+
+ if (strncmp(cmd_buf, "add vsi", 7) == 0) {
+ vsi_seid = -1;
+ cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ if (cnt == 0) {
+ /* default to PF VSI */
+ vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ } else if (vsi_seid < 0) {
+ dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
+ if (vsi)
+ dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
+ vsi->seid, vsi->uplink_seid);
+ else
+ dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
+
+ } else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
+ sscanf(&cmd_buf[7], "%i", &vsi_seid);
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid);
+ i40e_vsi_release(vsi);
+
+ } else if (strncmp(cmd_buf, "add relay", 9) == 0) {
+ struct i40e_veb *veb;
+ int uplink_seid, i;
+
+ cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (uplink_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "add relay %d: bad uplink seid\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add relay: vsi VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid)
+ break;
+ if (i >= I40E_MAX_VEB && uplink_seid != 0 &&
+ uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "add relay: relay uplink %d not found\n",
+ uplink_seid);
+ goto command_write_done;
+ }
+
+ veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid,
+ vsi->tc_config.enabled_tc);
+ if (veb)
+ dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
+ else
+ dev_info(&pf->pdev->dev, "add relay failed\n");
+
+ } else if (strncmp(cmd_buf, "del relay", 9) == 0) {
+ int i;
+ cnt = sscanf(&cmd_buf[9], "%i", &veb_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del relay: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ } else if (veb_seid < 0) {
+ dev_info(&pf->pdev->dev,
+ "del relay %d: bad relay seid\n", veb_seid);
+ goto command_write_done;
+ }
+
+ /* find the veb */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && pf->veb[i]->seid == veb_seid)
+ break;
+ if (i >= I40E_MAX_VEB) {
+ dev_info(&pf->pdev->dev,
+ "del relay: relay %d not found\n", veb_seid);
+ goto command_write_done;
+ }
+
+ dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
+ i40e_veb_release(pf->veb[i]);
+
+ } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
+ u8 ma[6];
+ int vlan = 0;
+ struct i40e_mac_filter *f;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[11],
+ "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+ &vsi_seid,
+ &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+ &vlan);
+ if (cnt == 7) {
+ vlan = 0;
+ } else if (cnt != 8) {
+ dev_info(&pf->pdev->dev,
+ "add macaddr: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "add macaddr: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ f = i40e_add_filter(vsi, ma, vlan, false, false);
+ ret = i40e_sync_vsi_filters(vsi);
+ if (f && !ret)
+ dev_info(&pf->pdev->dev,
+ "add macaddr: %pM vlan=%d added to VSI %d\n",
+ ma, vlan, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n",
+ ma, vlan, vsi_seid, f, ret);
+
+ } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
+ u8 ma[6];
+ int vlan = 0;
+ int ret;
+
+ cnt = sscanf(&cmd_buf[11],
+ "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i",
+ &vsi_seid,
+ &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5],
+ &vlan);
+ if (cnt == 7) {
+ vlan = 0;
+ } else if (cnt != 8) {
+ dev_info(&pf->pdev->dev,
+ "del macaddr: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del macaddr: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_del_filter(vsi, ma, vlan, false, false);
+ ret = i40e_sync_vsi_filters(vsi);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "del macaddr: %pM vlan=%d removed from VSI %d\n",
+ ma, vlan, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n",
+ ma, vlan, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
+ int v;
+ u16 vid;
+ i40e_status ret;
+
+ cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev,
+ "add pvid: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+
+ vid = (unsigned)v;
+ ret = i40e_vsi_add_pvid(vsi, vid);
+ if (!ret)
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d added to VSI %d\n",
+ vid, vsi_seid);
+ else
+ dev_info(&pf->pdev->dev,
+ "add pvid: %d to VSI %d failed, ret=%d\n",
+ vid, vsi_seid, ret);
+
+ } else if (strncmp(cmd_buf, "del pvid", 8) == 0) {
+
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: bad command string, cnt=%d\n",
+ cnt);
+ goto command_write_done;
+ }
+
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "del pvid: VSI %d not found\n", vsi_seid);
+ goto command_write_done;
+ }
+
+ i40e_vsi_remove_pvid(vsi);
+ dev_info(&pf->pdev->dev,
+ "del pvid: removed from VSI %d\n", vsi_seid);
+
+ } else if (strncmp(cmd_buf, "dump", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "switch", 6) == 0) {
+ i40e_fetch_switch_configuration(pf, true);
+ } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_vsi_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_vsi_no_seid(pf);
+ } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) {
+ cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid);
+ if (cnt > 0)
+ i40e_dbg_dump_veb_seid(pf, vsi_seid);
+ else
+ i40e_dbg_dump_veb_all(pf);
+ } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) {
+ int ring_id, desc_n;
+ if (strncmp(&cmd_buf[10], "rx", 2) == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, true);
+ } else if (strncmp(&cmd_buf[10], "tx", 2)
+ == 0) {
+ cnt = sscanf(&cmd_buf[12], "%i %i %i",
+ &vsi_seid, &ring_id, &desc_n);
+ i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+ desc_n, pf, false);
+ } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
+ i40e_dbg_dump_aq_desc(pf);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev,
+ "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, "dump desc aq\n");
+ }
+ } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) {
+ dev_info(&pf->pdev->dev, "pf stats:\n");
+ i40e_dbg_dump_stats(pf, &pf->stats);
+ dev_info(&pf->pdev->dev, "pf stats_offsets:\n");
+ i40e_dbg_dump_stats(pf, &pf->stats_offsets);
+ } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
+ dev_info(&pf->pdev->dev,
+ "core reset count: %d\n", pf->corer_count);
+ dev_info(&pf->pdev->dev,
+ "global reset count: %d\n", pf->globr_count);
+ dev_info(&pf->pdev->dev,
+ "emp reset count: %d\n", pf->empr_count);
+ dev_info(&pf->pdev->dev,
+ "pf reset count: %d\n", pf->pfr_count);
+ } else if (strncmp(&cmd_buf[5], "port", 4) == 0) {
+ struct i40e_aqc_query_port_ets_config_resp *bw_data;
+ struct i40e_dcbx_config *cfg =
+ &pf->hw.local_dcbx_config;
+ struct i40e_dcbx_config *r_cfg =
+ &pf->hw.remote_dcbx_config;
+ int i, ret;
+
+ bw_data = kzalloc(sizeof(
+ struct i40e_aqc_query_port_ets_config_resp),
+ GFP_KERNEL);
+ if (!bw_data) {
+ ret = -ENOMEM;
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_query_port_ets_config(&pf->hw,
+ pf->mac_seid,
+ bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Query Port ETS Config AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(bw_data);
+ bw_data = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n",
+ bw_data->tc_valid_bits,
+ bw_data->tc_strict_priority_bits,
+ le16_to_cpu(bw_data->tc_bw_max[0]),
+ le16_to_cpu(bw_data->tc_bw_max[1]));
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n",
+ bw_data->tc_bw_share_credits[i],
+ le16_to_cpu(bw_data->tc_bw_limits[i]));
+ }
+
+ kfree(bw_data);
+ bw_data = NULL;
+
+ dev_info(&pf->pdev->dev,
+ "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ cfg->etscfg.willing, cfg->etscfg.cbs,
+ cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etscfg.prioritytable[i],
+ cfg->etscfg.tcbwtable[i],
+ cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, cfg->etsrec.prioritytable[i],
+ cfg->etsrec.tcbwtable[i],
+ cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ cfg->pfc.willing, cfg->pfc.mbc,
+ cfg->pfc.pfccap, cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "port app_table: num_apps=%d\n", cfg->numapps);
+ for (i = 0; i < cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, cfg->app[i].priority,
+ cfg->app[i].selector,
+ cfg->app[i].protocolid);
+ }
+ /* Peer TLV DCBX data */
+ dev_info(&pf->pdev->dev,
+ "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n",
+ r_cfg->etscfg.willing,
+ r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etscfg.prioritytable[i],
+ r_cfg->etscfg.tcbwtable[i],
+ r_cfg->etscfg.tsatable[i]);
+ }
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n",
+ i, r_cfg->etsrec.prioritytable[i],
+ r_cfg->etsrec.tcbwtable[i],
+ r_cfg->etsrec.tsatable[i]);
+ }
+ dev_info(&pf->pdev->dev,
+ "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n",
+ r_cfg->pfc.willing,
+ r_cfg->pfc.mbc,
+ r_cfg->pfc.pfccap,
+ r_cfg->pfc.pfcenable);
+ dev_info(&pf->pdev->dev,
+ "remote port app_table: num_apps=%d\n",
+ r_cfg->numapps);
+ for (i = 0; i < r_cfg->numapps; i++) {
+ dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n",
+ i, r_cfg->app[i].priority,
+ r_cfg->app[i].selector,
+ r_cfg->app[i].protocolid);
+ }
+ } else {
+ dev_info(&pf->pdev->dev,
+ "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+ dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n");
+ dev_info(&pf->pdev->dev, "dump stats\n");
+ dev_info(&pf->pdev->dev, "dump reset stats\n");
+ dev_info(&pf->pdev->dev, "dump port\n");
+ dev_info(&pf->pdev->dev,
+ "dump debug fwdata <cluster_id> <table_id> <index>\n");
+ }
+
+ } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) {
+ u32 level;
+ cnt = sscanf(&cmd_buf[10], "%i", &level);
+ if (cnt) {
+ if (I40E_DEBUG_USER & level) {
+ pf->hw.debug_mask = level;
+ dev_info(&pf->pdev->dev,
+ "set hw.debug_mask = 0x%08x\n",
+ pf->hw.debug_mask);
+ }
+ pf->msg_enable = level;
+ dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n",
+ pf->msg_enable);
+ } else {
+ dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n",
+ pf->msg_enable);
+ }
+ } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
+ dev_info(&pf->pdev->dev, "forcing PFR\n");
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "corer", 5) == 0) {
+ dev_info(&pf->pdev->dev, "forcing CoreR\n");
+ i40e_do_reset(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "globr", 5) == 0) {
+ dev_info(&pf->pdev->dev, "forcing GlobR\n");
+ i40e_do_reset(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+
+ } else if (strncmp(cmd_buf, "read", 4) == 0) {
+ u32 address;
+ u32 value;
+ cnt = sscanf(&cmd_buf[4], "%x", &address);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "read <reg>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address >= I40E_MAX_REGISTER) {
+ dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n",
+ address);
+ goto command_write_done;
+ }
+
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n",
+ address, value);
+
+ } else if (strncmp(cmd_buf, "write", 5) == 0) {
+ u32 address, value;
+ cnt = sscanf(&cmd_buf[5], "%x %x", &address, &value);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "write <reg> <value>\n");
+ goto command_write_done;
+ }
+
+ /* check the range on address */
+ if (address >= I40E_MAX_REGISTER) {
+ dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n",
+ address);
+ goto command_write_done;
+ }
+ wr32(&pf->hw, address, value);
+ value = rd32(&pf->hw, address);
+ dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n",
+ address, value);
+ } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) {
+ if (strncmp(&cmd_buf[12], "vsi", 3) == 0) {
+ cnt = sscanf(&cmd_buf[15], "%d", &vsi_seid);
+ if (cnt == 0) {
+ int i;
+				for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+					if (pf->vsi[i])
+						i40e_vsi_reset_stats(pf->vsi[i]);
+ dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
+ } else if (cnt == 1) {
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "clear_stats vsi: bad vsi %d\n",
+ vsi_seid);
+ goto command_write_done;
+ }
+ i40e_vsi_reset_stats(vsi);
+ dev_info(&pf->pdev->dev,
+ "vsi clear stats called for vsi %d\n",
+ vsi_seid);
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n");
+ }
+ } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) {
+ i40e_pf_reset_stats(pf);
+ dev_info(&pf->pdev->dev, "pf clear stats called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n");
+ }
+ } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
+ (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
+ struct i40e_fdir_data fd_data;
+ int ret;
+ u16 packet_len, i, j = 0;
+ char *asc_packet;
+ bool add = false;
+
+ asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+ if (!asc_packet)
+ goto command_write_done;
+
+ fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+
+ if (!fd_data.raw_packet) {
+ kfree(asc_packet);
+ asc_packet = NULL;
+ goto command_write_done;
+ }
+
+ if (strncmp(cmd_buf, "add", 3) == 0)
+ add = true;
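+		/* arguments follow the help text:
+		 * <q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl>
+		 * <fd_status> <cnt_index> <fd_id> <packet_len> <packet>
+		 * where <packet> is space-separated hex byte pairs
+		 */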
+ cnt = sscanf(&cmd_buf[13],
+ "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %512s",
+ &fd_data.q_index,
+ &fd_data.flex_off, &fd_data.pctype,
+ &fd_data.dest_vsi, &fd_data.dest_ctl,
+ &fd_data.fd_status, &fd_data.cnt_index,
+ &fd_data.fd_id, &packet_len, asc_packet);
+ if (cnt != 10) {
+ dev_info(&pf->pdev->dev,
+ "program fd_filter: bad command string, cnt=%d\n",
+ cnt);
+ kfree(asc_packet);
+ asc_packet = NULL;
+ kfree(fd_data.raw_packet);
+ goto command_write_done;
+ }
+
+ /* fix packet length if user entered 0 */
+ if (packet_len == 0)
+ packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP;
+
+ /* make sure to check the max as well */
+ packet_len = min_t(u16,
+ packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
+
+ dev_info(&pf->pdev->dev, "FD raw packet:\n");
+ for (i = 0; i < packet_len; i++) {
+ sscanf(&asc_packet[j], "%2hhx ",
+ &fd_data.raw_packet[i]);
+ j += 3;
+ snprintf(print_buf, 3, "%02x ", fd_data.raw_packet[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+				snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ ret = i40e_program_fdir_filter(&fd_data, pf, add);
+ if (!ret) {
+ dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed %d\n", ret);
+ }
+ kfree(fd_data.raw_packet);
+ fd_data.raw_packet = NULL;
+ kfree(asc_packet);
+ asc_packet = NULL;
+ } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
+ int ret;
+ ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Stop LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
+ int ret;
+ ret = i40e_aq_start_lldp(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Start LLDP AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5],
+ "get local", 9) == 0) {
+ int ret, i;
+ u8 *buff;
+ u16 llen, rlen;
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw, 0,
+ I40E_AQ_LLDP_MIB_LOCAL,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (local) AQ buffer written back:\n");
+ for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+ snprintf(print_buf, 3, "%02x ", buff[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+					snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+ int ret, i;
+ u8 *buff;
+ u16 llen, rlen;
+ buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_aq_get_lldp_mib(&pf->hw,
+ I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+					 I40E_AQ_LLDP_MIB_REMOTE,
+ buff, I40E_LLDPDU_SIZE,
+ &llen, &rlen, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ kfree(buff);
+ buff = NULL;
+ goto command_write_done;
+ }
+ dev_info(&pf->pdev->dev,
+ "Get LLDP MIB (remote) AQ buffer written back:\n");
+ for (i = 0; i < I40E_LLDPDU_SIZE; i++) {
+ snprintf(print_buf, 3, "%02x ", buff[i]);
+ print_buf += 3;
+ if ((i % 16) == 15) {
+					snprintf(print_buf, 2, "\n");
+ print_buf++;
+ }
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ kfree(buff);
+ buff = NULL;
+ } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
+ int ret;
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ true, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
+ int ret;
+ ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
+ false, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n",
+ pf->hw.aq.asq_last_status);
+ goto command_write_done;
+ }
+ }
+ } else if (strncmp(cmd_buf, "nvm read", 8) == 0) {
+ u16 buffer_len, i, bytes;
+ u16 module;
+ u32 offset;
+ u16 *buff;
+ int ret;
+
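+		/* module, word offset and word count are all optional and
+		 * default to zero; a zero count reads 512 words
+		 */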
+ cnt = sscanf(&cmd_buf[8], "%hx %x %hx",
+ &module, &offset, &buffer_len);
+ if (cnt == 0) {
+ module = 0;
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 1) {
+ offset = 0;
+ buffer_len = 0;
+ } else if (cnt == 2) {
+ buffer_len = 0;
+ } else if (cnt > 3) {
+ dev_info(&pf->pdev->dev,
+ "nvm read: bad command string, cnt=%d\n", cnt);
+ goto command_write_done;
+ }
+
+ /* Read at least 512 words */
+ if (buffer_len == 0)
+ buffer_len = 512;
+
+ bytes = 2 * buffer_len;
+ buff = kzalloc(bytes, GFP_KERNEL);
+ if (!buff)
+ goto command_write_done;
+
+ ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(buff);
+ goto command_write_done;
+ }
+
+ ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset),
+ bytes, (u8 *)buff, true, NULL);
+ i40e_release_nvm(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Read NVM AQ failed err=%d status=0x%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Read NVM module=0x%x offset=0x%x words=%d\n",
+ module, offset, buffer_len);
+ for (i = 0; i < buffer_len; i++) {
+ if ((i % 16) == 0) {
+					snprintf(print_buf, 14, "\n0x%08x: ",
+						 offset + i);
+					print_buf += 13;
+ }
+				snprintf(print_buf, 6, "%04x ", buff[i]);
+ print_buf += 5;
+ }
+ dev_info(&pf->pdev->dev, "%s\n", print_buf_start);
+ }
+ kfree(buff);
+ buff = NULL;
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n");
+ dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n");
+ dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " del relay <relay_seid>\n");
+ dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+ dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n");
+ dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n");
+ dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " dump switch\n");
+ dev_info(&pf->pdev->dev, " dump vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+ dev_info(&pf->pdev->dev, " dump desc aq\n");
+ dev_info(&pf->pdev->dev, " dump stats\n");
+ dev_info(&pf->pdev->dev, " dump reset stats\n");
+ dev_info(&pf->pdev->dev, " msg_enable [level]\n");
+ dev_info(&pf->pdev->dev, " read <reg>\n");
+ dev_info(&pf->pdev->dev, " write <reg> <value>\n");
+ dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n");
+ dev_info(&pf->pdev->dev, " clear_stats pf\n");
+ dev_info(&pf->pdev->dev, " pfr\n");
+ dev_info(&pf->pdev->dev, " corer\n");
+ dev_info(&pf->pdev->dev, " globr\n");
+ dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
+ dev_info(&pf->pdev->dev, " lldp start\n");
+ dev_info(&pf->pdev->dev, " lldp stop\n");
+ dev_info(&pf->pdev->dev, " lldp get local\n");
+ dev_info(&pf->pdev->dev, " lldp get remote\n");
+ dev_info(&pf->pdev->dev, " lldp event on\n");
+ dev_info(&pf->pdev->dev, " lldp event off\n");
+ dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n");
+ }
+
+command_write_done:
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+ kfree(print_buf_start);
+ print_buf = NULL;
+ print_buf_start = NULL;
+ return count;
+}
+
+static const struct file_operations i40e_dbg_command_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_command_read,
+ .write = i40e_dbg_command_write,
+};
+
+/**************************************************************
+ * netdev_ops
+ * The netdev_ops entry in debugfs lets the user invoke the driver's
+ * netdev operations (tx_timeout, change_mtu, set_rx_mode, napi) on demand.
+ **************************************************************/
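+/* Example usage, with debugfs mounted at /sys/kernel/debug (the i40e
+ * and per-device directories are created in i40e_dbg_init() and
+ * i40e_dbg_pf_init() below):
+ *   echo "napi <vsi_seid>" > /sys/kernel/debug/i40e/<pci_id>/netdev_ops
+ */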
+static char i40e_dbg_netdev_ops_buf[256] = "hello world";
+
+/**
+ * i40e_dbg_netdev_ops_read - read for netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ int buf_size = 256;
+ char *buf;
+ int len;
+
+	/* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+ if (count < buf_size)
+ return -ENOSPC;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ len = snprintf(buf, buf_size, "%s: %s\n",
+ pf->vsi[pf->lan_vsi]->netdev->name,
+ i40e_dbg_netdev_ops_buf);
+
+ bytes_not_copied = copy_to_user(buffer, buf, len);
+ kfree(buf);
+
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+
+ *ppos = len;
+ return len;
+}
+
+/**
+ * i40e_dbg_netdev_ops_write - write into netdev_ops datum
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct i40e_pf *pf = filp->private_data;
+ int bytes_not_copied;
+ struct i40e_vsi *vsi;
+ int vsi_seid;
+ int i, cnt;
+
+ /* don't allow partial writes */
+ if (*ppos != 0)
+ return 0;
+ if (count >= sizeof(i40e_dbg_netdev_ops_buf))
+ return -ENOSPC;
+
+ memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf));
+ bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf,
+ buffer, count);
+ if (bytes_not_copied < 0)
+ return bytes_not_copied;
+ else if (bytes_not_copied > 0)
+ count -= bytes_not_copied;
+ i40e_dbg_netdev_ops_buf[count] = '\0';
+
+ if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "tx_timeout: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "tx_timeout called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
+ int mtu;
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
+ &vsi_seid, &mtu);
+ if (cnt != 2) {
+ dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "change_mtu: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev,
+ mtu);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "change_mtu called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev,
+ "set_rx_mode: VSI %d not found\n", vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ if (rtnl_trylock()) {
+ vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev);
+ rtnl_unlock();
+ dev_info(&pf->pdev->dev, "set_rx_mode called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
+ }
+
+ } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) {
+ cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid);
+ if (cnt != 1) {
+ dev_info(&pf->pdev->dev, "napi <vsi_seid>\n");
+ goto netdev_ops_write_done;
+ }
+ vsi = i40e_dbg_find_vsi(pf, vsi_seid);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "napi: VSI %d not found\n",
+ vsi_seid);
+ goto netdev_ops_write_done;
+ }
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ napi_schedule(&vsi->q_vectors[i].napi);
+ dev_info(&pf->pdev->dev, "napi called\n");
+ } else {
+ dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+ i40e_dbg_netdev_ops_buf);
+ dev_info(&pf->pdev->dev, "available commands\n");
+ dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
+ dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
+ dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
+ }
+netdev_ops_write_done:
+ return count;
+}
+
+static const struct file_operations i40e_dbg_netdev_ops_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = i40e_dbg_netdev_ops_read,
+ .write = i40e_dbg_netdev_ops_write,
+};
+
+/**
+ * i40e_dbg_pf_init - setup the debugfs directory for the pf
+ * @pf: the pf that is starting up
+ **/
+void i40e_dbg_pf_init(struct i40e_pf *pf)
+{
+ struct dentry *pfile __attribute__((unused));
+ const char *name = pci_name(pf->pdev);
+
+ pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
+ if (pf->i40e_dbg_pf) {
+ pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
+ pf, &i40e_dbg_command_fops);
+ pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+ &i40e_dbg_dump_fops);
+ pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
+ pf, &i40e_dbg_netdev_ops_fops);
+ } else {
+ dev_info(&pf->pdev->dev,
+ "debugfs entry for %s failed\n", name);
+ }
+}
+
+/**
+ * i40e_dbg_pf_exit - clear out the pf's debugfs entries
+ * @pf: the pf that is stopping
+ **/
+void i40e_dbg_pf_exit(struct i40e_pf *pf)
+{
+ debugfs_remove_recursive(pf->i40e_dbg_pf);
+ pf->i40e_dbg_pf = NULL;
+
+ kfree(i40e_dbg_dump_buf);
+ i40e_dbg_dump_buf = NULL;
+}
+
+/**
+ * i40e_dbg_init - start up debugfs for the driver
+ **/
+void i40e_dbg_init(void)
+{
+ i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL);
+ if (!i40e_dbg_root)
+ pr_info("init of debugfs failed\n");
+}
+
+/**
+ * i40e_dbg_exit - clean out the driver's debugfs entries
+ **/
+void i40e_dbg_exit(void)
+{
+ debugfs_remove_recursive(i40e_dbg_root);
+ i40e_dbg_root = NULL;
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
new file mode 100644
index 00000000000..de255143bde
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -0,0 +1,131 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_diag.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_diag_reg_pattern_test
+ * @hw: pointer to the hw struct
+ * @reg: reg to be tested
+ * @mask: bits to be touched
+ **/
+static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
+{
+ const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ u32 pat, val, orig_val;
+ int i;
+
+ orig_val = rd32(hw, reg);
+ for (i = 0; i < ARRAY_SIZE(patterns); i++) {
+ pat = patterns[i];
+ wr32(hw, reg, (pat & mask));
+ val = rd32(hw, reg);
+ if ((val & mask) != (pat & mask)) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
+ __func__, reg, pat, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+ }
+
+ wr32(hw, reg, orig_val);
+ val = rd32(hw, reg);
+ if (val != orig_val) {
+ i40e_debug(hw, I40E_DEBUG_DIAG,
+ "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
+ __func__, reg, orig_val, val);
+ return I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return 0;
+}
+
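+/* Registers exercised by i40e_diag_reg_test: each row gives a base
+ * register, the bits that may be toggled, the number of elements in
+ * the register array and the byte stride between them; the list is
+ * terminated by a zero-offset entry.
+ */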
+struct i40e_diag_reg_test_info i40e_reg_list[] = {
+ /* offset mask elements stride */
+ {I40E_QTX_CTL(0), 0x0000FFBF, 64, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 64, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 511, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, I40E_QINT_TQCTL_MAX_INDEX + 1, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, I40E_QINT_RQCTL_MAX_INDEX + 1, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ { 0 }
+};
+
+/**
+ * i40e_diag_reg_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform registers diagnostic test
+ **/
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+{
+ i40e_status ret_code = 0;
+ u32 reg, mask;
+ u32 i, j;
+
+ for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ mask = i40e_reg_list[i].mask;
+ for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ reg = i40e_reg_list[i].offset +
+ (j * i40e_reg_list[i].stride);
+ ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_diag_eeprom_test
+ * @hw: pointer to the hw struct
+ *
+ * Perform EEPROM diagnostic test
+ **/
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+{
+ i40e_status ret_code;
+ u16 reg_val;
+
+	/* read NVM control word and if NVM valid, validate EEPROM checksum */
+ ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
+ if ((!ret_code) &&
+ ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
+ (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
+ ret_code = i40e_validate_nvm_checksum(hw, NULL);
+ } else {
+ ret_code = I40E_ERR_DIAG_TEST_FAILED;
+ }
+
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
new file mode 100644
index 00000000000..3d98277f452
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -0,0 +1,52 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_DIAG_H_
+#define _I40E_DIAG_H_
+
+#include "i40e_type.h"
+
+enum i40e_lb_mode {
+ I40E_LB_MODE_NONE = 0,
+ I40E_LB_MODE_PHY_LOCAL,
+ I40E_LB_MODE_PHY_REMOTE,
+ I40E_LB_MODE_MAC_LOCAL,
+};
+
+struct i40e_diag_reg_test_info {
+ u32 offset; /* the base register */
+ u32 mask; /* bits that can be tested */
+ u32 elements; /* number of elements if array */
+ u32 stride; /* bytes between each element */
+};
+
+extern struct i40e_diag_reg_test_info i40e_reg_list[];
+
+i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
+i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+
+#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
new file mode 100644
index 00000000000..9a76b8cec76
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -0,0 +1,1449 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* ethtool support for i40e */
+
+#include "i40e.h"
+#include "i40e_diag.h"
+
+struct i40e_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define I40E_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+#define I40E_NETDEV_STAT(_net_stat) \
+ I40E_STAT(struct net_device_stats, #_net_stat, _net_stat)
+#define I40E_PF_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_pf, _name, _stat)
+#define I40E_VSI_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_vsi, _name, _stat)
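+
+/* Worked example (illustrative only, not part of the original table):
+ * I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes) expands to
+ *   { .stat_string = "rx_bytes",
+ *     .sizeof_stat = FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
+ *     .stat_offset = offsetof(struct i40e_pf, stats.eth.rx_bytes) }
+ * so each row records a display name, the counter width, and the byte
+ * offset later used to fetch the value from the owning structure.
+ */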
+
+static const struct i40e_stats i40e_gstrings_net_stats[] = {
+ I40E_NETDEV_STAT(rx_packets),
+ I40E_NETDEV_STAT(tx_packets),
+ I40E_NETDEV_STAT(rx_bytes),
+ I40E_NETDEV_STAT(tx_bytes),
+ I40E_NETDEV_STAT(rx_errors),
+ I40E_NETDEV_STAT(tx_errors),
+ I40E_NETDEV_STAT(rx_dropped),
+ I40E_NETDEV_STAT(tx_dropped),
+ I40E_NETDEV_STAT(multicast),
+ I40E_NETDEV_STAT(collisions),
+ I40E_NETDEV_STAT(rx_length_errors),
+ I40E_NETDEV_STAT(rx_crc_errors),
+};
+
+/* These PF_STATs might look like duplicates of some NETDEV_STATs,
+ * but they are separate. This device supports Virtualization, and
+ * as such might have several netdevs supporting VMDq and FCoE going
+ * through a single port. The NETDEV_STATs are for individual netdevs
+ * seen at the top of the stack, and the PF_STATs are for the physical
+ * function at the bottom of the stack hosting those netdevs.
+ *
+ * The PF_STATs are appended to the netdev stats only when ethtool -S
+ * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
+ */
+static struct i40e_stats i40e_gstrings_stats[] = {
+ I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
+ I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
+ I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
+ I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
+ I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
+ I40E_PF_STAT("crc_errors", stats.crc_errors),
+ I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
+ I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
+ I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
+ I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
+ I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
+ I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
+ I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
+ I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
+ I40E_PF_STAT("rx_size_64", stats.rx_size_64),
+ I40E_PF_STAT("rx_size_127", stats.rx_size_127),
+ I40E_PF_STAT("rx_size_255", stats.rx_size_255),
+ I40E_PF_STAT("rx_size_511", stats.rx_size_511),
+ I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
+ I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
+ I40E_PF_STAT("rx_size_big", stats.rx_size_big),
+ I40E_PF_STAT("tx_size_64", stats.tx_size_64),
+ I40E_PF_STAT("tx_size_127", stats.tx_size_127),
+ I40E_PF_STAT("tx_size_255", stats.tx_size_255),
+ I40E_PF_STAT("tx_size_511", stats.tx_size_511),
+ I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
+ I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
+ I40E_PF_STAT("tx_size_big", stats.tx_size_big),
+ I40E_PF_STAT("rx_undersize", stats.rx_undersize),
+ I40E_PF_STAT("rx_fragments", stats.rx_fragments),
+ I40E_PF_STAT("rx_oversize", stats.rx_oversize),
+ I40E_PF_STAT("rx_jabber", stats.rx_jabber),
+ I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
+};
+
+#define I40E_QUEUE_STATS_LEN(n) \
+ ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
+ ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
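+/* Each queue pair exports four u64 counters (Tx packets/bytes and
+ * Rx packets/bytes); the doubled num_queue_pairs term above, multiplied
+ * by 2, accounts for exactly that.
+ */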
+#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
+#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_QUEUE_STATS_LEN((n)))
+#define I40E_PFC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
+ FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
+ / sizeof(u64))
+#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
+ I40E_PFC_STATS_LEN + \
+ I40E_VSI_STATS_LEN((n)))
+
+enum i40e_ethtool_test_id {
+ I40E_ETH_TEST_REG = 0,
+ I40E_ETH_TEST_EEPROM,
+ I40E_ETH_TEST_INTR,
+ I40E_ETH_TEST_LOOPBACK,
+ I40E_ETH_TEST_LINK,
+};
+
+static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Eeprom test (offline)",
+ "Interrupt test (offline)",
+ "Loopback test (offline)",
+ "Link test (on/offline)"
+};
+
+#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+
+/**
+ * i40e_get_settings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Reports speed/duplex settings based on media_type
+ **/
+static int i40e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+ bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+ u32 link_speed = hw_link_info->link_speed;
+
+ /* hardware is either in 40G mode or 10G mode
+ * NOTE: this section initializes supported and advertising
+ */
+ switch (hw_link_info->phy_type) {
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ecmd->supported = SUPPORTED_40000baseCR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseCR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ecmd->supported = SUPPORTED_40000baseKR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ecmd->supported = SUPPORTED_40000baseSR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseSR4_Full;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ecmd->supported = SUPPORTED_40000baseLR4_Full;
+ ecmd->advertising = ADVERTISED_40000baseLR4_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ecmd->supported = SUPPORTED_10000baseKX4_Full;
+ ecmd->advertising = ADVERTISED_10000baseKX4_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ecmd->supported = SUPPORTED_10000baseKR_Full;
+ ecmd->advertising = ADVERTISED_10000baseKR_Full;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ default:
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ break;
+ }
+
+ /* for now just say autoneg all the time */
+ ecmd->supported |= SUPPORTED_Autoneg;
+
+ if (hw->phy.media_type == I40E_MEDIA_TYPE_BACKPLANE) {
+ ecmd->supported |= SUPPORTED_Backplane;
+ ecmd->advertising |= ADVERTISED_Backplane;
+ ecmd->port = PORT_NONE;
+ } else if (hw->phy.media_type == I40E_MEDIA_TYPE_BASET) {
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ }
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (link_up) {
+ switch (link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ /* need a SPEED_40000 in ethtool.h */
+ ethtool_cmd_speed_set(ecmd, 40000);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ default:
+ break;
+ }
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_pauseparam - Get Flow Control status
+ * @netdev: network interface device structure
+ * @pause: ethtool pause parameters to report into
+ *
+ * Reports the current tx/rx-pause and pause-autoneg status.
+ **/
+static void i40e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ pause->autoneg =
+ ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_RX)
+ pause->rx_pause = 1;
+ if (hw_link_info->an_info & I40E_AQ_LINK_PAUSE_TX)
+ pause->tx_pause = 1;
+}
+
+static u32 i40e_get_msglevel(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ return pf->msg_enable;
+}
+
+static void i40e_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (I40E_DEBUG_USER & data)
+ pf->hw.debug_mask = data;
+ pf->msg_enable = data;
+}
+
+static int i40e_get_regs_len(struct net_device *netdev)
+{
+ int reg_count = 0;
+ int i;
+
+ for (i = 0; i40e_reg_list[i].offset != 0; i++)
+ reg_count += i40e_reg_list[i].elements;
+
+ return reg_count * sizeof(u32);
+}
+
+static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 *reg_buf = p;
+ int i, j, ri;
+ u32 reg;
+
+ /* Tell ethtool which driver-version-specific regs output we have.
+ *
+ * At some point, if we have ethtool doing special formatting of
+ * this data, it will rely on this version number to know how to
+ * interpret things. Hence, this needs to be updated if/when the
+ * diags register table is changed.
+ */
+ regs->version = 1;
+
+ /* loop through the diags reg table for what to print */
+ ri = 0;
+ for (i = 0; i40e_reg_list[i].offset != 0; i++) {
+ for (j = 0; j < i40e_reg_list[i].elements; j++) {
+ reg = i40e_reg_list[i].offset
+ + (j * i40e_reg_list[i].stride);
+ reg_buf[ri++] = rd32(hw, reg);
+ }
+ }
+}
+
+static int i40e_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ int first_word, last_word;
+ u16 i, eeprom_len;
+ u16 *eeprom_buff;
+ int ret_val = 0;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+ eeprom_len = last_word - first_word + 1;
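+
+ /* Illustrative arithmetic: a byte request of offset 3, len 4 maps to
+ * words 1..3, i.e. first_word = 1, last_word = 3, eeprom_len = 3, and
+ * the (eeprom->offset & 1) adjustment below re-aligns the byte copy.
+ */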
+
+ eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ ret_val = i40e_read_nvm_buffer(hw, first_word, &eeprom_len,
+ eeprom_buff);
+ if (eeprom_len == 0) {
+ kfree(eeprom_buff);
+ return -EACCES;
+ }
+
+ /* Device's eeprom is always little-endian, word addressable */
+ for (i = 0; i < eeprom_len; i++)
+ le16_to_cpus(&eeprom_buff[i]);
+
+ memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+
+ return ret_val;
+}
+
+static int i40e_get_eeprom_len(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+
+ return hw->nvm.sr_size * 2;
+}
+
+static void i40e_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, i40e_driver_version_str,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void i40e_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+ ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = vsi->rx_rings[0].count;
+ ring->tx_pending = vsi->tx_rings[0].count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int i40e_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 new_rx_count, new_tx_count;
+ int i, err = 0;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ I40E_MIN_NUM_DESCRIPTORS,
+ I40E_MAX_NUM_DESCRIPTORS);
+ new_tx_count = ALIGN(new_tx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
+
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ I40E_MIN_NUM_DESCRIPTORS,
+ I40E_MAX_NUM_DESCRIPTORS);
+ new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
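+
+ /* For instance, a user request of 1000 descriptors is first clamped
+ * into [I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS] and then
+ * rounded up by ALIGN() to the next I40E_REQ_DESCRIPTOR_MULTIPLE
+ * boundary, so the value actually programmed may differ from the
+ * request.
+ */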
+
+ /* if nothing to do return success */
+ if ((new_tx_count == vsi->tx_rings[0].count) &&
+ (new_rx_count == vsi->rx_rings[0].count))
+ return 0;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ usleep_range(1000, 2000);
+
+ if (!netif_running(vsi->netdev)) {
+ /* simple case - set for the next time the netdev is started */
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ vsi->tx_rings[i].count = new_tx_count;
+ vsi->rx_rings[i].count = new_rx_count;
+ }
+ goto done;
+ }
+
+ /* We can't just free everything and then setup again,
+ * because the ISRs in MSI-X mode get passed pointers
+ * to the Tx and Rx ring structs.
+ */
+
+ /* alloc updated Tx resources */
+ if (new_tx_count != vsi->tx_rings[0].count) {
+ netdev_info(netdev,
+ "Changing Tx descriptor count from %d to %d.\n",
+ vsi->tx_rings[0].count, new_tx_count);
+ tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!tx_rings) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* clone ring and setup updated count */
+ tx_rings[i] = vsi->tx_rings[i];
+ tx_rings[i].count = new_tx_count;
+ err = i40e_setup_tx_descriptors(&tx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ i40e_free_tx_resources(&tx_rings[i]);
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+
+ goto done;
+ }
+ }
+ }
+
+ /* alloc updated Rx resources */
+ if (new_rx_count != vsi->rx_rings[0].count) {
+ netdev_info(netdev,
+ "Changing Rx descriptor count from %d to %d\n",
+ vsi->rx_rings[0].count, new_rx_count);
+ rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!rx_rings) {
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* clone ring and setup updated count */
+ rx_rings[i] = vsi->rx_rings[i];
+ rx_rings[i].count = new_rx_count;
+ err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err) {
+ while (i) {
+ i--;
+ i40e_free_rx_resources(&rx_rings[i]);
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+
+ goto free_tx;
+ }
+ }
+ }
+
+ /* Bring interface down, copy in the new ring info,
+ * then restore the interface
+ */
+ i40e_down(vsi);
+
+ if (tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_tx_resources(&vsi->tx_rings[i]);
+ vsi->tx_rings[i] = tx_rings[i];
+ }
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+ if (rx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_free_rx_resources(&vsi->rx_rings[i]);
+ vsi->rx_rings[i] = rx_rings[i];
+ }
+ kfree(rx_rings);
+ rx_rings = NULL;
+ }
+
+ i40e_up(vsi);
+
+free_tx:
+ /* error cleanup if the Rx allocations failed after getting Tx */
+ if (tx_rings) {
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ i40e_free_tx_resources(&tx_rings[i]);
+ kfree(tx_rings);
+ tx_rings = NULL;
+ }
+
+done:
+ clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+
+ return err;
+}
+
+static int i40e_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return I40E_TEST_LEN;
+ case ETH_SS_STATS:
+ if (vsi == pf->vsi[pf->lan_vsi])
+ return I40E_PF_STATS_LEN(netdev);
+ else
+ return I40E_VSI_STATS_LEN(netdev);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void i40e_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i = 0;
+ char *p;
+ int j;
+ struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+
+ i40e_update_stats(vsi);
+
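+ /* Note: the order of values written below must match the string order
+ * emitted by i40e_get_strings(); ethtool pairs names with values purely
+ * by position.
+ */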
+ for (j = 0; j < I40E_NETDEV_STATS_LEN; j++) {
+ p = (char *)net_stats + i40e_gstrings_net_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ data[i++] = vsi->tx_rings[j].tx_stats.packets;
+ data[i++] = vsi->tx_rings[j].tx_stats.bytes;
+ }
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ data[i++] = vsi->rx_rings[j].rx_stats.packets;
+ data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
+ }
+}
+
+static void i40e_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ for (i = 0; i < I40E_TEST_LEN; i++) {
+ memcpy(data, i40e_gstrings_test[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < I40E_NETDEV_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_net_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ }
+}
+
+static int i40e_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ return ethtool_op_get_ts_info(dev, info);
+}
+
+static int i40e_link_test(struct i40e_pf *pf, u64 *data)
+{
+ if (i40e_get_link_status(&pf->hw))
+ *data = 0;
+ else
+ *data = 1;
+
+ return *data;
+}
+
+static int i40e_reg_test(struct i40e_pf *pf, u64 *data)
+{
+ i40e_status ret;
+
+ ret = i40e_diag_reg_test(&pf->hw);
+ *data = ret;
+
+ return ret;
+}
+
+static int i40e_eeprom_test(struct i40e_pf *pf, u64 *data)
+{
+ i40e_status ret;
+
+ ret = i40e_diag_eeprom_test(&pf->hw);
+ *data = ret;
+
+ return ret;
+}
+
+static int i40e_intr_test(struct i40e_pf *pf, u64 *data)
+{
+ *data = -ENOSYS;
+
+ return *data;
+}
+
+static int i40e_loopback_test(struct i40e_pf *pf, u64 *data)
+{
+ *data = -ENOSYS;
+
+ return *data;
+}
+
+static void i40e_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ set_bit(__I40E_TESTING, &pf->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ netdev_info(netdev, "offline testing starting\n");
+
+ /* Link test performed before hardware reset
+ * so autoneg doesn't interfere with test result
+ */
+ netdev_info(netdev, "link test starting\n");
+ if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ netdev_info(netdev, "register test starting\n");
+ if (i40e_reg_test(pf, &data[I40E_ETH_TEST_REG]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "eeprom test starting\n");
+ if (i40e_eeprom_test(pf, &data[I40E_ETH_TEST_EEPROM]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "interrupt test starting\n");
+ if (i40e_intr_test(pf, &data[I40E_ETH_TEST_INTR]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+ netdev_info(netdev, "loopback test starting\n");
+ if (i40e_loopback_test(pf, &data[I40E_ETH_TEST_LOOPBACK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ } else {
+ netdev_info(netdev, "online test starting\n");
+ /* Online tests */
+ if (i40e_link_test(pf, &data[I40E_ETH_TEST_LINK]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Offline only tests, not run in online; pass by default */
+ data[I40E_ETH_TEST_REG] = 0;
+ data[I40E_ETH_TEST_EEPROM] = 0;
+ data[I40E_ETH_TEST_INTR] = 0;
+ data[I40E_ETH_TEST_LOOPBACK] = 0;
+ }
+
+ /* clear the testing state for both the offline and online paths */
+ clear_bit(__I40E_TESTING, &pf->state);
+}
+
+static void i40e_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+ /* restart autonegotiation */
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret = 0;
+
+ ret = i40e_aq_set_link_restart_an(hw, NULL);
+ if (ret) {
+ netdev_info(netdev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int i40e_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int blink_freq = 2;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ pf->led_status = i40e_led_get(hw);
+ return blink_freq;
+ case ETHTOOL_ID_ON:
+ i40e_led_set(hw, 0xF);
+ break;
+ case ETHTOOL_ID_OFF:
+ i40e_led_set(hw, 0x0);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ i40e_led_set(hw, pf->led_status);
+ break;
+ }
+
+ return 0;
+}
+
+/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
+ * Throttle Rate (ITR), i.e. ITR(1) = 2us, ITR(10) = 20us, and also
+ * 125us (8000 interrupts per second) == ITR(62)
+ */
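+
+/* Worked example of that factor of 2: a request of 50 usecs corresponds
+ * to an ITR register value of 25 (reg = usecs / 2, usecs = reg * 2),
+ * which is the conversion the ITR_TO_REG()/ITR_REG_TO_USEC() helpers
+ * used below are expected to perform.
+ */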
+
+static int i40e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ ec->tx_max_coalesced_frames_irq = vsi->work_limit;
+ ec->rx_max_coalesced_frames_irq = vsi->work_limit;
+
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
+ ec->rx_coalesce_usecs = 1;
+ else
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+
+ if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ ec->tx_coalesce_usecs = 1;
+ else
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+
+ return 0;
+}
+
+static int i40e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_q_vector *q_vector;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i;
+
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
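+ /* Convention used below: a value of 0 disables interrupt throttling,
+ * 1 selects the dynamic (adaptive) ITR default, and any other value is
+ * treated as a fixed interval in microseconds.
+ */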
+ switch (ec->rx_coalesce_usecs) {
+ case 0:
+ vsi->rx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ break;
+ default:
+ if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+ break;
+ }
+
+ switch (ec->tx_coalesce_usecs) {
+ case 0:
+ vsi->tx_itr_setting = 0;
+ break;
+ case 1:
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ break;
+ default:
+ if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
+ (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ return -EINVAL;
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ break;
+ }
+
+ vector = vsi->base_vector;
+ q_vector = vsi->q_vectors;
+ for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on i40e */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through to add IP fields */
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fall through to add IP fields */
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vsi->alloc_queue_pairs;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = i40e_get_rss_hash_opts(pf, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = 500;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @pf: pointer to the physical function struct
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
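+ /* hena is a 64-bit hash-enable bitmap with one bit per packet
+ * classifier type (PCTYPE); it is read from, and later written back
+ * to, the pair of 32-bit I40E_PFQF_HENA registers.
+ */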
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &=
+ ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |=
+ (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &=
+ ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |=
+ (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
+
+#define IP_HEADER_OFFSET 14
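+/* 14 bytes is the Ethernet header length (ETH_HLEN), so the IP header
+ * begins at this offset within the raw packet buffer built below.
+ */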
+/**
+ * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct udphdr *udp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+ udp->source = fsp->h_u.tcp_ip4_spec.psrc;
+ udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
+ i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct tcphdr *tcp;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+ tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
+ + sizeof(struct iphdr));
+
+ ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
+ tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
+ tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
+
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required from the FDir descriptor
+ * @fsp: the flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * a specific flow spec
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @fsp: the ethtool flow spec
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
+ struct i40e_fdir_data *fd_data,
+ struct ethtool_rx_flow_spec *fsp, bool add)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct iphdr *ip;
+ bool err = false;
+ int ret;
+ int i;
+
+ ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
+
+ ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
+ ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
+ ip->protocol = fsp->h_u.usr_ip4_spec.proto;
+
+ for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ fd_data->pctype = i;
+ ret = i40e_program_fdir_filter(fd_data, pf, add);
+
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ }
+ }
+
+ return err ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for
+ * a specific flow spec based on their protocol
+ * @vsi: pointer to the targeted VSI
+ * @cmd: command to get or set RX flow classification rules
+ * @add: true adds a filter, false removes it
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
+ struct ethtool_rxnfc *cmd, bool add)
+{
+ struct i40e_fdir_data fd_data;
+ int ret = -EINVAL;
+ struct i40e_pf *pf;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!vsi)
+ return -EINVAL;
+
+ pf = vsi->back;
+
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= vsi->num_queue_pairs))
+ return -EINVAL;
+
+ /* Populate the Flow Director that we have at the moment
+ * and allocate the raw packet buffer for the calling functions
+ */
+ fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
+ GFP_KERNEL);
+
+ if (!fd_data.raw_packet) {
+ dev_info(&pf->pdev->dev, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ fd_data.q_index = fsp->ring_cookie;
+ fd_data.flex_off = 0;
+ fd_data.pctype = 0;
+ fd_data.dest_vsi = vsi->id;
+ fd_data.dest_ctl = 0;
+ fd_data.fd_status = 0;
+ fd_data.cnt_index = 0;
+ fd_data.fd_id = 0;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+ break;
+ case UDP_V4_FLOW:
+ ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+ break;
+ case SCTP_V4_FLOW:
+ ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPV4_FLOW:
+ ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add);
+ break;
+ default:
+ ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
+ break;
+ }
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "Could not specify spec type\n");
+ ret = -EINVAL;
+ }
+
+ kfree(fd_data.raw_packet);
+ fd_data.raw_packet = NULL;
+
+ return ret;
+}
+
+/**
+ * i40e_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40e_set_rss_hash_opt(pf, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = i40e_add_del_fdir_ethtool(vsi, cmd, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops i40e_ethtool_ops = {
+ .get_settings = i40e_get_settings,
+ .get_drvinfo = i40e_get_drvinfo,
+ .get_regs_len = i40e_get_regs_len,
+ .get_regs = i40e_get_regs,
+ .nway_reset = i40e_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_wol = i40e_get_wol,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+ .get_ringparam = i40e_get_ringparam,
+ .set_ringparam = i40e_set_ringparam,
+ .get_pauseparam = i40e_get_pauseparam,
+ .get_msglevel = i40e_get_msglevel,
+ .set_msglevel = i40e_set_msglevel,
+ .get_rxnfc = i40e_get_rxnfc,
+ .set_rxnfc = i40e_set_rxnfc,
+ .self_test = i40e_diag_test,
+ .get_strings = i40e_get_strings,
+ .set_phys_id = i40e_set_phys_id,
+ .get_sset_count = i40e_get_sset_count,
+ .get_ethtool_stats = i40e_get_ethtool_stats,
+ .get_coalesce = i40e_get_coalesce,
+ .set_coalesce = i40e_set_coalesce,
+ .get_ts_info = i40e_get_ts_info,
+};
+
+void i40e_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
new file mode 100644
index 00000000000..901804af8b0
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -0,0 +1,366 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#include "i40e_type.h"
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
+{
+ enum i40e_memory_type mem_type __attribute__((unused));
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ bool dma_mem_alloc_done = false;
+ struct i40e_dma_mem mem;
+ u64 alloc_len;
+
+ if (NULL == hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (sd_index >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
+ goto exit;
+ }
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (I40E_SD_TYPE_PAGED == type) {
+ mem_type = i40e_mem_pd;
+ alloc_len = I40E_HMC_PAGED_BP_SIZE;
+ } else {
+ mem_type = i40e_mem_bp_jumbo;
+ alloc_len = direct_mode_sz;
+ }
+
+ /* allocate a 4K pd page or 2M backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+ dma_mem_alloc_done = true;
+ if (I40E_SD_TYPE_PAGED == type) {
+ ret_code = i40e_allocate_virt_mem(hw,
+ &sd_entry->u.pd_table.pd_entry_virt_mem,
+ sizeof(struct i40e_hmc_pd_entry) * 512);
+ if (ret_code)
+ goto exit;
+ sd_entry->u.pd_table.pd_entry =
+ (struct i40e_hmc_pd_entry *)
+ sd_entry->u.pd_table.pd_entry_virt_mem.va;
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem,
+ sizeof(struct i40e_dma_mem));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &mem,
+ sizeof(struct i40e_dma_mem));
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+ /* initialize the sd entry */
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+ /* increment the ref count */
+ I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+ }
+ /* Increment backing page reference count */
+ if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+ I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+ if (ret_code)
+ if (dma_mem_alloc_done)
+ i40e_free_dma_mem(hw, &mem);
+
+ return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Mark the entry valid in i40e_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for pd should be pinned down, physically contiguous and
+ * aligned on 4K boundary and zeroed memory.
+ * 2. It should be 4K in size.
+ **/
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_dma_mem mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
+ goto exit;
+ }
+
+ /* find corresponding sd */
+ sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+ if (I40E_SD_TYPE_PAGED !=
+ hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ goto exit;
+
+ rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ /* allocate a 4K backing page */
+ ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
+ I40E_HMC_PAGED_BP_SIZE,
+ I40E_HMC_PD_BP_BUF_ALIGNMENT);
+ if (ret_code)
+ goto exit;
+
+ memcpy(&pd_entry->bp.addr, &mem, sizeof(struct i40e_dma_mem));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+ /* Set page address and valid bit */
+ page_desc = mem.pa | 0x1;
+
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+
+ /* Add the backing page physical address in the pd entry */
+ memcpy(pd_addr, &page_desc, sizeof(u64));
+
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ I40E_INC_PD_REFCNT(pd_table);
+ }
+ I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) or in sd table
+ * (for direct address mode) invalid.
+ * 2. Write to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrement the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ **/
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_entry *pd_entry;
+ struct i40e_hmc_pd_table *pd_table;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+
+ /* calculate index */
+ sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+ hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
+ goto exit;
+ }
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
+ goto exit;
+ }
+ /* get the entry and decrease its ref counter */
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ I40E_DEC_BP_REFCNT(&pd_entry->bp);
+ if (pd_entry->bp.ref_cnt)
+ goto exit;
+
+ /* mark the entry invalid */
+ pd_entry->valid = false;
+ I40E_DEC_PD_REFCNT(pd_table);
+ pd_addr = (u64 *)pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ if (is_pf)
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+ else
+ I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+ if (ret_code)
+ goto exit;
+ if (!pd_table->ref_cnt)
+ i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+ if (sd_entry->u.bp.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ struct i40e_hmc_sd_entry *sd_entry;
+ i40e_status ret_code = 0;
+
+ /* get the entry and decrease its ref counter */
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+ if (ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.ref_cnt) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto exit;
+ }
+
+ /* mark the entry invalid */
+ sd_entry->valid = false;
+
+ I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (is_pf) {
+ I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+ } else {
+ ret_code = I40E_NOT_SUPPORTED;
+ goto exit;
+ }
+ /* free memory here */
+ ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+ if (ret_code)
+ goto exit;
+exit:
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
new file mode 100644
index 00000000000..aacd42a261e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD 512
+#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE 4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define I40E_FIRST_VF_FPM_ID 16
+
+struct i40e_hmc_obj_info {
+ u64 base; /* base addr in FPM */
+ u32 max_cnt; /* max count available for this hmc func */
+ u32 cnt; /* count of objects driver actually wants to create */
+ u64 size; /* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+ I40E_SD_TYPE_INVALID = 0,
+ I40E_SD_TYPE_PAGED = 1,
+ I40E_SD_TYPE_DIRECT = 2
+};
+
+struct i40e_hmc_bp {
+ enum i40e_sd_entry_type entry_type;
+ struct i40e_dma_mem addr; /* populate to be used by hw */
+ u32 sd_pd_index;
+ u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+ struct i40e_hmc_bp bp;
+ u32 sd_index;
+ bool valid;
+};
+
+struct i40e_hmc_pd_table {
+ struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+ struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */
+ struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+ u32 ref_cnt;
+ u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+ enum i40e_sd_entry_type entry_type;
+ bool valid;
+
+ union {
+ struct i40e_hmc_pd_table pd_table;
+ struct i40e_hmc_bp bp;
+ } u;
+};
+
+struct i40e_hmc_sd_table {
+ struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+ u32 sd_cnt;
+ u32 ref_cnt;
+ struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+ u32 signature;
+ /* equals to pci func num for PF and dynamically allocated for VFs */
+ u8 hmc_fn_id;
+ u16 first_sd_index; /* index of the first available SD */
+
+ /* hmc objects */
+ struct i40e_hmc_obj_info *hmc_obj;
+ struct i40e_virt_mem hmc_obj_virt_mem;
+ struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \
+{ \
+ u32 val1, val2, val3; \
+ val1 = (u32)(upper_32_bits(pa)); \
+ val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \
+ (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \
+{ \
+ u32 val2, val3; \
+ val2 = (I40E_HMC_MAX_BP_COUNT << \
+ I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \
+ ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \
+ I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \
+ val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \
+ wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \
+ wr32((hw), I40E_PFHMC_SDDATALOW, val2); \
+ wr32((hw), I40E_PFHMC_SDCMD, val3); \
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
+ wr32((hw), I40E_PFHMC_PDINV, \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
+ wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
+ (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
+ ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the sd index limit (one past the last sd used)
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{ \
+ u64 fpm_addr, fpm_limit; \
+ fpm_addr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (index); \
+ fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+ *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \
+ *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(sd_limit) += 1; \
+}
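+
+/* Worked example (illustrative values only): with the 2M direct backing page
+ * size, an object type whose base is 0x300000 and whose size is 128 bytes,
+ * a start index of 0 and a count of 4 give fpm_addr = 0x300000 and
+ * fpm_limit = 0x300200, so *sd_idx = 1 and *sd_limit = 2, i.e. one past the
+ * last segment descriptor the range touches.
+ */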
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{ \
+ u64 fpm_adr, fpm_limit; \
+ fpm_adr = (hmc_info)->hmc_obj[(type)].base + \
+ (hmc_info)->hmc_obj[(type)].size * (idx); \
+ fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \
+ *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \
+ *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \
+ /* add one more to the limit to correct our range */ \
+ *(pd_limit) += 1; \
+}
+
+i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+
+i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index);
+i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
new file mode 100644
index 00000000000..a695b91c9c7
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -0,0 +1,1006 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+ u64 aligned_offset = offset;
+
+ if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+ aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+ (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+ return aligned_offset;
+}
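+
+/* For instance, an offset of 0x1234 is not 512-byte aligned (0x1234 % 512 =
+ * 0x34), so 512 - 0x34 = 0x1cc is added and 0x1400 is returned; an already
+ * aligned offset is returned unchanged.
+ */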
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+ u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+ u64 fpm_size = 0;
+
+ fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+ fpm_size = i40e_align_l2obj_base(fpm_size);
+
+ return fpm_size;
+}
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ * - HMC Resource Profile has been selected before calling this function.
+ **/
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
+{
+ struct i40e_hmc_obj_info *obj, *full_obj;
+ i40e_status ret_code = 0;
+ u64 l2fpm_size;
+ u32 size_exp;
+
+ hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+ hw->hmc.hmc_fn_id = hw->pf_id;
+
+ /* allocate memory for hmc_obj */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+ sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+ hw->hmc.hmc_obj_virt_mem.va;
+
+ /* The full object will be used to create the LAN HMC SD */
+ full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+ full_obj->max_cnt = 0;
+ full_obj->cnt = 0;
+ full_obj->base = 0;
+ full_obj->size = 0;
+
+ /* Tx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = txq_num;
+ obj->base = 0;
+ size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (txq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ txq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* Rx queue context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+ obj->cnt = rxq_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (rxq_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ rxq_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE context information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+ obj->cnt = fcoe_cntx_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_cntx_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_cntx_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ /* FCoE filter information */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+ obj->cnt = fcoe_filt_num;
+ obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+ (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+ hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+ obj->base = i40e_align_l2obj_base(obj->base);
+ size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+ obj->size = (u64)1 << size_exp;
+
+ /* validate values requested by driver don't exceed HMC capacity */
+ if (fcoe_filt_num > obj->max_cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+ fcoe_filt_num, obj->max_cnt, ret_code);
+ goto init_lan_hmc_out;
+ }
+
+ /* aggregate values into the full LAN object for later */
+ full_obj->max_cnt += obj->max_cnt;
+ full_obj->cnt += obj->cnt;
+
+ hw->hmc.first_sd_index = 0;
+ hw->hmc.sd_table.ref_cnt = 0;
+ l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+ fcoe_filt_num);
+ if (NULL == hw->hmc.sd_table.sd_entry) {
+ hw->hmc.sd_table.sd_cnt = (u32)
+ (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+ I40E_HMC_DIRECT_BP_SIZE;
+
+ /* allocate the sd_entry members in the sd_table */
+ ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+ (sizeof(struct i40e_hmc_sd_entry) *
+ hw->hmc.sd_table.sd_cnt));
+ if (ret_code)
+ goto init_lan_hmc_out;
+ hw->hmc.sd_table.sd_entry =
+ (struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+ }
+ /* store in the LAN full object for later */
+ full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ * 1. Marks the entry in the pd table (for paged address mode) invalid
+ * 2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * Assumptions:
+ * 1. Caller can deallocate the memory used by the pd after this function
+ * returns.
+ **/
+static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+
+ if (!i40e_prep_remove_pd_page(hmc_info, idx))
+ ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in the sd table (for direct address mode) invalid
+ * 2. Writes to registers PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ * set to 0) and PMSDDATAHIGH to invalidate the sd page
+ * 3. Decrements the ref count for the sd_entry
+ * Assumptions:
+ * 1. Caller can deallocate the memory used by the backing storage after
+ * this function returns.
+ **/
+static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
+{
+ i40e_status ret_code = 0;
+
+ if (!i40e_prep_remove_sd_bp(hmc_info, idx))
+ ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
+
+ return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 pd_idx = 0, pd_lmt = 0;
+ bool pd_error = false;
+ u32 sd_idx, sd_lmt;
+ u64 sd_size;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
+ goto exit;
+ }
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+ /* find pd index */
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ /* This is to cover cases where you may not want an SD with the full
+ * 2M of memory but something smaller. If no size is filled in, the
+ * function defaults the SD size to 2M.
+ */
+ if (info->direct_mode_sz == 0)
+ sd_size = I40E_HMC_DIRECT_BP_SIZE;
+ else
+ sd_size = info->direct_mode_sz;
+
+ /* check if all the sds are valid. If not, allocate a page and
+ * initialize it.
+ */
+ for (j = sd_idx; j < sd_lmt; j++) {
+ /* update the sd table entry */
+ ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+ info->entry_type,
+ sd_size);
+ if (ret_code)
+ goto exit_sd_error;
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ /* check if all the pds in this sd are valid. If not,
+ * allocate a page and initialize it.
+ */
+
+ /* find pd_idx and pd_lmt in this sd */
+ pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt,
+ ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = i40e_add_pd_table_entry(hw,
+ info->hmc_info,
+ i);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ /* remove the backing pages from pd_idx1 to i */
+ while (i && (i > pd_idx1)) {
+ i40e_remove_pd_bp(hw, info->hmc_info,
+ (i - 1), true);
+ i--;
+ }
+ }
+ }
+ if (!sd_entry->valid) {
+ sd_entry->valid = true;
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ I40E_SET_PF_SD_ENTRY(hw,
+ sd_entry->u.pd_table.pd_page_addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+ j, sd_entry->entry_type);
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ goto exit;
+ break;
+ }
+ }
+ }
+ goto exit;
+
+exit_sd_error:
+ /* cleanup for sd entries from j to sd_idx */
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case I40E_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx,
+ ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ i40e_remove_pd_bp(
+ hw,
+ info->hmc_info,
+ i,
+ true);
+ }
+ i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+ break;
+ case I40E_SD_TYPE_DIRECT:
+ i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ break;
+ }
+ j--;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ * any LAN/FCoE HMC objects can be created.
+ **/
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
+{
+ struct i40e_hmc_lan_create_obj_info info;
+ i40e_status ret_code = 0;
+ u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+ struct i40e_hmc_obj_info *obj;
+
+ /* Initialize part of the create object info struct */
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+ /* Build the SD entry for the LAN objects */
+ switch (model) {
+ case I40E_HMC_MODEL_DIRECT_PREFERRED:
+ case I40E_HMC_MODEL_DIRECT_ONLY:
+ info.entry_type = I40E_SD_TYPE_DIRECT;
+ /* Make one big object, a single SD */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if ((ret_code) &&
+ (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+ goto try_type_paged;
+ else if (ret_code)
+ goto configure_lan_hmc_out;
+ /* else clause falls through the break */
+ break;
+ case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+ info.entry_type = I40E_SD_TYPE_PAGED;
+ /* Make one big object in the PD table */
+ info.count = 1;
+ ret_code = i40e_create_lan_hmc_object(hw, &info);
+ if (ret_code)
+ goto configure_lan_hmc_out;
+ break;
+ default:
+ /* unsupported type */
+ ret_code = I40E_ERR_INVALID_SD_TYPE;
+ hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
+ ret_code);
+ goto configure_lan_hmc_out;
+ break;
+ }
+
+ /* Configure and program the FPM registers so objects can be created */
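+ /* Each write below divides obj->base by 512, i.e. the FPM base is
+ * programmed in 512-byte units, the same granularity as
+ * I40E_HMC_L2OBJ_BASE_ALIGNMENT.
+ */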
+
+ /* Tx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+ wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+ /* Rx contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+ wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE contexts */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+ wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+ /* FCoE filters */
+ obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+ wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+ (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+ wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+ return ret_code;
+}
+
+/**
+ * i40e_delete_lan_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_lan_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ **/
+static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
+{
+ i40e_status ret_code = 0;
+ struct i40e_hmc_pd_table *pd_table;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 sd_idx, sd_lmt;
+ u32 i, j;
+
+ if (NULL == info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
+ goto exit;
+ }
+ if (NULL == info->hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->sd_table.sd_entry) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
+ goto exit;
+ }
+
+ if (NULL == info->hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+ goto exit;
+ }
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+ hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+
+ I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+ if (I40E_SD_TYPE_PAGED !=
+ info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+ continue;
+
+ rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+ pd_table =
+ &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
+ j, true);
+ if (ret_code)
+ goto exit;
+ }
+ }
+
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count,
+ &sd_idx, &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ret_code = I40E_ERR_INVALID_SD_INDEX;
+ goto exit;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case I40E_SD_TYPE_DIRECT:
+ ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ case I40E_SD_TYPE_PAGED:
+ ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+ if (ret_code)
+ goto exit;
+ break;
+ default:
+ break;
+ }
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+ struct i40e_hmc_lan_delete_obj_info info;
+ i40e_status ret_code;
+
+ info.hmc_info = &hw->hmc;
+ info.rsrc_type = I40E_HMC_LAN_FULL;
+ info.start_idx = 0;
+ info.count = 1;
+
+ /* delete the object */
+ ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+ /* free the SD table entry for LAN */
+ i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+ hw->hmc.sd_table.sd_cnt = 0;
+ hw->hmc.sd_table.sd_entry = NULL;
+
+ /* free memory used for hmc_obj */
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+ hw->hmc.hmc_obj = NULL;
+
+ return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele) \
+ offsetof(struct _struct, _ele), \
+ FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+ u16 offset;
+ u16 size_of;
+ u16 width;
+ u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+ /* Field Width LSB */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head), 13, 0 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, new_context), 1, 30 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, base), 57, 32 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena), 1, 89 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena), 1, 90 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena), 1, 91 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena), 1, 92 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid), 8, 96 },
+/* line 1 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb), 13, 0 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena), 1, 32 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, qlen), 13, 33 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena), 1, 46 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena), 1, 47 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena), 1, 48 + 128 },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr), 64, 64 + 128 },
+/* line 7 */
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, crc), 32, 0 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist), 10, 84 + (7 * 128) },
+ {I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act), 1, 94 + (7 * 128) },
+ { 0 }
+};
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+ /* Field Width LSB */
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, head), 13, 0 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid), 8, 13 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, base), 57, 32 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen), 13, 89 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff), 7, 102 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff), 5, 109 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype), 2, 114 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize), 1, 116 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip), 1, 117 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena), 1, 118 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel), 1, 119 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0), 4, 120 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1), 2, 124 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv), 1, 127 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax), 14, 174 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1, 193 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1, 194 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { 0 }
+};
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw: the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+ /* clean the bit array */
+ memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
+
+ return 0;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array to be filled
+ * @ce_info: a description of the fields in the source struct
+ * @dest: the struct holding the values to pack into the context
+ **/
+static i40e_status i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
+{
+ u16 shift_width;
+ u64 bitfield;
+ u8 hi_byte;
+ u8 hi_mask;
+ u64 t_bits;
+ u64 mask;
+ u8 *p;
+ int f;
+
+ for (f = 0; ce_info[f].width != 0; f++) {
+ /* clear out the field */
+ bitfield = 0;
+
+ /* copy from the next struct field */
+ p = dest + ce_info[f].offset;
+ switch (ce_info[f].size_of) {
+ case 1:
+ bitfield = *p;
+ break;
+ case 2:
+ bitfield = cpu_to_le16(*(u16 *)p);
+ break;
+ case 4:
+ bitfield = cpu_to_le32(*(u32 *)p);
+ break;
+ case 8:
+ bitfield = cpu_to_le64(*(u64 *)p);
+ break;
+ }
+
+ /* prepare the bits and mask */
+ shift_width = ce_info[f].lsb % 8;
+ mask = ((u64)1 << ce_info[f].width) - 1;
+
+ /* save upper bytes for special case */
+ hi_mask = (u8)((mask >> 56) & 0xff);
+ hi_byte = (u8)((bitfield >> 56) & 0xff);
+
+ /* shift to correct alignment */
+ mask <<= shift_width;
+ bitfield <<= shift_width;
+
+ /* get the current bits from the target bit string */
+ p = context_bytes + (ce_info[f].lsb / 8);
+ memcpy(&t_bits, p, sizeof(u64));
+
+ t_bits &= ~mask; /* get the bits not changing */
+ t_bits |= bitfield; /* add in the new bits */
+
+ /* put it all back */
+ memcpy(p, &t_bits, sizeof(u64));
+
+ /* deal with the special case if needed
+ * example: 62 bit field that starts in bit 5 of first byte
+ * will overlap 3 bits into byte 9
+ */
+ if ((shift_width + ce_info[f].width) > 64) {
+ u8 byte;
+
+ hi_mask >>= (8 - shift_width);
+ hi_byte >>= (8 - shift_width);
+ byte = p[8] & ~hi_mask; /* get the bits not changing */
+ byte |= hi_byte; /* add in the new bits */
+ p[8] = byte; /* put it back */
+ }
+ }
+
+ return 0;
+}
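+
+/* Worked example (illustrative): for the Tx queue "qlen" field (width 13,
+ * lsb 161 in the table above), shift_width = 161 % 8 = 1 and the target is
+ * the eight bytes starting at context_bytes[161 / 8] = context_bytes[20];
+ * the value is masked to 13 bits, shifted left by one and merged there.
+ */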
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hmc_info: pointer to i40e_hmc_info struct
+ * @object_base: pointer used to return the object's virtual address
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer. This function is used for LAN Queue contexts.
+ **/
+static
+i40e_status i40e_hmc_get_object_va(struct i40e_hmc_info *hmc_info,
+ u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
+{
+ u32 obj_offset_in_sd, obj_offset_in_pd;
+ i40e_status ret_code = 0;
+ struct i40e_hmc_sd_entry *sd_entry;
+ struct i40e_hmc_pd_entry *pd_entry;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u64 obj_offset_in_fpm;
+ u32 sd_idx, sd_lmt;
+
+ if (NULL == hmc_info) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
+ goto exit;
+ }
+ if (NULL == hmc_info->hmc_obj) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+ goto exit;
+ }
+ if (NULL == object_base) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
+ goto exit;
+ }
+ if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+ ret_code = I40E_ERR_BAD_PTR;
+ hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
+ goto exit;
+ }
+ if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+ ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+ hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
+ ret_code);
+ goto exit;
+ }
+ /* find sd index and limit */
+ I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &sd_idx, &sd_lmt);
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+ hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+ if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+ I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+ &pd_idx, &pd_lmt);
+ rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+ obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_PAGED_BP_SIZE);
+ *object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+ } else {
+ obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+ I40E_HMC_DIRECT_BP_SIZE);
+ *object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+ }
+exit:
+ return ret_code;
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_TX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ **/
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw: the hardware struct
+ * @queue: the queue we care about
+ * @s: the struct to be filled
+ **/
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
+{
+ i40e_status err;
+ u8 *context_bytes;
+
+ err = i40e_hmc_get_object_va(&hw->hmc, &context_bytes,
+ I40E_HMC_LAN_RX, queue);
+ if (err < 0)
+ return err;
+
+ return i40e_set_hmc_context(context_bytes,
+ i40e_hmc_rxq_ce_info, (u8 *)s);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
new file mode 100644
index 00000000000..00ff3500607
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data */
+struct i40e_hmc_obj_rxq {
+ u16 head;
+ u8 cpuid;
+ u64 base;
+ u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+ u8 dbuff;
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+ u8 hbuff;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 fc_ena;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
+ u16 rxmax;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
+ u8 lrxqthresh;
+};
+
+/* Tx queue context data */
+struct i40e_hmc_obj_txq {
+ u16 head;
+ u8 new_context;
+ u64 base;
+ u8 fc_ena;
+ u8 timesync_ena;
+ u8 fd_ena;
+ u8 alt_vlan_ena;
+ u16 thead_wb;
+ u16 cpuid;
+ u8 head_wb_ena;
+ u16 qlen;
+ u8 tphrdesc_ena;
+ u8 tphrpacket_ena;
+ u8 tphwdesc_ena;
+ u64 head_wb_addr;
+ u32 crc;
+ u16 rdylist;
+ u8 rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+ I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purposes only */
+struct i40e_hmc_obj_fcoe_cntx {
+ u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+ u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+ I40E_HMC_LAN_OBJ_SZ_8 = 0x3,
+ I40E_HMC_LAN_OBJ_SZ_16 = 0x4,
+ I40E_HMC_LAN_OBJ_SZ_32 = 0x5,
+ I40E_HMC_LAN_OBJ_SZ_64 = 0x6,
+ I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+ I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+ I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ 128
+#define I40E_HMC_OBJ_SIZE_RXQ 32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT 32
+
+enum i40e_hmc_lan_rsrc_type {
+ I40E_HMC_LAN_FULL = 0,
+ I40E_HMC_LAN_TX = 1,
+ I40E_HMC_LAN_RX = 2,
+ I40E_HMC_FCOE_CTX = 3,
+ I40E_HMC_FCOE_FILT = 4,
+ I40E_HMC_LAN_MAX = 5
+};
+
+enum i40e_hmc_model {
+ I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+ I40E_HMC_MODEL_DIRECT_ONLY = 1,
+ I40E_HMC_MODEL_PAGED_ONLY = 2,
+ I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ enum i40e_sd_entry_type entry_type;
+ u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+ struct i40e_hmc_info *hmc_info;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+};
+
+i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
+
+#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
new file mode 100644
index 00000000000..221aa479501
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -0,0 +1,7377 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Local includes */
+#include "i40e.h"
+
+const char i40e_driver_name[] = "i40e";
+static const char i40e_driver_string[] =
+ "Intel(R) Ethernet Connection XL710 Network Driver";
+
+#define DRV_KERN "-k"
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 3
+#define DRV_VERSION_BUILD 9
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+ __stringify(DRV_VERSION_MINOR) "." \
+ __stringify(DRV_VERSION_BUILD) DRV_KERN
+const char i40e_driver_version_str[] = DRV_VERSION;
+static const char i40e_copyright[] = "Copyright (c) 2013 Intel Corporation.";
+
+/* a bit of forward declarations */
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
+static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static int i40e_add_vsi(struct i40e_vsi *vsi);
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
+static int i40e_setup_pf_switch(struct i40e_pf *pf);
+static int i40e_setup_misc_vector(struct i40e_pf *pf);
+static void i40e_determine_queue_usage(struct i40e_pf *pf);
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+
+/* i40e_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, I40E_SFP_XL710_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_SFP_X710_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QEMU_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_A_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_B_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_C_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_KX_D_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_A_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_B_DEVICE_ID), 0},
+ {PCI_VDEVICE(INTEL, I40E_QSFP_C_DEVICE_ID), 0},
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
+
+#define I40E_MAX_VF_COUNT 128
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ **/
+int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ mem->size = ALIGN(size, alignment);
+ mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+
+ dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
+ mem->va = NULL;
+ mem->pa = 0;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to fill out
+ * @size: size of memory requested
+ **/
+int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ u32 size)
+{
+ mem->size = size;
+ mem->va = kzalloc(size, GFP_KERNEL);
+
+ if (!mem->va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * @hw: pointer to the HW structure
+ * @mem: ptr to mem struct to free
+ **/
+int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+ /* it's ok to kfree a NULL pointer */
+ kfree(mem->va);
+ mem->va = NULL;
+ mem->size = 0;
+
+ return 0;
+}
+
+/**
+ * i40e_get_lump - find a lump of free generic resource
+ * @pf: board private structure
+ * @pile: the pile of resource to search
+ * @needed: the number of items needed
+ * @id: an owner id to stick on the items assigned
+ *
+ * Returns the base item index of the lump, or negative for error
+ *
+ * The search_hint trick and lack of advanced fit-finding only work
+ * because we're highly likely to have all the same size lump requests.
+ * Linear search time and any fragmentation should be minimal.
+ **/
+static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
+ u16 needed, u16 id)
+{
+ int ret = -ENOMEM;
+ int i, j;
+
+ if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
+ dev_info(&pf->pdev->dev,
+ "param err: pile=%p needed=%d id=0x%04x\n",
+ pile, needed, id);
+ return -EINVAL;
+ }
+
+ /* start the linear search with an imperfect hint */
+ i = pile->search_hint;
+ while (i < pile->num_entries) {
+ /* skip already allocated entries */
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ i++;
+ continue;
+ }
+
+ /* do we have enough in this lump? */
+ for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
+ if (pile->list[i+j] & I40E_PILE_VALID_BIT)
+ break;
+ }
+
+ if (j == needed) {
+ /* there was enough, so assign it to the requestor */
+ for (j = 0; j < needed; j++)
+ pile->list[i+j] = id | I40E_PILE_VALID_BIT;
+ ret = i;
+ pile->search_hint = i + j;
+ break;
+ } else {
+ /* not enough, so skip over it and continue looking */
+ i += j;
+ }
+ }
+
+ return ret;
+}
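+
+/* Example walk-through (illustrative): with search_hint = 0, needed = 2 and a
+ * list whose entry 0 is already allocated but entries 1 and 2 are free, the
+ * scan skips index 0, tags entries 1 and 2 with id | I40E_PILE_VALID_BIT,
+ * returns 1 as the base index and advances search_hint to 3.
+ */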
+
+/**
+ * i40e_put_lump - return a lump of generic resource
+ * @pile: the pile of resource to search
+ * @index: the base item index
+ * @id: the owner id of the items assigned
+ *
+ * Returns the count of items in the lump
+ **/
+static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
+{
+ int valid_id = (id | I40E_PILE_VALID_BIT);
+ int count = 0;
+ int i;
+
+ if (!pile || index >= pile->num_entries)
+ return -EINVAL;
+
+ for (i = index;
+ i < pile->num_entries && pile->list[i] == valid_id;
+ i++) {
+ pile->list[i] = 0;
+ count++;
+ }
+
+ if (count && index < pile->search_hint)
+ pile->search_hint = index;
+
+ return count;
+}
+
+/**
+ * i40e_service_event_schedule - Schedule the service task to wake up
+ * @pf: board private structure
+ *
+ * If not already scheduled, this puts the task into the work queue
+ **/
+static void i40e_service_event_schedule(struct i40e_pf *pf)
+{
+ if (!test_bit(__I40E_DOWN, &pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
+ !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
+ schedule_work(&pf->service_task);
+}
+
+/**
+ * i40e_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ *
+ * If any port has noticed a Tx timeout, it is likely that the whole
+ * device is munged, not just the one netdev port, so go for the full
+ * reset.
+ **/
+static void i40e_tx_timeout(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+
+ pf->tx_timeout_count++;
+
+ if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
+ pf->tx_timeout_recovery_level = 0;
+ pf->tx_timeout_last_recovery = jiffies;
+ netdev_info(netdev, "tx_timeout recovery level %d\n",
+ pf->tx_timeout_recovery_level);
+
+ switch (pf->tx_timeout_recovery_level) {
+ case 0:
+ /* disable and re-enable queues for the VSI */
+ if (in_interrupt()) {
+ set_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+ } else {
+ i40e_vsi_reinit_locked(vsi);
+ }
+ break;
+ case 1:
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ break;
+ case 2:
+ set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ break;
+ case 3:
+ set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ break;
+ default:
+ netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ i40e_down(vsi);
+ break;
+ }
+ i40e_service_event_schedule(pf);
+ pf->tx_timeout_recovery_level++;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail and head values
+ * @rx_ring: ring to bump
+ * @val: new next_to_use value to write to the ring tail
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_get_vsi_stats_struct - Get System Network Statistics
+ * @vsi: the VSI we care about
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
+{
+ return &vsi->net_stats;
+}
+
+/**
+ * i40e_get_netdev_stats_struct - Get statistics for netdev interface
+ * @netdev: network interface device structure
+ * @storage: stats structure to be filled in
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the service task.
+ **/
+static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+ struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ *storage = *i40e_get_vsi_stats_struct(vsi);
+
+ return storage;
+}
+
+/**
+ * i40e_vsi_reset_stats - Resets all stats of the given vsi
+ * @vsi: the VSI to have its stats reset
+ **/
+void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
+{
+ struct rtnl_link_stats64 *ns;
+ int i;
+
+ if (!vsi)
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ memset(ns, 0, sizeof(*ns));
+ memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
+ memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
+ memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
+ if (vsi->rx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ memset(&vsi->rx_rings[i].rx_stats, 0,
+ sizeof(vsi->rx_rings[i].rx_stats));
+ memset(&vsi->tx_rings[i].tx_stats, 0,
+ sizeof(vsi->tx_rings[i].tx_stats));
+ }
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_pf_reset_stats - Reset all of the stats for the given pf
+ * @pf: the PF to be reset
+ **/
+void i40e_pf_reset_stats(struct i40e_pf *pf)
+{
+ memset(&pf->stats, 0, sizeof(pf->stats));
+ memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
+ pf->stat_offsets_loaded = false;
+}
+
+/**
+ * i40e_stat_update48 - read and update a 48 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @hireg: the high 32 bit reg to read
+ * @loreg: the low 32 bit reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero. In the process, we also manage
+ * the potential roll-over.
+ **/
+static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ if (hw->device_id == I40E_QEMU_DEVICE_ID) {
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+ } else {
+ new_data = rd64(hw, loreg);
+ }
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + ((u64)1 << 48)) - *offset;
+ *stat &= 0xFFFFFFFFFFFFULL;
+}
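+
+/* Rollover example (illustrative): if *offset was latched at 0xFFFFFFFFFFF0
+ * and the 48-bit counter has since wrapped to 0x10, the wrap branch computes
+ * (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since the
+ * offset was recorded.
+ */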
+
+/**
+ * i40e_stat_update32 - read and update a 32 bit stat from the chip
+ * @hw: ptr to the hardware info
+ * @reg: the hw reg to read
+ * @offset_loaded: has the initial offset been loaded yet
+ * @offset: ptr to current offset value
+ * @stat: ptr to the stat
+ **/
+static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (likely(new_data >= *offset))
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+/**
+ * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
+ * @vsi: the VSI to be updated
+ **/
+void i40e_update_eth_stats(struct i40e_vsi *vsi)
+{
+ int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+
+ i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ vsi->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_veb_stats - Update Switch component statistics
+ * @veb: the VEB being updated
+ **/
+static void i40e_update_veb_stats(struct i40e_veb *veb)
+{
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ int idx = 0;
+
+ idx = veb->stats_idx;
+ es = &veb->stats;
+ oes = &veb->stats_offsets;
+
+ /* Gather up the stats that the hw collects */
+ i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_discards, &es->tx_discards);
+ i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+
+ i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
+ veb->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ veb->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
+ **/
+static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u64 xoff = 0;
+ u16 i, v;
+
+ if ((hw->fc.current_mode != I40E_FC_FULL) &&
+ (hw->fc.current_mode != I40E_FC_RX_PAUSE))
+ return;
+
+ xoff = nsd->link_xoff_rx;
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+
+ /* No new LFC xoff rx */
+ if (!(nsd->link_xoff_rx - xoff))
+ return;
+
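+ /* a pause frame from the link partner stalls Tx legitimately, so
+ * don't let the Tx hang detection mistake the stall for a hang
+ */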
+ /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *ring = &vsi->tx_rings[i];
+ clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+ }
+ }
+}
+
+/**
+ * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
+ * @pf: the corresponding PF
+ *
+ * Update the Rx XOFF counter (PAUSE frames) in PFC mode
+ **/
+static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
+ struct i40e_dcbx_config *dcb_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u16 i, v;
+ u8 tc;
+
+ dcb_cfg = &hw->local_dcbx_config;
+
+ /* See if DCB enabled with PFC TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
+ !(dcb_cfg->pfc.pfcenable)) {
+ i40e_update_link_xoff_rx(pf);
+ return;
+ }
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ u64 prio_xoff = nsd->priority_xoff_rx[i];
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_rx[i],
+ &nsd->priority_xoff_rx[i]);
+
+ /* No new PFC xoff rx */
+ if (!(nsd->priority_xoff_rx[i] - prio_xoff))
+ continue;
+ /* Get the TC for given priority */
+ tc = dcb_cfg->etscfg.prioritytable[i];
+ xoff[tc] = true;
+ }
+
+ /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+
+ if (!vsi)
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ struct i40e_ring *ring = &vsi->tx_rings[i];
+
+ tc = ring->dcb_tc;
+ if (xoff[tc])
+ clear_bit(__I40E_HANG_CHECK_ARMED,
+ &ring->state);
+ }
+ }
+}
+
+/**
+ * i40e_update_stats - Update the board statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * There are a few instances where we store the same stat in a
+ * couple of different structs. This is partly because we have
+ * the netdev stats that need to be filled out, which is slightly
+ * different from the "eth_stats" defined by the chip and used in
+ * VF communications. We sort it all out here in a central place.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct rtnl_link_stats64 *ons;
+ struct rtnl_link_stats64 *ns; /* netdev stats */
+ struct i40e_eth_stats *oes;
+ struct i40e_eth_stats *es; /* device's eth stats */
+ u32 tx_restart, tx_busy;
+ u32 rx_page, rx_buf;
+ u64 rx_p, rx_b;
+ u64 tx_p, tx_b;
+ int i;
+ u16 q;
+
+ if (test_bit(__I40E_DOWN, &vsi->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ ns = i40e_get_vsi_stats_struct(vsi);
+ ons = &vsi->net_stats_offsets;
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+
+ /* Gather up the netdev and vsi stats that the driver collects
+ * on the fly during packet processing
+ */
+ rx_b = rx_p = 0;
+ tx_b = tx_p = 0;
+ tx_restart = tx_busy = 0;
+ rx_page = 0;
+ rx_buf = 0;
+ for (q = 0; q < vsi->num_queue_pairs; q++) {
+ struct i40e_ring *p;
+
+ p = &vsi->rx_rings[q];
+ rx_b += p->rx_stats.bytes;
+ rx_p += p->rx_stats.packets;
+ rx_buf += p->rx_stats.alloc_rx_buff_failed;
+ rx_page += p->rx_stats.alloc_rx_page_failed;
+
+ p = &vsi->tx_rings[q];
+ tx_b += p->tx_stats.bytes;
+ tx_p += p->tx_stats.packets;
+ tx_restart += p->tx_stats.restart_queue;
+ tx_busy += p->tx_stats.tx_busy;
+ }
+ vsi->tx_restart = tx_restart;
+ vsi->tx_busy = tx_busy;
+ vsi->rx_page_failed = rx_page;
+ vsi->rx_buf_failed = rx_buf;
+
+ ns->rx_packets = rx_p;
+ ns->rx_bytes = rx_b;
+ ns->tx_packets = tx_p;
+ ns->tx_bytes = tx_b;
+
+ i40e_update_eth_stats(vsi);
+ /* update netdev stats from eth stats */
+ ons->rx_errors = oes->rx_errors;
+ ns->rx_errors = es->rx_errors;
+ ons->tx_errors = oes->tx_errors;
+ ns->tx_errors = es->tx_errors;
+ ons->multicast = oes->rx_multicast;
+ ns->multicast = es->rx_multicast;
+ ons->tx_dropped = oes->tx_discards;
+ ns->tx_dropped = es->tx_discards;
+
+ /* Get the port data only if this is the main PF VSI */
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+ ns->rx_crc_errors = nsd->crc_errors;
+
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+ ns->rx_errors = nsd->crc_errors
+ + nsd->illegal_bytes;
+
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+ ns->rx_length_errors = nsd->rx_length_errors;
+
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
+ pf->stat_offsets_loaded,
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
+ }
+
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+ }
+
+ pf->stat_offsets_loaded = true;
+}
+
+/**
+ * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL
+ **/
+static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (vlan == f->vlan) &&
+ (!is_vf || f->is_vf) &&
+ (!is_netdev || f->is_netdev))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_find_mac - Find a mac addr in the macvlan filters list
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address we are searching for
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns the first filter with the provided MAC address or NULL if
+ * MAC address was not found
+ **/
+struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if ((ether_addr_equal(macaddr, f->macaddr)) &&
+ (!is_vf || f->is_vf) &&
+ (!is_netdev || f->is_netdev))
+ return f;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
+ * @vsi: the VSI to be searched
+ *
+ * Returns true if VSI is in vlan mode or false otherwise
+ **/
+bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+
+ /* Only a vlan of -1 on every filter means the VSI is not in vlan
+ * mode, so we have to walk the whole list to be sure
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be filtered
+ * @is_vf: true if it is a vf
+ * @is_netdev: true if it is a netdev
+ *
+ * Goes through all the macvlan filters and adds a
+ * macvlan filter for each unique vlan that already exists
+ *
+ * Returns first filter found on success, else NULL
+ **/
+struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (!i40e_find_filter(vsi, macaddr, f->vlan,
+ is_vf, is_netdev)) {
+ if (!i40e_add_filter(vsi, macaddr, f->vlan,
+ is_vf, is_netdev))
+ return NULL;
+ }
+ }
+
+ return list_first_entry_or_null(&vsi->mac_filter_list,
+ struct i40e_mac_filter, list);
+}
+
+/**
+ * i40e_add_filter - Add a mac/vlan filter to the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ *
+ * Returns ptr to the filter object or NULL when no memory available.
+ **/
+struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return NULL;
+
+ f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ if (!f) {
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ goto add_filter_out;
+
+ memcpy(f->macaddr, macaddr, ETH_ALEN);
+ f->vlan = vlan;
+ f->changed = true;
+
+ INIT_LIST_HEAD(&f->list);
+ list_add(&f->list, &vsi->mac_filter_list);
+ }
+
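+ /* the counter tracks how many users (VF, netdev, other) currently
+ * hold a reference to this filter
+ */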
+ /* increment counter and add a new flag if needed */
+ if (is_vf) {
+ if (!f->is_vf) {
+ f->is_vf = true;
+ f->counter++;
+ }
+ } else if (is_netdev) {
+ if (!f->is_netdev) {
+ f->is_netdev = true;
+ f->counter++;
+ }
+ } else {
+ f->counter++;
+ }
+
+ /* changed tells sync_filters_subtask to
+ * push the filter down to the firmware
+ */
+ if (f->changed) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+
+add_filter_out:
+ return f;
+}
+
+/**
+ * i40e_del_filter - Remove a mac/vlan filter from the VSI
+ * @vsi: the VSI to be searched
+ * @macaddr: the MAC address
+ * @vlan: the vlan
+ * @is_vf: make sure it's a vf filter, else doesn't matter
+ * @is_netdev: make sure it's a netdev filter, else doesn't matter
+ **/
+void i40e_del_filter(struct i40e_vsi *vsi,
+ u8 *macaddr, s16 vlan,
+ bool is_vf, bool is_netdev)
+{
+ struct i40e_mac_filter *f;
+
+ if (!vsi || !macaddr)
+ return;
+
+ f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
+ if (!f || f->counter == 0)
+ return;
+
+ if (is_vf) {
+ if (f->is_vf) {
+ f->is_vf = false;
+ f->counter--;
+ }
+ } else if (is_netdev) {
+ if (f->is_netdev) {
+ f->is_netdev = false;
+ f->counter--;
+ }
+ } else {
+ /* make sure we don't remove a filter in use by vf or netdev */
+ int min_f = 0;
+ min_f += (f->is_vf ? 1 : 0);
+ min_f += (f->is_netdev ? 1 : 0);
+
+ if (f->counter > min_f)
+ f->counter--;
+ }
+
+ /* counter == 0 tells sync_filters_subtask to
+ * remove the filter from the firmware's list
+ */
+ if (f->counter == 0) {
+ f->changed = true;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_set_mac - NDO callback to set mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_set_mac(struct net_device *netdev, void *p)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct sockaddr *addr = p;
+ struct i40e_mac_filter *f;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ i40e_status ret;
+ ret = i40e_aq_mac_address_write(&vsi->back->hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ addr->sa_data, NULL);
+ if (ret) {
+ netdev_info(netdev,
+ "Addr change for Main VSI failed: %d\n",
+ ret);
+ return -EADDRNOTAVAIL;
+ }
+
+ memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ }
+
+ /* To be sure we don't drop any packets, add the new address first
+ * and only then delete the old one.
+ */
+ f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
+ if (!f)
+ return -ENOMEM;
+
+ i40e_sync_vsi_filters(vsi);
+ i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
+ i40e_sync_vsi_filters(vsi);
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
+ * @vsi: the VSI being setup
+ * @ctxt: VSI context structure
+ * @enabled_tc: Enabled TCs bitmap
+ * @is_add: True if called before Add VSI
+ *
+ * Setup VSI queue mapping for enabled traffic classes.
+ **/
+static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt,
+ u8 enabled_tc,
+ bool is_add)
+{
+ struct i40e_pf *pf = vsi->back;
+ u16 sections = 0;
+ u8 netdev_tc = 0;
+ u16 numtc = 0;
+ u16 qcount;
+ u8 offset;
+ u16 qmap;
+ int i;
+
+ sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ offset = 0;
+
+ if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+ /* Find numtc from enabled TC bitmap */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i)) /* TC is enabled */
+ numtc++;
+ }
+ if (!numtc) {
+ dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
+ numtc = 1;
+ }
+ } else {
+ /* At least TC0 is enabled in case of non-DCB case */
+ numtc = 1;
+ }
+
+ vsi->tc_config.numtc = numtc;
+ vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
+
+ /* Setup queue offset/count for all TCs for given VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* See if the given TC is enabled for the given VSI */
+ if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+ int pow, num_qps;
+
+ vsi->tc_config.tc_info[i].qoffset = offset;
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (i == 0)
+ qcount = pf->rss_size;
+ else
+ qcount = pf->num_tc_qps;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ break;
+ case I40E_VSI_FDIR:
+ case I40E_VSI_SRIOV:
+ case I40E_VSI_VMDQ2:
+ default:
+ qcount = vsi->alloc_queue_pairs;
+ vsi->tc_config.tc_info[i].qcount = qcount;
+ WARN_ON(i != 0);
+ break;
+ }
+
+ /* find the power-of-2 of the number of queue pairs */
+ num_qps = vsi->tc_config.tc_info[i].qcount;
+ pow = 0;
+ while (num_qps &&
+ ((1 << pow) < vsi->tc_config.tc_info[i].qcount)) {
+ pow++;
+ num_qps >>= 1;
+ }
+
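+ /* the HW queue map stores the per-TC queue count as a power-of-2
+ * exponent next to the queue offset, hence the rounding above
+ */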
+ vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
+ qmap =
+ (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+
+ offset += vsi->tc_config.tc_info[i].qcount;
+ } else {
+ /* TC is not enabled so set the offset to
+ * default queue and allocate one queue
+ * for the given TC.
+ */
+ vsi->tc_config.tc_info[i].qoffset = 0;
+ vsi->tc_config.tc_info[i].qcount = 1;
+ vsi->tc_config.tc_info[i].netdev_tc = 0;
+
+ qmap = 0;
+ }
+ ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_queue_pairs = offset;
+
+ /* Scheduler section valid can only be set for ADD VSI */
+ if (is_add) {
+ sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
+
+ ctxt->info.up_enable_bits = enabled_tc;
+ }
+ if (vsi->type == I40E_VSI_SRIOV) {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ ctxt->info.queue_mapping[i] =
+ cpu_to_le16(vsi->base_queue + i);
+ } else {
+ ctxt->info.mapping_flags |=
+ cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
+ ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
+ }
+ ctxt->info.valid_sections |= cpu_to_le16(sections);
+}
+
+/**
+ * i40e_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40e_set_rx_mode(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct netdev_hw_addr *uca;
+ struct netdev_hw_addr *mca;
+ struct netdev_hw_addr *ha;
+
+ /* add addr if not already in the filter list */
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (!i40e_find_mac(vsi, uca->addr, false, true)) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_put_mac_in_vlan(vsi, uca->addr,
+ false, true);
+ else
+ i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
+ false, true);
+ }
+ }
+
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (!i40e_find_mac(vsi, mca->addr, false, true)) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ i40e_put_mac_in_vlan(vsi, mca->addr,
+ false, true);
+ else
+ i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
+ false, true);
+ }
+ }
+
+ /* remove filter if not in netdev list */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ bool found = false;
+
+ if (!f->is_netdev)
+ continue;
+
+ if (is_multicast_ether_addr(f->macaddr)) {
+ netdev_for_each_mc_addr(mca, netdev) {
+ if (ether_addr_equal(mca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ } else {
+ netdev_for_each_uc_addr(uca, netdev) {
+ if (ether_addr_equal(uca->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+
+ for_each_dev_addr(netdev, ha) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found)
+ i40e_del_filter(
+ vsi, f->macaddr, I40E_VLAN_ANY, false, true);
+ }
+
+ /* check for other flag changes */
+ if (vsi->current_netdev_flags != vsi->netdev->flags) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+}
+
+/**
+ * i40e_sync_vsi_filters - Update the VSI filter list to the HW
+ * @vsi: ptr to the VSI
+ *
+ * Push any outstanding VSI filter changes through the AdminQ.
+ *
+ * Returns 0 or error value
+ **/
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+ bool promisc_forced_on = false;
+ bool add_happened = false;
+ int filter_list_len = 0;
+ u32 changed_flags = 0;
+ i40e_status aq_ret = 0;
+ struct i40e_pf *pf;
+ int num_add = 0;
+ int num_del = 0;
+ u16 cmd_flags;
+
+ /* element arrays for the AdminQ add/remove commands, kcalloc'ed below */
+ struct i40e_aqc_add_macvlan_element_data *add_list;
+ struct i40e_aqc_remove_macvlan_element_data *del_list;
+
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
+ usleep_range(1000, 2000);
+ pf = vsi->back;
+
+ if (vsi->netdev) {
+ changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
+ vsi->current_netdev_flags = vsi->netdev->flags;
+ }
+
+ if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
+ vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
+
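+ /* the AdminQ buffer size bounds how many MAC/VLAN elements fit in
+ * a single remove (and later add) command, so deletes and adds are
+ * batched in buffer-sized chunks
+ */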
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_remove_macvlan_element_data);
+ del_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_remove_macvlan_element_data),
+ GFP_KERNEL);
+ if (!del_list) {
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter != 0)
+ continue;
+ f->changed = false;
+ cmd_flags = 0;
+
+ /* add to delete list */
+ memcpy(del_list[num_del].mac_addr,
+ f->macaddr, ETH_ALEN);
+ del_list[num_del].vlan_tag =
+ cpu_to_le16((u16)(f->vlan ==
+ I40E_VLAN_ANY ? 0 : f->vlan));
+
+ /* vlan0 as wild card to allow packets from all vlans */
+ if (f->vlan == I40E_VLAN_ANY ||
+ (vsi->netdev && !(vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)))
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ del_list[num_del].flags = cmd_flags;
+ num_del++;
+
+ /* unlink from filter list */
+ list_del(&f->list);
+ kfree(f);
+
+ /* flush a full buffer */
+ if (num_del == filter_list_len) {
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw,
+ vsi->seid, del_list, num_del,
+ NULL);
+ num_del = 0;
+ memset(del_list, 0, sizeof(*del_list));
+
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
+ aq_ret,
+ pf->hw.aq.asq_last_status);
+ }
+ }
+ if (num_del) {
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+ del_list, num_del, NULL);
+ num_del = 0;
+
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
+ }
+
+ kfree(del_list);
+ del_list = NULL;
+
+ /* do all the adds now */
+ filter_list_len = pf->hw.aq.asq_buf_size /
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kcalloc(filter_list_len,
+ sizeof(struct i40e_aqc_add_macvlan_element_data),
+ GFP_KERNEL);
+ if (!add_list) {
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return -ENOMEM;
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ if (!f->changed)
+ continue;
+
+ if (f->counter == 0)
+ continue;
+ f->changed = false;
+ add_happened = true;
+ cmd_flags = 0;
+
+ /* add to add array */
+ memcpy(add_list[num_add].mac_addr,
+ f->macaddr, ETH_ALEN);
+ add_list[num_add].vlan_tag =
+ cpu_to_le16(
+ (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+ add_list[num_add].queue_number = 0;
+
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+
+ /* vlan0 as wild card to allow packets from all vlans */
+ if (f->vlan == I40E_VLAN_ANY || (vsi->netdev &&
+ !(vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)))
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ add_list[num_add].flags = cpu_to_le16(cmd_flags);
+ num_add++;
+
+ /* flush a full buffer */
+ if (num_add == filter_list_len) {
+ aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add,
+ NULL);
+ num_add = 0;
+
+ if (aq_ret)
+ break;
+ memset(add_list, 0, sizeof(*add_list));
+ }
+ }
+ if (num_add) {
+ aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ add_list, num_add, NULL);
+ num_add = 0;
+ }
+ kfree(add_list);
+ add_list = NULL;
+
+ if (add_happened && (!aq_ret)) {
+ /* do nothing */;
+ } else if (add_happened && (aq_ret)) {
+ dev_info(&pf->pdev->dev,
+ "add filter failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
+ if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
+ !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state)) {
+ promisc_forced_on = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
+ }
+ }
+ }
+
+ /* check for changes in promiscuous modes */
+ if (changed_flags & IFF_ALLMULTI) {
+ bool cur_multipromisc;
+ cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_multipromisc,
+ NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set multi promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
+ }
+ if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+ bool cur_promisc;
+ cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
+ test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state));
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+ vsi->seid,
+ cur_promisc, NULL);
+ if (aq_ret)
+ dev_info(&pf->pdev->dev,
+ "set uni promisc failed, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
+ }
+
+ clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
+ return 0;
+}
+
+/**
+ * i40e_sync_filters_subtask - Sync the VSI filter list with HW
+ * @pf: board private structure
+ **/
+static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+{
+ int v;
+
+ if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
+ return;
+ pf->flags &= ~I40E_FLAG_FILTER_SYNC;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v] &&
+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
+ i40e_sync_vsi_filters(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ struct i40e_vsi *vsi = np->vsi;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
+ return -EINVAL;
+
+ netdev_info(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ i40e_vsi_reinit_locked(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
+ return; /* already enabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
+ * @vsi: the vsi being adjusted
+ **/
+void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status ret;
+
+ if ((vsi->info.valid_sections &
+ cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
+ ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
+ I40E_AQ_VSI_PVLAN_EMOD_MASK))
+ return; /* already disabled */
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ }
+}
+
+/**
+ * i40e_vlan_rx_register - Setup or shutdown vlan offload
+ * @netdev: network interface to be adjusted
+ * @features: netdev features to test if VLAN offload is enabled or not
+ **/
+static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+}
+
+/**
+ * i40e_vsi_add_vlan - Add vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be added (0 = untagged only, -1 = any)
+ **/
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ struct i40e_mac_filter *f, *add_f;
+ bool is_netdev, is_vf;
+ int ret;
+
+ is_vf = (vsi->type == I40E_VSI_SRIOV);
+ is_netdev = !!(vsi->netdev);
+
+ if (is_netdev) {
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, vsi->netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add vlan filter %d for %pM\n",
+ vid, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not sync filters for vid %d\n", vid);
+ return ret;
+ }
+
+ /* When adding a vlan tag, check whether this is the first real tag
+ * (i.e. a "tag" of -1 still exists) and if so replace the -1 "tag"
+ * with 0, so that we now accept untagged plus the specified tagged
+ * traffic (and not any-tagged and untagged)
+ */
+ if (vid > 0) {
+ if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY,
+ is_vf, is_netdev)) {
+ i40e_del_filter(vsi, vsi->netdev->dev_addr,
+ I40E_VLAN_ANY, is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ vsi->netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev)) {
+ i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr,
+ 0, is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter 0 for %pM\n",
+ f->macaddr);
+ return -ENOMEM;
+ }
+ }
+ }
+ ret = i40e_sync_vsi_filters(vsi);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
+ * @vsi: the vsi being configured
+ * @vid: vlan id to be removed (0 = untagged only, -1 = any)
+ *
+ * Return: 0 on success or negative otherwise
+ **/
+int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_mac_filter *f, *add_f;
+ bool is_vf, is_netdev;
+ int filter_count = 0;
+ int ret;
+
+ is_vf = (vsi->type == I40E_VSI_SRIOV);
+ is_netdev = !!(netdev);
+
+ if (is_netdev)
+ i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
+
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
+ return ret;
+ }
+
+ /* go through all the filters for this VSI and if there is only
+ * vid == 0 it means there are no other filters, so vid 0 must
+ * be replaced with -1. This signifies that we should from now
+ * on accept any traffic (with any tag present, or untagged)
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (is_netdev) {
+ if (f->vlan &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ filter_count++;
+ }
+
+ if (f->vlan)
+ filter_count++;
+ }
+
+ if (!filter_count && is_netdev) {
+ i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
+ f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ if (!f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter %d for %pM\n",
+ I40E_VLAN_ANY, netdev->dev_addr);
+ return -ENOMEM;
+ }
+ }
+
+ if (!filter_count) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
+ add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
+ is_vf, is_netdev);
+ if (!add_f) {
+ dev_info(&vsi->back->pdev->dev,
+ "Could not add filter %d for %pM\n",
+ I40E_VLAN_ANY, f->macaddr);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return i40e_sync_vsi_filters(vsi);
+}
+
+/**
+ * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be added
+ *
+ * net_device_ops implementation for adding vlan ids
+ **/
+static int i40e_vlan_rx_add_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ int ret = 0;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
+
+ /* If the network stack called us with vid = 0, we should
+ * indicate to i40e_vsi_add_vlan() that we want to receive
+ * any traffic (i.e. with any vlan tag, or untagged)
+ */
+ ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY);
+
+ if (!ret && (vid < VLAN_N_VID))
+ set_bit(vid, vsi->active_vlans);
+
+ return ret;
+}
+
+/**
+ * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * @netdev: network interface to be adjusted
+ * @vid: vlan id to be removed
+ *
+ * net_device_ops implementation for removing vlan ids
+ **/
+static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+ __always_unused __be16 proto, u16 vid)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
+
+ /* the return code is ignored since there is nothing a user can do
+ * about a failure to remove, and a log message has already been
+ * printed by i40e_vsi_kill_vlan()
+ */
+ i40e_vsi_kill_vlan(vsi, vid);
+
+ clear_bit(vid, vsi->active_vlans);
+
+ return 0;
+}
+
+/**
+ * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
+ * @vsi: the vsi being brought back up
+ **/
+static void i40e_restore_vlan(struct i40e_vsi *vsi)
+{
+ u16 vid;
+
+ if (!vsi->netdev)
+ return;
+
+ i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+
+ for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
+ i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
+ vid);
+}
+
+/**
+ * i40e_vsi_add_pvid - Add pvid for the VSI
+ * @vsi: the vsi being adjusted
+ * @vid: the vlan id to set as a PVID
+ **/
+int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
+{
+ struct i40e_vsi_context ctxt;
+ i40e_status aq_ret;
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = cpu_to_le16(vid);
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+
+ ctxt.seid = vsi->seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (aq_ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: update vsi failed, aq_err=%d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_remove_pvid - Remove the pvid from the VSI
+ * @vsi: the vsi being adjusted
+ *
+ * Just use the vlan_rx_register() service to put it back to normal
+ **/
+void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
+{
+ vsi->info.pvid = 0;
+ i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
+}
+
+/**
+ * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free VSI's transmit software resources
+ **/
+static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i].desc)
+ i40e_free_tx_resources(&vsi->tx_rings[i]);
+}
+
+/**
+ * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
+ * @vsi: ptr to the VSI
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
+{
+ int i, err = 0;
+
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+ return err;
+}
+
+/**
+ * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
+ * @vsi: ptr to the VSI
+ *
+ * Free all receive software resources
+ **/
+static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
+{
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->rx_rings[i].desc)
+ i40e_free_rx_resources(&vsi->rx_rings[i]);
+}
+
+/**
+ * i40e_configure_tx_ring - Configure a transmit ring context and related setup
+ * @ring: The Tx ring to configure
+ *
+ * Configure the Tx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_tx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ i40e_status err = 0;
+ u32 qtx_ctl = 0;
+
+ /* some ATR related tx ring init */
+ if (vsi->back->flags & I40E_FLAG_FDIR_ATR_ENABLED) {
+ ring->atr_sample_rate = vsi->back->atr_sample_rate;
+ ring->atr_count = 0;
+ } else {
+ ring->atr_sample_rate = 0;
+ }
+
+ /* initialize XPS */
+ if (ring->q_vector && ring->netdev &&
+ !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->affinity_mask,
+ ring->queue_index);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(tx_ctx));
+
+ tx_ctx.new_context = 1;
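+ /* the HMC context takes the ring base address in 128-byte units */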
+ tx_ctx.base = (ring->dma / 128);
+ tx_ctx.qlen = ring->count;
+ tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED));
+
+ /* As part of VSI creation/update, FW allocates certain
+ * Tx arbitration queue sets for each TC enabled for
+ * the VSI. The FW returns the handles to these queue
+ * sets as part of the response buffer to Add VSI,
+ * Update VSI, etc. AQ commands. It is expected that
+ * these queue set handles be associated with the Tx
+ * queues by the driver as part of the TX queue context
+ * initialization. This has to be done regardless of
+ * DCB as by default everything is mapped to TC0.
+ */
+ tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_tx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* Now associate this queue with this PCI function */
+ qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
+ qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
+ i40e_flush(hw);
+
+ clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
+
+ /* cache the tail register offset for easier writes later */
+ ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
+
+ return 0;
+}
+
+/**
+ * i40e_configure_rx_ring - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in the HMC context.
+ **/
+static int i40e_configure_rx_ring(struct i40e_ring *ring)
+{
+ struct i40e_vsi *vsi = ring->vsi;
+ u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ i40e_status err = 0;
+
+ ring->state = 0;
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
+ ring->rx_hdr_len = vsi->rx_hdr_len;
+
+ rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rx_ctx.base = (ring->dma / 128);
+ rx_ctx.qlen = ring->count;
+
+ if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
+ set_ring_16byte_desc_enabled(ring);
+ rx_ctx.dsize = 0;
+ } else {
+ rx_ctx.dsize = 1;
+ }
+
+ rx_ctx.dtype = vsi->dtype;
+ if (vsi->dtype) {
+ set_ring_ps_enabled(ring);
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ } else {
+ rx_ctx.hsplit_0 = 0;
+ }
+
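+ /* cap the largest receivable frame at what a full buffer chain
+ * can actually hold
+ */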
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame,
+ (chain_len * ring->rx_buf_len));
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 1;
+ rx_ctx.l2tsel = 1;
+ rx_ctx.showiv = 1;
+
+ /* clear the context in the HMC */
+ err = i40e_clear_lan_rx_queue_context(hw, pf_q);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* set the context in the HMC */
+ err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
+ if (err) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+ return -ENOMEM;
+ }
+
+ /* cache tail for quicker writes, and clear the reg before use */
+ ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
+ writel(0, ring->tail);
+
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_configure_tx - Configure the VSI for Tx
+ * @vsi: VSI structure describing this set of rings and resources
+ *
+ * Configure the Tx VSI for operation.
+ **/
+static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
+ err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_rx - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Configure the Rx VSI for operation.
+ **/
+static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
+{
+ int err = 0;
+ u16 i;
+
+ if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
+ vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
+ + ETH_FCS_LEN + VLAN_HLEN;
+ else
+ vsi->max_frame = I40E_RXBUFFER_2048;
+
+ /* figure out correct receive buffer length */
+ switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
+ I40E_FLAG_RX_PS_ENABLED)) {
+ case I40E_FLAG_RX_1BUF_ENABLED:
+ vsi->rx_hdr_len = 0;
+ vsi->rx_buf_len = vsi->max_frame;
+ vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+ break;
+ case I40E_FLAG_RX_PS_ENABLED:
+ vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
+ break;
+ default:
+ vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
+ break;
+ }
+
+ /* round up for the chip's needs */
+ vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
+ (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+ vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
+ (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+
+ /* set up individual rings */
+ for (i = 0; i < vsi->num_queue_pairs && !err; i++)
+ err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
+{
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
+ return;
+
+ for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
+ if (!(vsi->tc_config.enabled_tc & (1 << n)))
+ continue;
+
+ qoffset = vsi->tc_config.tc_info[n].qoffset;
+ qcount = vsi->tc_config.tc_info[n].qcount;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+ rx_ring->dcb_tc = n;
+ tx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
+{
+ if (vsi->netdev)
+ i40e_set_rx_mode(vsi->netdev);
+}
+
+/**
+ * i40e_vsi_configure - Set up the VSI for action
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_configure(struct i40e_vsi *vsi)
+{
+ int err;
+
+ i40e_set_vsi_rx_mode(vsi);
+ i40e_restore_vlan(vsi);
+ i40e_vsi_config_dcb_rings(vsi);
+ err = i40e_vsi_configure_tx(vsi);
+ if (!err)
+ err = i40e_vsi_configure_rx(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_q_vector *q_vector;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vector;
+ int i, q;
+ u32 val;
+ u32 qp;
+
+ /* The interrupt indexing is offset by 1 in the PFINT_ITRn
+ * and PFINT_LNKLSTn registers, e.g.:
+ * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
+ */
+ qp = vsi->base_queue;
+ vector = vsi->base_vector;
+ q_vector = vsi->q_vectors;
+ for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
+ q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
+ q_vector->tx.itr);
+
+ /* Linked list for the queuepairs assigned to this vector */
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
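+ /* each Rx cause chains to its paired Tx queue and each Tx cause
+ * chains to the next pair's Rx queue, forming one list per vector
+ */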
+ for (q = 0; q < q_vector->num_ringpairs; q++) {
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_TX
+ << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ ((qp+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
+ (I40E_QUEUE_TYPE_RX
+ << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ /* Terminate the linked list */
+ if (q == (q_vector->num_ringpairs - 1))
+ val |= (I40E_QUEUE_END_OF_LIST
+ << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp++;
+ }
+ }
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_enable_misc_int_causes - enable the non-queue interrupts
+ * @hw: ptr to the hardware info
+ **/
+static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
+{
+ u32 val;
+
+ /* clear things first */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
+ I40E_PFINT_ICR0_ENA_GPIO_MASK |
+ I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+
+ /* OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/**
+ * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
+ * @vsi: the VSI being configured
+ **/
+static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
+{
+ struct i40e_q_vector *q_vector = vsi->q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ /* set the ITR configuration */
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+
+ i40e_enable_misc_int_causes(hw);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+
+ wr32(hw, I40E_QINT_RQCTL(0), val);
+
+ val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+
+ wr32(hw, I40E_QINT_TQCTL(0), val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
+ * @pf: board private structure
+ **/
+static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
+ val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_irq_dynamic_enable - Enable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: enable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+
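+ /* queue vectors start at 1 (vector 0 is the misc/ICR0 interrupt)
+ * while PFINT_DYN_CTLN registers are indexed from 0, hence vector - 1
+ */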
+ val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ return IRQ_HANDLED;
+
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_fdir_clean_rings - Interrupt Handler for FDIR rings
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ **/
+static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
+{
+ struct i40e_q_vector *q_vector = data;
+
+ if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+ return IRQ_HANDLED;
+
+ pr_info("fdir ring cleaning needed\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ *
+ * Allocates MSI-X vectors and requests interrupts from the kernel.
+ **/
+static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
+{
+ int q_vectors = vsi->num_q_vectors;
+ struct i40e_pf *pf = vsi->back;
+ int base = vsi->base_vector;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ int vector, err;
+
+ for (vector = 0; vector < q_vectors; vector++) {
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+
+ if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "TxRx", rx_int_idx++);
+ tx_int_idx++;
+ } else if (q_vector->rx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "rx", rx_int_idx++);
+ } else if (q_vector->tx.ring[0]) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", basename, "tx", tx_int_idx++);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(pf->msix_entries[base + vector].vector,
+ vsi->irq_handler,
+ 0,
+ q_vector->name,
+ q_vector);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "%s: request_irq failed, error: %d\n",
+ __func__, err);
+ goto free_queue_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+ &q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
+ NULL);
+ free_irq(pf->msix_entries[base + vector].vector,
+ &(vsi->q_vectors[vector]));
+ }
+ return err;
+}
+
+/**
+ * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ **/
+static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ int i;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
+ wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+ }
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
+
+ i40e_flush(hw);
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ synchronize_irq(pf->msix_entries[i + base].vector);
+ } else {
+ /* Legacy and MSI mode - this stops all interrupt handling */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+ synchronize_irq(pf->pdev->irq);
+ }
+}
+
+/**
+ * i40e_vsi_enable_irq - Enable IRQ for the given VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = vsi->base_vector;
+ i < (vsi->num_q_vectors + vsi->base_vector); i++)
+ i40e_irq_dynamic_enable(vsi, i);
+ } else {
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_stop_misc_vector - Stop the vector that handles non-queue events
+ * @pf: board private structure
+ **/
+static void i40e_stop_misc_vector(struct i40e_pf *pf)
+{
+ /* Disable ICR 0 */
+ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
+ i40e_flush(&pf->hw);
+}
+
+/**
+ * i40e_intr - MSI/Legacy and non-queue interrupt handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ * This is the handler used for all MSI/Legacy interrupts, and deals
+ * with both queue and non-queue interrupts. This is also used in
+ * MSIX mode to handle the non-queue interrupts.
+ **/
+static irqreturn_t i40e_intr(int irq, void *data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+ struct i40e_hw *hw = &pf->hw;
+ u32 icr0, icr0_remaining;
+ u32 val, ena_mask;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+ /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+ if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+ return IRQ_NONE;
+
+ val = rd32(hw, I40E_PFINT_DYN_CTL0);
+ val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, val);
+
+ ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
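+ /* ena_mask starts as the full set of enabled causes; each cause that
+ * is handed to the service task below is cleared from it, so that
+ * cause stays masked until the corresponding subtask re-enables it.
+ */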
+
+ /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
+ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
+
+ /* temporarily disable queue cause for NAPI processing */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+ qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+ qval = rd32(hw, I40E_QINT_TQCTL(0));
+ qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_flush(hw);
+
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
+ }
+
+ if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+ val = rd32(hw, I40E_GLGEN_RSTAT);
+ val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ if (val & I40E_RESET_CORER)
+ pf->corer_count++;
+ else if (val & I40E_RESET_GLOBR)
+ pf->globr_count++;
+ else if (val & I40E_RESET_EMPR)
+ pf->empr_count++;
+ }
+
+ /* If a critical error is pending we have no choice but to reset the
+ * device.
+ * Report and mask out any remaining unexpected interrupts.
+ */
+ icr0_remaining = icr0 & ena_mask;
+ if (icr0_remaining) {
+ dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
+ icr0_remaining);
+ if ((icr0_remaining & I40E_PFINT_ICR0_HMC_ERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) ||
+ (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
+ if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ dev_info(&pf->pdev->dev, "HMC error interrupt\n");
+ } else {
+ dev_info(&pf->pdev->dev, "device will be reset\n");
+ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ i40e_service_event_schedule(pf);
+ }
+ }
+ ena_mask &= ~icr0_remaining;
+ }
+
+ /* re-enable interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
+ i40e_flush(hw);
+ if (!test_bit(__I40E_DOWN, &pf->state)) {
+ i40e_service_event_schedule(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * map_vector_to_rxq - Assigns the Rx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @r_idx: rx queue index
+ **/
+static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+{
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+ struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
+
+ rx_ring->q_vector = q_vector;
+ q_vector->rx.ring[q_vector->rx.count] = rx_ring;
+ q_vector->rx.count++;
+ q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->vsi = vsi;
+}
+
+/**
+ * map_vector_to_txq - Assigns the Tx queue to the vector
+ * @vsi: the VSI being configured
+ * @v_idx: vector index
+ * @t_idx: tx queue index
+ **/
+static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
+{
+ struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
+ struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+
+ tx_ring->q_vector = q_vector;
+ q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+ q_vector->tx.count++;
+ q_vector->tx.latency_range = I40E_LOW_LATENCY;
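+ /* num_ringpairs is only counted on the Tx side of the pair; it is
+ * what i40e_vsi_free_irq later uses to skip vectors that never had
+ * a queue pair (and thus no IRQ) assigned.
+ */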
+ q_vector->num_ringpairs++;
+ q_vector->vsi = vsi;
+}
+
+/**
+ * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @vsi: the VSI being configured
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per queue pair, but on a constrained vector budget, we
+ * group the queue pairs as "efficiently" as possible.
+ **/
+static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
+{
+ int qp_remaining = vsi->num_queue_pairs;
+ int q_vectors = vsi->num_q_vectors;
+ int qp_per_vector;
+ int v_start = 0;
+ int qp_idx = 0;
+
+ /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
+ * group them so there are multiple queues per vector.
+ */
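+ /* e.g. with 10 queue pairs and 4 vectors the DIV_ROUND_UP split
+ * below assigns 3, 3, 2 and 2 pairs to the vectors in turn.
+ */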
+ for (; v_start < q_vectors && qp_remaining; v_start++) {
+ qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+ for (; qp_per_vector;
+ qp_per_vector--, qp_idx++, qp_remaining--) {
+ map_vector_to_rxq(vsi, v_start, qp_idx);
+ map_vector_to_txq(vsi, v_start, qp_idx);
+ }
+ }
+}
+
+/**
+ * i40e_vsi_request_irq - Request IRQ from the OS
+ * @vsi: the VSI being configured
+ * @basename: name for the vector
+ **/
+static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ err = i40e_vsi_request_irq_msix(vsi, basename);
+ else if (pf->flags & I40E_FLAG_MSI_ENABLED)
+ err = request_irq(pf->pdev->irq, i40e_intr, 0,
+ pf->misc_int_name, pf);
+ else
+ err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
+ pf->misc_int_name, pf);
+
+ if (err)
+ dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * i40e_netpoll - A Polling 'interrupt' handler
+ * @netdev: network interface device structure
+ *
+ * This is used by netconsole to send skbs without having to re-enable
+ * interrupts. It's not called while the normal interrupt routine is executing.
+ **/
+static void i40e_netpoll(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return;
+
+ pf->flags |= I40E_FLAG_IN_NETPOLL;
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ for (i = 0; i < vsi->num_q_vectors; i++)
+ i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+ } else {
+ i40e_intr(pf->pdev->irq, netdev);
+ }
+ pf->flags &= ~I40E_FLAG_IN_NETPOLL;
+}
+#endif
+
+/**
+ * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int i, j, pf_q;
+ u32 tx_reg;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
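+ /* wait for any previous enable/disable request to settle: the
+ * queue is quiescent once QENA_REQ and QENA_STAT agree
+ */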
+ j = 1000;
+ do {
+ usleep_range(1000, 2000);
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
+ ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
+
+ if (enable) {
+ /* is STAT set ? */
+ if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+ dev_info(&pf->pdev->dev,
+ "Tx %d already enabled\n", i);
+ continue;
+ }
+ } else {
+ /* is !STAT set ? */
+ if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) {
+ dev_info(&pf->pdev->dev,
+ "Tx %d already disabled\n", i);
+ continue;
+ }
+ }
+
+ /* turn on/off the queue */
+ if (enable)
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK;
+ else
+ tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+
+ wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+
+ /* wait for the change to finish */
+ for (j = 0; j < 10; j++) {
+ tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+ if (enable) {
+ if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ }
+
+ udelay(10);
+ }
+ if (j >= 10) {
+ dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @enable: start or stop the rings
+ **/
+static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int i, j, pf_q;
+ u32 rx_reg;
+
+ pf_q = vsi->base_queue;
+ for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
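+ /* same REQ/STAT settle handshake as the Tx path above */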
+ j = 1000;
+ do {
+ usleep_range(1000, 2000);
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+ } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
+ ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+
+ if (enable) {
+ /* is STAT set ? */
+ if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+ } else {
+ /* is !STAT set ? */
+ if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
+ }
+
+ /* turn on/off the queue */
+ if (enable)
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK;
+ else
+ rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK);
+ wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+
+ /* wait for the change to finish */
+ for (j = 0; j < 10; j++) {
+ rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+
+ if (enable) {
+ if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ } else {
+ if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ }
+
+ udelay(10);
+ }
+ if (j >= 10) {
+ dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
+ pf_q, (enable ? "en" : "dis"));
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_control_rings - Start or stop a VSI's rings
+ * @vsi: the VSI being configured
+ * @request: true to start the rings, false to stop them
+ **/
+static int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
+{
+ int ret;
+
+ /* do rx first for enable and last for disable */
+ if (request) {
+ ret = i40e_vsi_control_rx(vsi, request);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_tx(vsi, request);
+ } else {
+ ret = i40e_vsi_control_tx(vsi, request);
+ if (ret)
+ return ret;
+ ret = i40e_vsi_control_rx(vsi, request);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vsi_free_irq - Free the irq association with the OS
+ * @vsi: the VSI being configured
+ **/
+static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ int base = vsi->base_vector;
+ u32 val, qp;
+ int i;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (!vsi->q_vectors)
+ return;
+
+ for (i = 0; i < vsi->num_q_vectors; i++) {
+ u16 vector = i + base;
+
+ /* free only the irqs that were actually requested */
+ if (vsi->q_vectors[i].num_ringpairs == 0)
+ continue;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(pf->msix_entries[vector].vector,
+ NULL);
+ free_irq(pf->msix_entries[vector].vector,
+ &vsi->q_vectors[i]);
+
+ /* Tear down the interrupt queue link list
+ *
+ * We know that they come in pairs and always
+ * the Rx first, then the Tx. To clear the
+ * link list, stick the EOL value into the
+ * next_q field of the registers.
+ */
+ val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
+
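+ /* now walk the old list, disabling each queue's Rx and Tx
+ * interrupt causes and unhooking them from the vector
+ */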
+ while (qp != I40E_QUEUE_END_OF_LIST) {
+ u32 next;
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
+ >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ qp = next;
+ }
+ }
+ } else {
+ free_irq(pf->pdev->irq, pf);
+
+ val = rd32(hw, I40E_PFINT_LNKLST0);
+ qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
+ >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
+ val |= I40E_QUEUE_END_OF_LIST
+ << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_LNKLST0, val);
+
+ val = rd32(hw, I40E_QINT_RQCTL(qp));
+ val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
+ I40E_QINT_RQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_RQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_RQCTL(qp), val);
+
+ val = rd32(hw, I40E_QINT_TQCTL(qp));
+
+ val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
+ I40E_QINT_TQCTL_MSIX0_INDX_MASK |
+ I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ I40E_QINT_TQCTL_INTEVENT_MASK);
+
+ val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
+ I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
+
+ wr32(hw, I40E_QINT_TQCTL(qp), val);
+ }
+}
+
+/**
+ * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI being un-configured
+ *
+ * This frees the memory allocated to the q_vectors and
+ * deletes references to the NAPI struct.
+ **/
+static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
+ struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
+ int r_idx;
+
+ if (!q_vector)
+ continue;
+
+ /* disassociate q_vector from rings */
+ for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
+ q_vector->tx.ring[r_idx]->q_vector = NULL;
+ for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
+ q_vector->rx.ring[r_idx]->q_vector = NULL;
+
+ /* only VSI w/ an associated netdev is set up w/ NAPI */
+ if (vsi->netdev)
+ netif_napi_del(&q_vector->napi);
+ }
+ kfree(vsi->q_vectors);
+}
+
+/**
+ * i40e_reset_interrupt_capability - Disable interrupt setup in OS
+ * @pf: board private structure
+ **/
+static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
+{
+ /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ pci_disable_msix(pf->pdev);
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
+ pci_disable_msi(pf->pdev);
+ }
+ pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
+}
+
+/**
+ * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @pf: board private structure
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
+{
+ int i;
+
+ i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i])
+ i40e_vsi_free_q_vectors(pf->vsi[i]);
+ i40e_reset_interrupt_capability(pf);
+}
+
+/**
+ * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_enable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
+ * @vsi: the VSI being configured
+ **/
+static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+{
+ int q_idx;
+
+ if (!vsi->netdev)
+ return;
+
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+ napi_disable(&vsi->q_vectors[q_idx].napi);
+}
+
+/**
+ * i40e_quiesce_vsi - Pause a given VSI
+ * @vsi: the VSI being paused
+ **/
+static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (test_bit(__I40E_DOWN, &vsi->state))
+ return;
+
+ set_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev)) {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ } else {
+ set_bit(__I40E_DOWN, &vsi->state);
+ i40e_down(vsi);
+ }
+}
+
+/**
+ * i40e_unquiesce_vsi - Resume a given VSI
+ * @vsi: the VSI being resumed
+ **/
+static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
+{
+ if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
+ return;
+
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+ if (vsi->netdev && netif_running(vsi->netdev))
+ vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
+ else
+ i40e_up(vsi); /* this clears the DOWN bit */
+}
+
+/**
+ * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ i40e_quiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
+ * @pf: the PF
+ **/
+static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
+{
+ int v;
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ i40e_unquiesce_vsi(pf->vsi[v]);
+ }
+}
+
+/**
+ * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Return the number of TCs from given DCBx configuration
+ **/
+static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ u8 num_tc = 0;
+ int i;
+
+ /* Scan the ETS Config Priority Table to find
+ * traffic class enabled for a given priority
+ * and use the traffic class index to get the
+ * number of traffic classes enabled
+ */
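+ /* e.g. a priority table of { 0, 0, 1, 1, 2, 0, 0, 0 } has a highest
+ * TC index of 2, so 3 traffic classes are reported.
+ */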
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ if (dcbcfg->etscfg.prioritytable[i] > num_tc)
+ num_tc = dcbcfg->etscfg.prioritytable[i];
+ }
+
+ /* Traffic class index starts from zero so
+ * increment to return the actual count
+ */
+ return num_tc + 1;
+}
+
+/**
+ * i40e_dcb_get_enabled_tc - Get enabled traffic classes
+ * @dcbcfg: the corresponding DCBx configuration structure
+ *
+ * Query the given DCBX configuration and return a bitmap of the
+ * traffic classes that are enabled in it
+ **/
+static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
+{
+ u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
+ u8 enabled_tc = 1;
+ u8 i;
+
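+ /* TC0 is always enabled; e.g. num_tc == 3 yields the bitmap 0x7 */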
+ for (i = 0; i < num_tc; i++)
+ enabled_tc |= 1 << i;
+
+ return enabled_tc;
+}
+
+/**
+ * i40e_pf_get_num_tc - Get enabled traffic classes for PF
+ * @pf: PF being queried
+ *
+ * Return number of traffic classes enabled for the given PF
+ **/
+static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u8 i, enabled_tc;
+ u8 num_tc = 0;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ /* If DCB is not enabled then always in single TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return 1;
+
+ /* MFP mode return count of enabled TCs for this PF */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ num_tc++;
+ }
+ return num_tc;
+ }
+
+ /* SFP mode will be enabled for all TCs on port */
+ return i40e_dcb_get_num_tc(dcbcfg);
+}
+
+/**
+ * i40e_pf_get_default_tc - Get bitmap for first enabled TC
+ * @pf: PF being queried
+ *
+ * Return a bitmap for first enabled traffic class for this PF.
+ **/
+static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
+{
+ u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
+ u8 i = 0;
+
+ if (!enabled_tc)
+ return 0x1; /* TC0 */
+
+ /* Find the first enabled TC */
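+ /* e.g. an enabled_tcmap of 0x0C returns 0x04, i.e. TC2 */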
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ break;
+ }
+
+ return 1 << i;
+}
+
+/**
+ * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
+ * @pf: PF being queried
+ *
+ * Return a bitmap for enabled traffic classes for this PF.
+ **/
+static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
+{
+ /* If DCB is not enabled for this PF then just return default TC */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return i40e_pf_get_default_tc(pf);
+
+ /* MFP mode will have enabled TCs set by FW */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED)
+ return pf->hw.func_caps.enabled_tcmap;
+
+ /* SFP mode we want PF to be enabled for all TCs */
+ return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
+}
+
+/**
+ * i40e_vsi_get_bw_info - Query VSI BW Information
+ * @vsi: the VSI being queried
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
+{
+ struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
+ struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+ u32 tc_bw_max;
+ int i;
+
+ /* Get the VSI level BW configuration */
+ aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi bw config, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
+ return -EINVAL;
+ }
+
+ /* Get the VSI level BW configuration per TC */
+ aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
+ aq_ret, pf->hw.aq.asq_last_status);
+ return -EINVAL;
+ }
+
+ if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
+ dev_info(&pf->pdev->dev,
+ "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
+ bw_config.tc_valid_bits,
+ bw_ets_config.tc_valid_bits);
+ /* Still continuing despite the mismatch */
+ }
+
+ vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
+ vsi->bw_max_quanta = bw_config.max_bw;
+ tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
+ (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
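+ /* tc_bw_max packs a 4-bit max-quanta field per TC across the two
+ * 16-bit words combined above; only the low 3 bits of each nibble
+ * carry the value, hence the shift-and-mask below
+ */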
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
+ vsi->bw_ets_limit_credits[i] =
+ le16_to_cpu(bw_ets_config.credits[i]);
+ /* 3 bits out of 4 for each TC */
+ vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC bitmap
+ * @bw_share: BW shared credits per TC
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
+ u8 *bw_share)
+{
+ struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+ i40e_status aq_ret;
+ int i;
+
+ bw_data.tc_valid_bits = enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ bw_data.tc_bw_credits[i] = bw_share[i];
+
+ aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+ NULL);
+ if (aq_ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "%s: AQ command Config VSI BW allocation per TC failed = %d\n",
+ __func__, vsi->back->hw.aq.asq_last_status);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ vsi->info.qs_handle[i] = bw_data.qs_handles[i];
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @enabled_tc: TC map to be enabled
+ *
+ **/
+static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ u8 netdev_tc = 0;
+ int i;
+ struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+ if (!netdev)
+ return;
+
+ if (!enabled_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ /* Set up actual enabled TCs on the VSI */
+ if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
+ return;
+
+ /* set per TC queues for the VSI */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ /* Only set TC queues for enabled tcs
+ *
+ * e.g. For a VSI that has TC0 and TC3 enabled the
+ * enabled_tc bitmap would be 0x00001001; the driver
+ * will set the numtc for netdev as 2 that will be
+ * referenced by the netdev layer as TC 0 and 1.
+ */
+ if (vsi->tc_config.enabled_tc & (1 << i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_config.tc_info[i].netdev_tc,
+ vsi->tc_config.tc_info[i].qcount,
+ vsi->tc_config.tc_info[i].qoffset);
+ }
+
+ /* Assign UP2TC map for the VSI */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ /* Get the actual TC# for the UP */
+ u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
+ * @vsi: the VSI being configured
+ * @ctxt: the ctxt buffer returned from AQ VSI update param command
+ **/
+static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
+ struct i40e_vsi_context *ctxt)
+{
+ /* copy just the sections touched not the entire info
+ * since not all sections are valid as returned by
+ * update vsi params
+ */
+ vsi->info.mapping_flags = ctxt->info.mapping_flags;
+ memcpy(&vsi->info.queue_mapping,
+ &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
+ memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
+ * @vsi: VSI to be configured
+ * @enabled_tc: TC bitmap
+ *
+ * This configures a particular VSI for TCs that are mapped to the
+ * given TC bitmap. It uses default bandwidth share for TCs across
+ * VSIs to configure TC for a particular VSI.
+ *
+ * NOTE:
+ * It is expected that the VSI queues have been quiesced before calling
+ * this function.
+ **/
+static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
+{
+ u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ struct i40e_vsi_context ctxt;
+ int ret = 0;
+ int i;
+
+ /* Check if enabled_tc is same as existing or new TCs */
+ if (vsi->tc_config.enabled_tc == enabled_tc)
+ return ret;
+
+ /* Enable ETS TCs with equal BW Share for now across all VSIs */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (enabled_tc & (1 << i))
+ bw_share[i] = 1;
+ }
+
+ ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed configuring TC map %d for VSI %d\n",
+ enabled_tc, vsi->seid);
+ goto out;
+ }
+
+ /* Update Queue Pairs Mapping for currently enabled UPs */
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = vsi->back->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+
+ /* Update the VSI after updating the VSI queue-mapping information */
+ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "update vsi failed, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ goto out;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ /* Update current VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Failed updating vsi bw info, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ goto out;
+ }
+
+ /* Update the netdev TC setup */
+ i40e_vsi_config_netdev_tc(vsi, enabled_tc);
+out:
+ return ret;
+}
+
+/**
+ * i40e_up_complete - Finish the last steps of bringing up a connection
+ * @vsi: the VSI being configured
+ **/
+static int i40e_up_complete(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ i40e_vsi_configure_msix(vsi);
+ else
+ i40e_configure_msi_and_legacy(vsi);
+
+ /* start rings */
+ err = i40e_vsi_control_rings(vsi, true);
+ if (err)
+ return err;
+
+ clear_bit(__I40E_DOWN, &vsi->state);
+ i40e_napi_enable_all(vsi);
+ i40e_vsi_enable_irq(vsi);
+
+ if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
+ (vsi->netdev)) {
+ netif_tx_start_all_queues(vsi->netdev);
+ netif_carrier_on(vsi->netdev);
+ }
+ i40e_service_event_schedule(pf);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_reinit_locked - Reset the VSI
+ * @vsi: the VSI being configured
+ *
+ * Rebuild the ring structs after some configuration
+ * has changed, e.g. MTU size.
+ **/
+static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
+ usleep_range(1000, 2000);
+ i40e_down(vsi);
+
+ /* Give a VF some time to respond to the reset. The
+ * two second wait is based upon the watchdog cycle in
+ * the VF driver.
+ */
+ if (vsi->type == I40E_VSI_SRIOV)
+ msleep(2000);
+ i40e_up(vsi);
+ clear_bit(__I40E_CONFIG_BUSY, &pf->state);
+}
+
+/**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+ int err;
+
+ err = i40e_vsi_configure(vsi);
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ return err;
+}
+
+/**
+ * i40e_down - Shutdown the connection processing
+ * @vsi: the VSI being stopped
+ **/
+void i40e_down(struct i40e_vsi *vsi)
+{
+ int i;
+
+ /* It is assumed that the caller of this function
+ * sets the vsi->state __I40E_DOWN bit.
+ */
+ if (vsi->netdev) {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_disable(vsi->netdev);
+ }
+ i40e_vsi_disable_irq(vsi);
+ i40e_vsi_control_rings(vsi, false);
+ i40e_napi_disable_all(vsi);
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ i40e_clean_tx_ring(&vsi->tx_rings[i]);
+ i40e_clean_rx_ring(&vsi->rx_rings[i]);
+ }
+}
+
+/**
+ * i40e_setup_tc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ **/
+static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u8 enabled_tc = 0;
+ int ret = -EINVAL;
+ int i;
+
+ /* Check if DCB enabled to continue */
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+ netdev_info(netdev, "DCB is not enabled for adapter\n");
+ goto exit;
+ }
+
+ /* Check if MFP enabled */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
+ goto exit;
+ }
+
+ /* Check whether tc count is within enabled limit */
+ if (tc > i40e_pf_get_num_tc(pf)) {
+ netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
+ goto exit;
+ }
+
+ /* Generate TC map for number of tc requested */
+ for (i = 0; i < tc; i++)
+ enabled_tc |= (1 << i);
+
+ /* Requesting same TC configuration as already enabled */
+ if (enabled_tc == vsi->tc_config.enabled_tc)
+ return 0;
+
+ /* Quiesce VSI queues */
+ i40e_quiesce_vsi(vsi);
+
+ /* Configure VSI for enabled TCs */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
+ vsi->seid);
+ goto exit;
+ }
+
+ /* Unquiesce VSI */
+ i40e_unquiesce_vsi(vsi);
+
+exit:
+ return ret;
+}
+
+/**
+ * i40e_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog subtask is
+ * enabled, and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_open(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ char int_name[IFNAMSIZ];
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__I40E_TESTING, &pf->state))
+ return -EBUSY;
+
+ netif_carrier_off(netdev);
+
+ /* allocate descriptors */
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (err)
+ goto err_setup_tx;
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_vsi_configure(vsi);
+ if (err)
+ goto err_setup_rx;
+
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
+
+ err = i40e_up_complete(vsi);
+ if (err)
+ goto err_up_complete;
+
+ if ((vsi->type == I40E_VSI_MAIN) || (vsi->type == I40E_VSI_VMDQ2)) {
+ err = i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, true, NULL);
+ if (err)
+ netdev_info(netdev,
+ "couldn't set broadcast err %d aq_err %d\n",
+ err, pf->hw.aq.asq_last_status);
+ }
+
+ return 0;
+
+err_up_complete:
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+err_setup_rx:
+ i40e_vsi_free_rx_resources(vsi);
+err_setup_tx:
+ i40e_vsi_free_tx_resources(vsi);
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+
+ return err;
+}
+
+/**
+ * i40e_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * this netdev interface is disabled.
+ *
+ * Returns 0, this is not allowed to fail
+ **/
+static int i40e_close(struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
+ if (test_and_set_bit(__I40E_DOWN, &vsi->state))
+ return 0;
+
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_do_reset - Start a PF or Core Reset sequence
+ * @pf: board private structure
+ * @reset_flags: which reset is requested
+ *
+ * The essential difference in resets is that the PF Reset
+ * doesn't clear the packet buffers, doesn't reset the PE
+ * firmware, and doesn't bother the other PFs on the chip.
+ **/
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+{
+ u32 val;
+
+ WARN_ON(in_interrupt());
+
+ /* do the biggest reset indicated */
+ if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+
+ /* Request a Global Reset
+ *
+ * This will start the chip's countdown to the actual full
+ * chip reset event, and a warning interrupt to be sent
+ * to all PFs, including the requestor. Our handler
+ * for the warning interrupt will deal with the shutdown
+ * and recovery of the switch setup.
+ */
+ dev_info(&pf->pdev->dev, "GlobalR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+
+ } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+
+ /* Request a Core Reset
+ *
+ * Same as Global Reset, except does *not* include the MAC/PHY
+ */
+ dev_info(&pf->pdev->dev, "CoreR requested\n");
+ val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ val |= I40E_GLGEN_RTRIG_CORER_MASK;
+ wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
+ i40e_flush(&pf->hw);
+
+ } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+
+ /* Request a PF Reset
+ *
+ * Resets only the PF-specific registers
+ *
+ * This goes directly to the tear-down and rebuild of
+ * the switch, since we need to do all the recovery as
+ * for the Core Reset.
+ */
+ dev_info(&pf->pdev->dev, "PFR requested\n");
+ i40e_handle_reset_warning(pf);
+
+ } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+ int v;
+
+ /* Find the VSI(s) that requested a re-init */
+ dev_info(&pf->pdev->dev,
+ "VSI reinit requested\n");
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ if (vsi != NULL &&
+ test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
+ i40e_vsi_reinit_locked(pf->vsi[v]);
+ clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
+ }
+ }
+
+ /* no further action needed, so return now */
+ return;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "bad reset request 0x%08x\n", reset_flags);
+ return;
+ }
+}
+
+/**
+ * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ *
+ * Handler for LAN Queue Overflow Event generated by the firmware for PF
+ * and VF queues
+ **/
+static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_aqc_lan_overflow *data =
+ (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
+ u32 queue = le32_to_cpu(data->prtdcb_rupto);
+ u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ u16 vf_id;
+
+ dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n",
+ __func__, queue, qtx_ctl);
+
+ /* Queue belongs to VF, find the VF and issue VF reset */
+ if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
+ >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
+ vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
+ >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
+ vf_id -= hw->func_caps.vf_base_id;
+ vf = &pf->vf[vf_id];
+ i40e_vc_notify_vf_reset(vf);
+ /* Allow VF to process pending reset notification */
+ msleep(20);
+ i40e_reset_vf(vf, false);
+ }
+}
+
+/**
+ * i40e_service_event_complete - Finish up the service event
+ * @pf: board private structure
+ **/
+static void i40e_service_event_complete(struct i40e_pf *pf)
+{
+ BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_clear_bit();
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
+}
+
+/**
+ * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
+ * @pf: board private structure
+ **/
+static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
+{
+ if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
+ return;
+
+ pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &pf->state))
+ return;
+}
+
+/**
+ * i40e_vsi_link_event - notify VSI of a link event
+ * @vsi: vsi to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
+{
+ if (!vsi)
+ return;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ if (!vsi->netdev || !vsi->netdev_registered)
+ break;
+
+ if (link_up) {
+ netif_carrier_on(vsi->netdev);
+ netif_tx_wake_all_queues(vsi->netdev);
+ } else {
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_all_queues(vsi->netdev);
+ }
+ break;
+
+ case I40E_VSI_SRIOV:
+ break;
+
+ case I40E_VSI_VMDQ2:
+ case I40E_VSI_CTRL:
+ case I40E_VSI_MIRROR:
+ default:
+ /* there is no notification for other VSIs */
+ break;
+ }
+}
+
+/**
+ * i40e_veb_link_event - notify elements on the veb of a link event
+ * @veb: veb to be notified
+ * @link_up: link up or down
+ **/
+static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
+{
+ struct i40e_pf *pf;
+ int i;
+
+ if (!veb || !veb->pf)
+ return;
+ pf = veb->pf;
+
+ /* depth first... */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
+ i40e_veb_link_event(pf->veb[i], link_up);
+
+ /* ... now the local VSIs */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
+ i40e_vsi_link_event(pf->vsi[i], link_up);
+}
+
+/**
+ * i40e_link_event - Update netif_carrier status
+ * @pf: board private structure
+ **/
+static void i40e_link_event(struct i40e_pf *pf)
+{
+ bool new_link, old_link;
+
+ new_link = (pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP);
+ old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
+
+ if (new_link == old_link)
+ return;
+
+ netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+ "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+
+ /* Notify the base of the switch tree connected to
+ * the link. Floating VEBs are not notified.
+ */
+ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ else
+ i40e_vsi_link_event(pf->vsi[pf->lan_vsi], new_link);
+
+ if (pf->vf)
+ i40e_vc_notify_link_state(pf);
+}
+
+/**
+ * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
+ * @pf: board private structure
+ *
+ * Set the per-queue flags to request a check for stuck queues in the irq
+ * clean functions, then force interrupts to be sure the irq clean is called.
+ **/
+static void i40e_check_hang_subtask(struct i40e_pf *pf)
+{
+ int i, v;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* for each VSI/netdev
+ * for each Tx queue
+ * set the check flag
+ * for each q_vector
+ * force an interrupt
+ */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ int armed = 0;
+
+ if (!pf->vsi[v] ||
+ test_bit(__I40E_DOWN, &vsi->state) ||
+ (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
+ continue;
+
+ for (i = 0; i < vsi->num_queue_pairs; i++) {
+ set_check_for_tx_hang(&vsi->tx_rings[i]);
+ if (test_bit(__I40E_HANG_CHECK_ARMED,
+ &vsi->tx_rings[i].state))
+ armed++;
+ }
+
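+ /* if any ring armed its hang check, fire a software interrupt on
+ * the VSI's vector(s) so the irq clean routines run even if the
+ * hardware has stopped generating interrupts
+ */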
+ if (armed) {
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
+ (I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK));
+ } else {
+ u16 vec = vsi->base_vector - 1;
+ u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ for (i = 0; i < vsi->num_q_vectors; i++, vec++)
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(vec), val);
+ }
+ i40e_flush(&vsi->back->hw);
+ }
+ }
+}
+
+/**
+ * i40e_watchdog_subtask - Update stats for active VSIs and VEBs
+ * @pf: board private structure
+ **/
+static void i40e_watchdog_subtask(struct i40e_pf *pf)
+{
+ int i;
+
+ /* if interface is down do nothing */
+ if (test_bit(__I40E_DOWN, &pf->state) ||
+ test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ return;
+
+ /* Update the stats for active netdevs so the network stack
+ * can look at updated numbers whenever it cares to
+ */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->netdev)
+ i40e_update_stats(pf->vsi[i]);
+
+ /* Update the stats for the active switching components */
+ for (i = 0; i < I40E_MAX_VEB; i++)
+ if (pf->veb[i])
+ i40e_update_veb_stats(pf->veb[i]);
+}
+
+/**
+ * i40e_reset_subtask - Set up for resetting the device and driver
+ * @pf: board private structure
+ **/
+static void i40e_reset_subtask(struct i40e_pf *pf)
+{
+ u32 reset_flags = 0;
+
+ if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+ clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+ clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+ clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
+ }
+ if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
+ reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+ clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
+ }
+
+ /* If there's a recovery already waiting, it takes
+ * precedence before starting a new reset sequence.
+ */
+ if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
+ i40e_handle_reset_warning(pf);
+ return;
+ }
+
+ /* If we're already down or resetting, just bail */
+ if (reset_flags &&
+ !test_bit(__I40E_DOWN, &pf->state) &&
+ !test_bit(__I40E_CONFIG_BUSY, &pf->state))
+ i40e_do_reset(pf, reset_flags);
+}
+
+/**
+ * i40e_handle_link_event - Handle link event
+ * @pf: board private structure
+ * @e: event info posted on ARQ
+ **/
+static void i40e_handle_link_event(struct i40e_pf *pf,
+ struct i40e_arq_event_info *e)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+ struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+
+ /* save off old link status information */
+ memcpy(&pf->hw.phy.link_info_old, hw_link_info,
+ sizeof(pf->hw.phy.link_info_old));
+
+ /* update link status */
+ hw_link_info->phy_type = (enum i40e_aq_phy_type)status->phy_type;
+ hw_link_info->link_speed = (enum i40e_aq_link_speed)status->link_speed;
+ hw_link_info->link_info = status->link_info;
+ hw_link_info->an_info = status->an_info;
+ hw_link_info->ext_info = status->ext_info;
+ hw_link_info->lse_enable =
+ le16_to_cpu(status->command_flags) &
+ I40E_AQ_LSE_ENABLE;
+
+ /* process the event */
+ i40e_link_event(pf);
+
+ /* Do a new status request to re-enable LSE reporting
+ * and load new status information into the hw struct,
+ * then see if the status changed while processing the
+ * initial event.
+ */
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_link_event(pf);
+}
+
+/**
+ * i40e_clean_adminq_subtask - Clean the AdminQ rings
+ * @pf: board private structure
+ **/
+static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
+{
+ struct i40e_arq_event_info event;
+ struct i40e_hw *hw = &pf->hw;
+ u16 pending, i = 0;
+ i40e_status ret;
+ u16 opcode;
+ u32 val;
+
+ if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
+ return;
+
+ event.msg_size = I40E_MAX_AQ_BUF_SIZE;
+ event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
+ if (!event.msg_buf)
+ return;
+
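+ /* drain the ARQ one element at a time, dispatching on the opcode;
+ * stop when the queue is empty or adminq_work_limit elements have
+ * been handled so the service task stays bounded
+ */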
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &pending);
+ if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+ dev_info(&pf->pdev->dev, "No ARQ event found\n");
+ break;
+ } else if (ret) {
+ dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
+ break;
+ }
+
+ opcode = le16_to_cpu(event.desc.opcode);
+ switch (opcode) {
+
+ case i40e_aqc_opc_get_link_status:
+ i40e_handle_link_event(pf, &event);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+ ret = i40e_vc_process_vf_msg(pf,
+ le16_to_cpu(event.desc.retval),
+ le32_to_cpu(event.desc.cookie_high),
+ le32_to_cpu(event.desc.cookie_low),
+ event.msg_buf,
+ event.msg_size);
+ break;
+ case i40e_aqc_opc_lldp_update_mib:
+ dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
+ i40e_handle_lan_overflow_event(pf, &event);
+ break;
+ default:
+ dev_info(&pf->pdev->dev,
+ "ARQ Error: Unknown event %d received\n",
+ opcode);
+ break;
+ }
+ } while (pending && (i++ < pf->adminq_work_limit));
+
+ clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+ /* re-enable Admin queue interrupt cause */
+ val = rd32(hw, I40E_PFINT_ICR0_ENA);
+ val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, val);
+ i40e_flush(hw);
+
+ kfree(event.msg_buf);
+}
+
+/**
+ * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
+ * @veb: pointer to the VEB instance
+ *
+ * This is a recursive function that first builds the attached VSIs then
+ * recurses in to build the next layer of VEB. We track the connections
+ * through our own index numbers because the seid's from the HW could
+ * change across the reset.
+ **/
+static int i40e_reconstitute_veb(struct i40e_veb *veb)
+{
+ struct i40e_vsi *ctl_vsi = NULL;
+ struct i40e_pf *pf = veb->pf;
+ int v, veb_idx;
+ int ret;
+
+ /* build VSI that owns this VEB, temporarily attached to base VEB */
+ for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ if (pf->vsi[v] &&
+ pf->vsi[v]->veb_idx == veb->idx &&
+ pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ ctl_vsi = pf->vsi[v];
+ break;
+ }
+ }
+ if (!ctl_vsi) {
+ dev_info(&pf->pdev->dev,
+ "missing owner VSI for veb_idx %d\n", veb->idx);
+ ret = -ENOENT;
+ goto end_reconstitute;
+ }
+ if (ctl_vsi != pf->vsi[pf->lan_vsi])
+ ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ ret = i40e_add_vsi(ctl_vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of owner VSI failed: %d\n", ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(ctl_vsi);
+
+ /* create the VEB in the switch and move the VSI onto the VEB */
+ ret = i40e_add_veb(veb, ctl_vsi);
+ if (ret)
+ goto end_reconstitute;
+
+ /* create the remaining VSIs attached to this VEB */
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
+ continue;
+
+ if (pf->vsi[v]->veb_idx == veb->idx) {
+ struct i40e_vsi *vsi = pf->vsi[v];
+ vsi->uplink_seid = veb->seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of vsi_idx %d failed: %d\n",
+ v, ret);
+ goto end_reconstitute;
+ }
+ i40e_vsi_reset_stats(vsi);
+ }
+ }
+
+ /* create any VEBs attached to this VEB - RECURSION */
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
+ pf->veb[veb_idx]->uplink_seid = veb->seid;
+ ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
+ if (ret)
+ break;
+ }
+ }
+
+end_reconstitute:
+ return ret;
+}
+
+/**
+ * i40e_get_capabilities - get info about the HW
+ * @pf: the PF struct
+ **/
+static int i40e_get_capabilities(struct i40e_pf *pf)
+{
+ struct i40e_aqc_list_capabilities_element_resp *cap_buf;
+ u16 data_size;
+ int buf_len;
+ int err;
+
+ buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
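+ /* start with room for 40 capability records; if the firmware answers
+ * ENOMEM, data_size reports how large the buffer needs to be and we
+ * retry with that size
+ */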
+ do {
+ cap_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!cap_buf)
+ return -ENOMEM;
+
+ /* this loads the data into the hw struct for us */
+ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
+ &data_size,
+ i40e_aqc_opc_list_func_capabilities,
+ NULL);
+ /* data loaded, buffer no longer needed */
+ kfree(cap_buf);
+
+ if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
+ /* retry with a larger buffer */
+ buf_len = data_size;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ dev_info(&pf->pdev->dev,
+ "capability discovery failed: aq=%d\n",
+ pf->hw.aq.asq_last_status);
+ return -ENODEV;
+ }
+ } while (err);
+
+ if (pf->hw.debug_mask & I40E_DEBUG_USER)
+ dev_info(&pf->pdev->dev,
+ "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
+ pf->hw.pf_id, pf->hw.func_caps.num_vfs,
+ pf->hw.func_caps.num_msix_vectors,
+ pf->hw.func_caps.num_msix_vectors_vf,
+ pf->hw.func_caps.fd_filters_guaranteed,
+ pf->hw.func_caps.fd_filters_best_effort,
+ pf->hw.func_caps.num_tx_qp,
+ pf->hw.func_caps.num_vsis);
+
+ return 0;
+}
+
+/**
+ * i40e_fdir_setup - initialize the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_setup(struct i40e_pf *pf)
+{
+ struct i40e_vsi *vsi;
+ bool new_vsi = false;
+ int err, i;
+
+ if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+ return;
+
+ pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+
+ /* find existing or make new FDIR VSI */
+ vsi = NULL;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ vsi = pf->vsi[i];
+ if (!vsi) {
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->mac_seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
+ pf->flags &= ~I40E_FLAG_FDIR_ENABLED;
+ return;
+ }
+ new_vsi = true;
+ }
+ WARN_ON(vsi->base_queue != I40E_FDIR_RING);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_rings);
+
+ err = i40e_vsi_setup_tx_resources(vsi);
+ if (!err)
+ err = i40e_vsi_setup_rx_resources(vsi);
+ if (!err)
+ err = i40e_vsi_configure(vsi);
+ if (!err && new_vsi) {
+ char int_name[IFNAMSIZ + 9];
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ }
+ if (!err)
+ err = i40e_up_complete(vsi);
+
+ clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
+}
+
+/**
+ * i40e_fdir_teardown - release the Flow Director resources
+ * @pf: board private structure
+ **/
+static void i40e_fdir_teardown(struct i40e_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_release(pf->vsi[i]);
+ break;
+ }
+ }
+}
+
+/**
+ * i40e_handle_reset_warning - prep for the core to reset
+ * @pf: board private structure
+ *
+ * Close up the VFs and other things in prep for a Core Reset,
+ * then get ready to rebuild the world.
+ **/
+static void i40e_handle_reset_warning(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status ret;
+ u32 v;
+
+ clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
+ if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+ return;
+
+ dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
+
+ i40e_vc_notify_reset(pf);
+
+ /* quiesce the VSIs and their queues that are not already DOWN */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ if (pf->vsi[v])
+ pf->vsi[v]->seid = 0;
+ }
+
+ i40e_shutdown_adminq(&pf->hw);
+
+ /* Now we wait for GRST to settle out.
+ * We don't have to delete the VEBs or VSIs from the hw switch
+ * because the reset will make them disappear.
+ */
+ ret = i40e_pf_reset(hw);
+ if (ret)
+ dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ pf->pfr_count++;
+
+ if (test_bit(__I40E_DOWN, &pf->state))
+ goto end_core_reset;
+ dev_info(&pf->pdev->dev, "Rebuilding internal switch\n");
+
+ /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
+ ret = i40e_init_adminq(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+ goto end_core_reset;
+ }
+
+ ret = i40e_get_capabilities(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
+ ret);
+ goto end_core_reset;
+ }
+
+ /* call shutdown HMC */
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+ ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
+ goto end_core_reset;
+ }
+
+ /* do basic switch setup */
+ ret = i40e_setup_pf_switch(pf);
+ if (ret)
+ goto end_core_reset;
+
+ /* Rebuild the VSIs and VEBs that existed before reset.
+ * They are still in our local switch element arrays, so only
+ * need to rebuild the switch model in the HW.
+ *
+ * If there were VEBs but the reconstitution failed, we'll try
+ * to recover minimal use by getting the basic PF VSI working.
+ */
+ if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
+ dev_info(&pf->pdev->dev, "attempting to rebuild switch\n");
+ /* find the one VEB connected to the MAC, and find orphans */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (!pf->veb[v])
+ continue;
+
+ if (pf->veb[v]->uplink_seid == pf->mac_seid ||
+ pf->veb[v]->uplink_seid == 0) {
+ ret = i40e_reconstitute_veb(pf->veb[v]);
+
+ if (!ret)
+ continue;
+
+ /* If Main VEB failed, we're in deep doodoo,
+ * so give up rebuilding the switch and set up
+ * for minimal rebuild of PF VSI.
+ * If orphan failed, we'll report the error
+ * but try to keep going.
+ */
+ if (pf->veb[v]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of switch failed: %d, will try to set up simple PF connection\n",
+ ret);
+ pf->vsi[pf->lan_vsi]->uplink_seid
+ = pf->mac_seid;
+ break;
+ } else if (pf->veb[v]->uplink_seid == 0) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of orphan VEB failed: %d\n",
+ ret);
+ }
+ }
+ }
+ }
+
+ if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
+ dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+ /* no VEB, so rebuild only the Main VSI */
+ ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "rebuild of Main VSI failed: %d\n", ret);
+ goto end_core_reset;
+ }
+ }
+
+ /* reinit the misc interrupt */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ ret = i40e_setup_misc_vector(pf);
+
+ /* restart the VSIs that were rebuilt and running before the reset */
+ i40e_pf_unquiesce_all_vsi(pf);
+
+ /* tell the firmware that we're starting */
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+ dev_info(&pf->pdev->dev, "PF reset done\n");
+
+end_core_reset:
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+}
+
+/**
+ * i40e_handle_mdd_event
+ * @pf: pointer to the pf structure
+ *
+ * Called from the MDD irq handler to identify possibly malicious vfs
+ **/
+static void i40e_handle_mdd_event(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ bool mdd_detected = false;
+ struct i40e_vf *vf;
+ u32 reg;
+ int i;
+
+ if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
+ return;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
+ >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK)
+ >> I40E_GL_MDET_TX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
+ >> I40E_GL_MDET_TX_QUEUE_SHIFT;
+ dev_info(&pf->pdev->dev,
+ "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
+ >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK)
+ >> I40E_GL_MDET_RX_EVENT_SHIFT;
+ u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
+ >> I40E_GL_MDET_RX_QUEUE_SHIFT;
+ dev_info(&pf->pdev->dev,
+ "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n",
+ event, queue, func);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ /* see if one of the VFs needs its hand slapped */
+ for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
+ vf = &(pf->vf[i]);
+ reg = rd32(hw, I40E_VP_MDET_TX(i));
+ if (reg & I40E_VP_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "MDD TX event on VF %d\n", i);
+ }
+
+ reg = rd32(hw, I40E_VP_MDET_RX(i));
+ if (reg & I40E_VP_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
+ vf->num_mdd_events++;
+ dev_info(&pf->pdev->dev, "MDD RX event on VF %d\n", i);
+ }
+
+ if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
+ dev_info(&pf->pdev->dev,
+ "Too many MDD events on VF %d, disabled\n", i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_service_task - Run the driver's async subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void i40e_service_task(struct work_struct *work)
+{
+ struct i40e_pf *pf = container_of(work,
+ struct i40e_pf,
+ service_task);
+ unsigned long start_time = jiffies;
+
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ i40e_check_hang_subtask(pf);
+ i40e_sync_filters_subtask(pf);
+ i40e_clean_adminq_subtask(pf);
+
+ i40e_service_event_complete(pf);
+
+ /* If the tasks have taken longer than one timer cycle or there
+ * is more work to be done, reschedule the service task now
+ * rather than wait for the timer to tick again.
+ */
+ if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
+ test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
+ test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
+ test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_service_timer - timer callback
+ * @data: pointer to PF struct
+ **/
+static void i40e_service_timer(unsigned long data)
+{
+ struct i40e_pf *pf = (struct i40e_pf *)data;
+
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+ i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ vsi->alloc_queue_pairs = pf->num_lan_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_q_vectors = pf->num_lan_msix;
+ else
+ vsi->num_q_vectors = 1;
+
+ break;
+
+ case I40E_VSI_FDIR:
+ vsi->alloc_queue_pairs = 1;
+ vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = 1;
+ break;
+
+ case I40E_VSI_VMDQ2:
+ vsi->alloc_queue_pairs = pf->num_vmdq_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ vsi->num_q_vectors = pf->num_vmdq_msix;
+ break;
+
+ case I40E_VSI_SRIOV:
+ vsi->alloc_queue_pairs = pf->num_vf_qps;
+ vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+ I40E_REQ_DESCRIPTOR_MULTIPLE);
+ break;
+
+ default:
+ WARN_ON(1);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
+ * @pf: board private structure
+ * @type: type of VSI
+ *
+ * On error: returns error code (negative)
+ * On success: returns vsi index in PF (positive)
+ **/
+static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
+{
+ int ret = -ENODEV;
+ struct i40e_vsi *vsi;
+ int vsi_idx;
+ int i;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VSI list may be fragmented if VSI creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free VSIs in the list.
+ *
+ * find next empty vsi slot, looping back around if necessary
+ */
+ i = pf->next_vsi;
+ while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ i++;
+ if (i >= pf->hw.func_caps.num_vsis) {
+ i = 0;
+ while (i < pf->next_vsi && pf->vsi[i])
+ i++;
+ }
+
+ if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ vsi_idx = i; /* Found one! */
+ } else {
+ ret = -ENODEV;
+ goto err_alloc_vsi; /* out of VSI slots! */
+ }
+ pf->next_vsi = ++i;
+
+ vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
+ if (!vsi) {
+ ret = -ENOMEM;
+ goto err_alloc_vsi;
+ }
+ vsi->type = type;
+ vsi->back = pf;
+ set_bit(__I40E_DOWN, &vsi->state);
+ vsi->flags = 0;
+ vsi->idx = vsi_idx;
+ vsi->rx_itr_setting = pf->rx_itr_default;
+ vsi->tx_itr_setting = pf->tx_itr_default;
+ vsi->netdev_registered = false;
+ vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
+ INIT_LIST_HEAD(&vsi->mac_filter_list);
+
+ i40e_set_num_rings_in_vsi(vsi);
+
+ /* Setup default MSIX irq handler for VSI */
+ i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
+
+ pf->vsi[vsi_idx] = vsi;
+ ret = vsi_idx;
+err_alloc_vsi:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_vsi_clear - Deallocate the VSI provided
+ * @vsi: the VSI being un-configured
+ **/
+static int i40e_vsi_clear(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf;
+
+ if (!vsi)
+ return 0;
+
+ if (!vsi->back)
+ goto free_vsi;
+ pf = vsi->back;
+
+ mutex_lock(&pf->switch_mutex);
+ if (!pf->vsi[vsi->idx]) {
+ dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
+ vsi->idx, vsi->idx, vsi, vsi->type);
+ goto unlock_vsi;
+ }
+
+ if (pf->vsi[vsi->idx] != vsi) {
+ dev_err(&pf->pdev->dev,
+ "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
+ pf->vsi[vsi->idx]->idx,
+ pf->vsi[vsi->idx],
+ pf->vsi[vsi->idx]->type,
+ vsi->idx, vsi, vsi->type);
+ goto unlock_vsi;
+ }
+
+ /* updates the pf for this cleared vsi */
+ i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
+ i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+
+ pf->vsi[vsi->idx] = NULL;
+ if (vsi->idx < pf->next_vsi)
+ pf->next_vsi = vsi->idx;
+
+unlock_vsi:
+ mutex_unlock(&pf->switch_mutex);
+free_vsi:
+ kfree(vsi);
+
+ return 0;
+}
+
+/**
+ * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being configured
+ **/
+static int i40e_alloc_rings(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int ret = 0;
+ int i;
+
+ vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!vsi->rx_rings) {
+ ret = -ENOMEM;
+ goto err_alloc_rings;
+ }
+
+ vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
+ sizeof(struct i40e_ring), GFP_KERNEL);
+ if (!vsi->tx_rings) {
+ ret = -ENOMEM;
+ kfree(vsi->rx_rings);
+ goto err_alloc_rings;
+ }
+
+ /* Set basic values in the rings to be used later during open() */
+ for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+ struct i40e_ring *rx_ring = &vsi->rx_rings[i];
+ struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+
+ tx_ring->queue_index = i;
+ tx_ring->reg_idx = vsi->base_queue + i;
+ tx_ring->ring_active = false;
+ tx_ring->vsi = vsi;
+ tx_ring->netdev = vsi->netdev;
+ tx_ring->dev = &pf->pdev->dev;
+ tx_ring->count = vsi->num_desc;
+ tx_ring->size = 0;
+ tx_ring->dcb_tc = 0;
+
+ rx_ring->queue_index = i;
+ rx_ring->reg_idx = vsi->base_queue + i;
+ rx_ring->ring_active = false;
+ rx_ring->vsi = vsi;
+ rx_ring->netdev = vsi->netdev;
+ rx_ring->dev = &pf->pdev->dev;
+ rx_ring->count = vsi->num_desc;
+ rx_ring->size = 0;
+ rx_ring->dcb_tc = 0;
+ if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
+ set_ring_16byte_desc_enabled(rx_ring);
+ else
+ clear_ring_16byte_desc_enabled(rx_ring);
+ }
+
+err_alloc_rings:
+ return ret;
+}
+
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+ if (vsi) {
+ kfree(vsi->rx_rings);
+ kfree(vsi->tx_rings);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
+ * @pf: board private structure
+ * @vectors: the number of MSI-X vectors to request
+ *
+ * Returns the number of vectors reserved, or error
+ **/
+static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
+{
+ int err = 0;
+
+ pf->num_msix_entries = 0;
+ while (vectors >= I40E_MIN_MSIX) {
+ err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
+ if (err == 0) {
+ /* good to go */
+ pf->num_msix_entries = vectors;
+ break;
+ } else if (err < 0) {
+ /* total failure */
+ dev_info(&pf->pdev->dev,
+ "MSI-X vector reservation failed: %d\n", err);
+ vectors = 0;
+ break;
+ } else {
+ /* err > 0 is the hint for retry */
+ dev_info(&pf->pdev->dev,
+ "MSI-X vectors wanted %d, retrying with %d\n",
+ vectors, err);
+ vectors = err;
+ }
+ }
+
+ if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ dev_info(&pf->pdev->dev,
+ "Couldn't get enough vectors, only %d available\n",
+ vectors);
+ vectors = 0;
+ }
+
+ return vectors;
+}
+
+/**
+ * i40e_init_msix - Setup the MSIX capability
+ * @pf: board private structure
+ *
+ * Work with the OS to set up the MSIX vectors needed.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_msix(struct i40e_pf *pf)
+{
+ i40e_status err = 0;
+ struct i40e_hw *hw = &pf->hw;
+ int v_budget, i;
+ int vec;
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ return -ENODEV;
+
+	/* The number of vectors we'll request is made up of:
+ * - Add 1 for "other" cause for Admin Queue events, etc.
+ * - The number of LAN queue pairs
+ * already adjusted for the NUMA node
+ * assumes symmetric Tx/Rx pairing
+ * - The number of VMDq pairs
+ * Once we count this up, try the request.
+ *
+ * If we can't get what we want, we'll simplify to nearly nothing
+ * and try again. If that still fails, we punt.
+ */
+ pf->num_lan_msix = pf->num_lan_qps;
+ pf->num_vmdq_msix = pf->num_vmdq_qps;
+ v_budget = 1 + pf->num_lan_msix;
+ v_budget += (pf->num_vmdq_vsis * pf->num_vmdq_msix);
+ if (pf->flags & I40E_FLAG_FDIR_ENABLED)
+ v_budget++;
+
+ /* Scale down if necessary, and the rings will share vectors */
+ v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
+
+ pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!pf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < v_budget; i++)
+ pf->msix_entries[i].entry = i;
+ vec = i40e_reserve_msix_vectors(pf, v_budget);
+ if (vec < I40E_MIN_MSIX) {
+ pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
+ kfree(pf->msix_entries);
+ pf->msix_entries = NULL;
+ return -ENODEV;
+
+ } else if (vec == I40E_MIN_MSIX) {
+ /* Adjust for minimal MSIX use */
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_vsis = 0;
+ pf->num_vmdq_qps = 0;
+ pf->num_vmdq_msix = 0;
+ pf->num_lan_qps = 1;
+ pf->num_lan_msix = 1;
+
+ } else if (vec != v_budget) {
+ /* Scale vector usage down */
+ pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
+ vec--; /* reserve the misc vector */
+
+ /* partition out the remaining vectors */
+ switch (vec) {
+ case 2:
+ pf->num_vmdq_vsis = 1;
+ pf->num_lan_msix = 1;
+ break;
+ case 3:
+ pf->num_vmdq_vsis = 1;
+ pf->num_lan_msix = 2;
+ break;
+ default:
+ pf->num_lan_msix = min_t(int, (vec / 2),
+ pf->num_lan_qps);
+ pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
+ I40E_DEFAULT_NUM_VMDQ_VSI);
+ break;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ int v_idx, num_q_vectors;
+
+ /* if not MSIX, give the one vector only to the LAN VSI */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ num_q_vectors = vsi->num_q_vectors;
+ else if (vsi == pf->vsi[pf->lan_vsi])
+ num_q_vectors = 1;
+ else
+ return -EINVAL;
+
+ vsi->q_vectors = kcalloc(num_q_vectors,
+ sizeof(struct i40e_q_vector),
+ GFP_KERNEL);
+ if (!vsi->q_vectors)
+ return -ENOMEM;
+
+ for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ vsi->q_vectors[v_idx].vsi = vsi;
+ vsi->q_vectors[v_idx].v_idx = v_idx;
+ cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
+ if (vsi->netdev)
+ netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
+ i40e_napi_poll, vsi->work_limit);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_init_interrupt_scheme - Determine proper interrupt scheme
+ * @pf: board private structure to initialize
+ **/
+static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
+{
+ int err = 0;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_init_msix(pf);
+ if (err) {
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+
+ /* rework the queue expectations without MSIX */
+ i40e_determine_queue_usage(pf);
+ }
+ }
+
+ if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+ err = pci_enable_msi(pf->pdev);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI init failed (%d), trying legacy.\n", err);
+ pf->flags &= ~I40E_FLAG_MSI_ENABLED;
+ }
+ }
+
+ /* track first vector for misc interrupts */
+ err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
+}
+
+/**
+ * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0, which is used to manage the
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * when in MSI or Legacy interrupt mode.
+ **/
+static int i40e_setup_misc_vector(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int err = 0;
+
+ /* Only request the irq if this is the first time through, and
+ * not when we're rebuilding after a Reset
+ */
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+ err = request_irq(pf->msix_entries[0].vector,
+ i40e_intr, 0, pf->misc_int_name, pf);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "request_irq for msix_misc failed: %d\n", err);
+ return -EFAULT;
+ }
+ }
+
+ i40e_enable_misc_int_causes(hw);
+
+ /* associate no queues to the misc vector */
+ wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);
+
+ i40e_flush(hw);
+
+ i40e_irq_dynamic_enable_icr0(pf);
+
+ return err;
+}
+
+/**
+ * i40e_config_rss - Prepare for RSS if used
+ * @pf: board private structure
+ **/
+static int i40e_config_rss(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 lut = 0;
+ int i, j;
+ u64 hena;
+ /* Set of random keys generated using kernel random number generator */
+ static const u32 seed[I40E_PFQF_HKEY_MAX_INDEX + 1] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c, 0x35897377,
+ 0x328b25e1, 0x4fa98922, 0xb7d90c14, 0xd5bad70d,
+ 0xcd15a2c1, 0xe8580225, 0x4a1e9d11, 0xfe5731be};
+
+ /* Fill out hash function seed */
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HKEY(i), seed[i]);
+
+ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
+ hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4)|
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+
+ /* The assumption is that lan qp count will be the highest
+ * qp count for any PF VSI that needs RSS.
+ * If multiple VSIs need RSS support, all the qp counts
+ * for those VSIs should be a power of 2 for RSS to work.
+ * If LAN VSI is the only consumer for RSS then this requirement
+ * is not necessary.
+ */
+ if (j == pf->rss_size)
+ j = 0;
+ /* lut = 4-byte sliding window of 4 lut entries */
+ lut = (lut << 8) | (j &
+ ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+ /* On i = 3, we have 4 entries in lut; write to the register */
+ if ((i & 3) == 3)
+ wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
+ }
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_sw_init - Initialize general software structures (struct i40e_pf)
+ * @pf: board private structure to initialize
+ *
+ * i40e_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int i40e_sw_init(struct i40e_pf *pf)
+{
+ int err = 0;
+ int size;
+
+ pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
+ (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
+ if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
+ if (I40E_DEBUG_USER & debug)
+ pf->hw.debug_mask = debug;
+ pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
+ I40E_DEFAULT_MSG_ENABLE);
+ }
+
+ /* Set default capability flags */
+ pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
+ I40E_FLAG_MSI_ENABLED |
+ I40E_FLAG_MSIX_ENABLED |
+ I40E_FLAG_RX_PS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_RX_1BUF_ENABLED;
+
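+	/* the LUT entry width bounds the largest queue index the hardware
+	 * can address, and therefore the maximum RSS queue count
+	 */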
+ pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+ if (pf->hw.func_caps.rss) {
+ pf->flags |= I40E_FLAG_RSS_ENABLED;
+ pf->rss_size = min_t(int, pf->rss_size_max,
+ nr_cpus_node(numa_node_id()));
+ } else {
+ pf->rss_size = 1;
+ }
+
+ if (pf->hw.func_caps.dcb)
+ pf->num_tc_qps = I40E_DEFAULT_QUEUES_PER_TC;
+ else
+ pf->num_tc_qps = 0;
+
+ if (pf->hw.func_caps.fd) {
+ /* FW/NVM is not yet fixed in this regard */
+ if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
+ (pf->hw.func_caps.fd_filters_best_effort > 0)) {
+ pf->flags |= I40E_FLAG_FDIR_ATR_ENABLED;
+ dev_info(&pf->pdev->dev,
+ "Flow Director ATR mode Enabled\n");
+ pf->flags |= I40E_FLAG_FDIR_ENABLED;
+ dev_info(&pf->pdev->dev,
+ "Flow Director Side Band mode Enabled\n");
+ pf->fdir_pf_filter_count =
+ pf->hw.func_caps.fd_filters_guaranteed;
+ }
+ } else {
+ pf->fdir_pf_filter_count = 0;
+ }
+
+ if (pf->hw.func_caps.vmdq) {
+ pf->flags |= I40E_FLAG_VMDQ_ENABLED;
+ pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
+ pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+ }
+
+ /* MFP mode enabled */
+ if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+ pf->flags |= I40E_FLAG_MFP_ENABLED;
+ dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
+ }
+
+#ifdef CONFIG_PCI_IOV
+ if (pf->hw.func_caps.num_vfs) {
+ pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
+ pf->flags |= I40E_FLAG_SRIOV_ENABLED;
+ pf->num_req_vfs = min_t(int,
+ pf->hw.func_caps.num_vfs,
+ I40E_MAX_VF_COUNT);
+ }
+#endif /* CONFIG_PCI_IOV */
+ pf->eeprom_version = 0xDEAD;
+ pf->lan_veb = I40E_NO_VEB;
+ pf->lan_vsi = I40E_NO_VSI;
+
+ /* set up queue assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
+ pf->qp_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->qp_pile) {
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
+ pf->qp_pile->search_hint = 0;
+
+ /* set up vector assignment tracking */
+ size = sizeof(struct i40e_lump_tracking)
+ + (sizeof(u16) * pf->hw.func_caps.num_msix_vectors);
+ pf->irq_pile = kzalloc(size, GFP_KERNEL);
+ if (!pf->irq_pile) {
+ kfree(pf->qp_pile);
+ err = -ENOMEM;
+ goto sw_init_done;
+ }
+ pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
+ pf->irq_pile->search_hint = 0;
+
+ mutex_init(&pf->switch_mutex);
+
+sw_init_done:
+ return err;
+}
+
+/**
+ * i40e_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ **/
+static int i40e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+
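+	/* toggle hardware VLAN stripping to match the requested feature set */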
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ i40e_vlan_stripping_enable(vsi);
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ return 0;
+}
+
+static const struct net_device_ops i40e_netdev_ops = {
+ .ndo_open = i40e_open,
+ .ndo_stop = i40e_close,
+ .ndo_start_xmit = i40e_lan_xmit_frame,
+ .ndo_get_stats64 = i40e_get_netdev_stats_struct,
+ .ndo_set_rx_mode = i40e_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = i40e_set_mac,
+ .ndo_change_mtu = i40e_change_mtu,
+ .ndo_tx_timeout = i40e_tx_timeout,
+ .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = i40e_netpoll,
+#endif
+ .ndo_setup_tc = i40e_setup_tc,
+ .ndo_set_features = i40e_set_features,
+ .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
+ .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_get_vf_config = i40e_ndo_get_vf_config,
+};
+
+/**
+ * i40e_config_netdev - Setup the netdev flags
+ * @vsi: the VSI being configured
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_config_netdev(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+ int etherdev_size;
+
+ etherdev_size = sizeof(struct i40e_netdev_priv);
+ netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
+ if (!netdev)
+ return -ENOMEM;
+
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_SG;
+
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_SCTP_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM |
+ NETIF_F_RXHASH |
+ 0;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features;
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ SET_NETDEV_DEV(netdev, &pf->pdev->dev);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ } else {
+ /* relate the VSI_VMDQ name to the VSI_MAIN name */
+ snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
+ pf->vsi[pf->lan_vsi]->netdev->name);
+ random_ether_addr(mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
+ }
+
+ memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ /* vlan gets same features (except vlan offload)
+ * after any tweaks for specific VSI types
+ */
+ netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* Setup netdev TC information */
+ i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
+
+ netdev->netdev_ops = &i40e_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+ i40e_set_ethtool_ops(netdev);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_delete - Delete a VSI from the switch
+ * @vsi: the VSI being removed
+ **/
+static void i40e_vsi_delete(struct i40e_vsi *vsi)
+{
+	/* removing the default VSI is not allowed */
+ if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
+ return;
+
+ /* there is no HW VSI for FDIR */
+ if (vsi->type == I40E_VSI_FDIR)
+ return;
+
+ i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
+ return;
+}
+
+/**
+ * i40e_add_vsi - Add a VSI to the switch
+ * @vsi: the VSI being configured
+ *
+ * This initializes a VSI context depending on the VSI type to be added and
+ * passes it down to the add_vsi aq command.
+ **/
+static int i40e_add_vsi(struct i40e_vsi *vsi)
+{
+ int ret = -ENODEV;
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi_context ctxt;
+ u8 enabled_tc = 0x1; /* TC0 enabled */
+ int f_count = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ switch (vsi->type) {
+ case I40E_VSI_MAIN:
+ /* The PF's main VSI is already setup as part of the
+ * device initialization, so we'll not bother with
+ * the add_vsi call, but we will retrieve the current
+ * VSI context.
+ */
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get pf vsi config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return -ENOENT;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+
+ enabled_tc = i40e_pf_get_tc_map(pf);
+
+ /* MFP mode setup queue map and update VSI */
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto err;
+ }
+ /* update the local VSI info queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+ } else {
+			/* Default/Main VSI is only enabled for TC0;
+ * reconfigure it to enable all TCs that are
+ * available on the port in SFP mode.
+ */
+ ret = i40e_vsi_config_tc(vsi, enabled_tc);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
+ enabled_tc, ret,
+ pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ }
+ }
+ break;
+
+ case I40E_VSI_FDIR:
+ /* no queue mapping or actual HW VSI needed */
+ vsi->info.valid_sections = 0;
+ vsi->seid = 0;
+ vsi->id = 0;
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+		return 0;
+
+ case I40E_VSI_VMDQ2:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = 0;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ ctxt.info.switch_id = 0;
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ case I40E_VSI_SRIOV:
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = 0x1; /* regular data port */
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+
+ /* This VSI is connected to VEB so the switch_id
+ * should be set to zero by default.
+ */
+ ctxt.info.switch_id = cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ /* Setup the VSI tx/rx queue map for TC0 only for now */
+ i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
+ break;
+
+ default:
+ return -ENODEV;
+ }
+
+ if (vsi->type != I40E_VSI_MAIN) {
+ ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add vsi failed, aq_err=%d\n",
+ vsi->back->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto err;
+ }
+ memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+ vsi->info.valid_sections = 0;
+ vsi->seid = ctxt.seid;
+ vsi->id = ctxt.vsi_number;
+ }
+
+ /* If macvlan filters already exist, force them to get loaded */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ f->changed = true;
+ f_count++;
+ }
+ if (f_count) {
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ pf->flags |= I40E_FLAG_FILTER_SYNC;
+ }
+
+ /* Update VSI BW information */
+ ret = i40e_vsi_get_bw_info(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't get vsi bw info, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ /* VSI is already added so not tearing that up */
+ ret = 0;
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * i40e_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_vsi_release(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_veb *veb = NULL;
+ struct i40e_pf *pf;
+ u16 uplink_seid;
+ int i, n;
+
+ pf = vsi->back;
+
+ /* release of a VEB-owner or last VSI is not allowed */
+ if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
+ vsi->seid, vsi->uplink_seid);
+ return -ENODEV;
+ }
+ if (vsi == pf->vsi[pf->lan_vsi] &&
+ !test_bit(__I40E_DOWN, &pf->state)) {
+ dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
+ return -ENODEV;
+ }
+
+ uplink_seid = vsi->uplink_seid;
+ if (vsi->type != I40E_VSI_SRIOV) {
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ if (vsi->netdev) {
+ /* results in a call to i40e_close() */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+ } else {
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+ }
+ i40e_vsi_disable_irq(vsi);
+ }
+
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan,
+ f->is_vf, f->is_netdev);
+ i40e_sync_vsi_filters(vsi);
+
+ i40e_vsi_delete(vsi);
+ i40e_vsi_free_q_vectors(vsi);
+ i40e_vsi_clear_rings(vsi);
+ i40e_vsi_clear(vsi);
+
+ /* If this was the last thing on the VEB, except for the
+ * controlling VSI, remove the VEB, which puts the controlling
+ * VSI onto the next level down in the switch.
+ *
+ * Well, okay, there's one more exception here: don't remove
+ * the orphan VEBs yet. We'll wait for an explicit remove request
+ * from up the network stack.
+ */
+ for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] &&
+ pf->vsi[i]->uplink_seid == uplink_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ n++; /* count the VSIs */
+ }
+ }
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == uplink_seid)
+ n++; /* count the VEBs */
+ if (pf->veb[i]->seid == uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (n == 0 && veb && veb->uplink_seid != 0)
+ i40e_veb_release(veb);
+
+ return 0;
+}
+
+/**
+ * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after i40e_vsi_mem_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ **/
+static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi->q_vectors) {
+ dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+ vsi->seid);
+ return -EEXIST;
+ }
+
+ if (vsi->base_vector) {
+ dev_info(&pf->pdev->dev,
+ "VSI %d has non-zero base vector %d\n",
+ vsi->seid, vsi->base_vector);
+ return -EEXIST;
+ }
+
+ ret = i40e_alloc_q_vectors(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "failed to allocate %d q_vector for VSI %d, ret=%d\n",
+ vsi->num_q_vectors, vsi->seid, ret);
+ vsi->num_q_vectors = 0;
+ goto vector_setup_out;
+ }
+
+ vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+ vsi->num_q_vectors, vsi->idx);
+ if (vsi->base_vector < 0) {
+ dev_info(&pf->pdev->dev,
+ "failed to get q tracking for VSI %d, err=%d\n",
+ vsi->seid, vsi->base_vector);
+ i40e_vsi_free_q_vectors(vsi);
+ ret = -ENOENT;
+ goto vector_setup_out;
+ }
+
+vector_setup_out:
+ return ret;
+}
+
+/**
+ * i40e_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @type: VSI type
+ * @uplink_seid: the switch element to link to
+ * @param1: usage depends upon VSI type. For VF types, indicates VF id
+ *
+ * This allocates the sw VSI structure and its queue resources, then adds
+ * the VSI to the identified VEB.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct
+ * on success, otherwise NULL on failure.
+ **/
+struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
+ u16 uplink_seid, u32 param1)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_veb *veb = NULL;
+ int ret, i;
+ int v_idx;
+
+ /* The requested uplink_seid must be either
+ * - the PF's port seid
+ * no VEB is needed because this is the PF
+ * or this is a Flow Director special case VSI
+ * - seid of an existing VEB
+ * - seid of a VSI that owns an existing VEB
+ * - seid of a VSI that doesn't own a VEB
+ * a new VEB is created and the VSI becomes the owner
+ * - seid of the PF VSI, which is what creates the first VEB
+ * this is a special case of the previous
+ *
+ * Find which uplink_seid we were given and create a new VEB if needed
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
+ veb = pf->veb[i];
+ break;
+ }
+ }
+
+ if (!veb && uplink_seid != pf->mac_seid) {
+
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
+ vsi = pf->vsi[i];
+ break;
+ }
+ }
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
+ uplink_seid);
+ return NULL;
+ }
+
+ if (vsi->uplink_seid == pf->mac_seid)
+ veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+ else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
+ veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ vsi->tc_config.enabled_tc);
+
+ for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
+ if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
+ veb = pf->veb[i];
+ }
+ if (!veb) {
+ dev_info(&pf->pdev->dev, "couldn't add VEB\n");
+ return NULL;
+ }
+
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+ uplink_seid = veb->seid;
+ }
+
+ /* get vsi sw struct */
+ v_idx = i40e_vsi_mem_alloc(pf, type);
+ if (v_idx < 0)
+ goto err_alloc;
+ vsi = pf->vsi[v_idx];
+ vsi->type = type;
+ vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
+
+ if (type == I40E_VSI_MAIN)
+ pf->lan_vsi = v_idx;
+ else if (type == I40E_VSI_SRIOV)
+ vsi->vf_id = param1;
+ /* assign it some queues */
+ ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
+ if (ret < 0) {
+ dev_info(&pf->pdev->dev, "VSI %d get_lump failed %d\n",
+ vsi->seid, ret);
+ goto err_vsi;
+ }
+ vsi->base_queue = ret;
+
+ /* get a VSI from the hardware */
+ vsi->uplink_seid = uplink_seid;
+ ret = i40e_add_vsi(vsi);
+ if (ret)
+ goto err_vsi;
+
+ switch (vsi->type) {
+ /* setup the netdev if needed */
+ case I40E_VSI_MAIN:
+ case I40E_VSI_VMDQ2:
+ ret = i40e_config_netdev(vsi);
+ if (ret)
+ goto err_netdev;
+ ret = register_netdev(vsi->netdev);
+ if (ret)
+ goto err_netdev;
+ vsi->netdev_registered = true;
+ netif_carrier_off(vsi->netdev);
+ /* fall through */
+
+ case I40E_VSI_FDIR:
+ /* set up vectors and rings if needed */
+ ret = i40e_vsi_setup_vectors(vsi);
+ if (ret)
+ goto err_msix;
+
+ ret = i40e_alloc_rings(vsi);
+ if (ret)
+ goto err_rings;
+
+ /* map all of the rings to the q_vectors */
+ i40e_vsi_map_rings_to_vectors(vsi);
+
+ i40e_vsi_reset_stats(vsi);
+ break;
+
+ default:
+ /* no netdev or rings for the other VSI types */
+ break;
+ }
+
+ return vsi;
+
+err_rings:
+ i40e_vsi_free_q_vectors(vsi);
+err_msix:
+ if (vsi->netdev_registered) {
+ vsi->netdev_registered = false;
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+ }
+err_netdev:
+ i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
+err_vsi:
+ i40e_vsi_clear(vsi);
+err_alloc:
+ return NULL;
+}
+
+/**
+ * i40e_veb_get_bw_info - Query VEB BW information
+ * @veb: the veb to query
+ *
+ * Query the Tx scheduler BW configuration data for given VEB
+ **/
+static int i40e_veb_get_bw_info(struct i40e_veb *veb)
+{
+ struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
+ struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
+ struct i40e_pf *pf = veb->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 tc_bw_max;
+ int ret = 0;
+ int i;
+
+ ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
+ &bw_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw config failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+
+ ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
+ &ets_data, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "query veb bw ets config failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+ goto out;
+ }
+
+ veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
+ veb->bw_max_quanta = ets_data.tc_bw_max;
+ veb->is_abs_credits = bw_data.absolute_credits_enable;
+ tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
+ (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
+ veb->bw_tc_limit_credits[i] =
+ le16_to_cpu(bw_data.tc_bw_limits[i]);
+ veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
+ * @pf: board private structure
+ *
+ * On error: returns error code (negative)
+ * On success: returns veb index in PF (positive)
+ **/
+static int i40e_veb_mem_alloc(struct i40e_pf *pf)
+{
+ int ret = -ENOENT;
+ struct i40e_veb *veb;
+ int i;
+
+ /* Need to protect the allocation of switch elements at the PF level */
+ mutex_lock(&pf->switch_mutex);
+
+ /* VEB list may be fragmented if VEB creation/destruction has
+ * been happening. We can afford to do a quick scan to look
+ * for any free slots in the list.
+ *
+	 * find the next empty veb slot
+ */
+ i = 0;
+ while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
+ i++;
+ if (i >= I40E_MAX_VEB) {
+ ret = -ENOMEM;
+ goto err_alloc_veb; /* out of VEB slots! */
+ }
+
+ veb = kzalloc(sizeof(*veb), GFP_KERNEL);
+ if (!veb) {
+ ret = -ENOMEM;
+ goto err_alloc_veb;
+ }
+ veb->pf = pf;
+ veb->idx = i;
+ veb->enabled_tc = 1;
+
+ pf->veb[i] = veb;
+ ret = i;
+err_alloc_veb:
+ mutex_unlock(&pf->switch_mutex);
+ return ret;
+}
+
+/**
+ * i40e_switch_branch_release - Delete a branch of the switch tree
+ * @branch: where to start deleting
+ *
+ * This uses recursion to find the tips of the branch to be
+ * removed, deleting until we get back to and can delete this VEB.
+ **/
+static void i40e_switch_branch_release(struct i40e_veb *branch)
+{
+ struct i40e_pf *pf = branch->pf;
+ u16 branch_seid = branch->seid;
+ u16 veb_idx = branch->idx;
+ int i;
+
+ /* release any VEBs on this VEB - RECURSION */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+ if (pf->veb[i]->uplink_seid == branch->seid)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Release the VSIs on this VEB, but not the owner VSI.
+ *
+ * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
+ * the VEB itself, so don't use (*branch) after this loop.
+ */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (!pf->vsi[i])
+ continue;
+ if (pf->vsi[i]->uplink_seid == branch_seid &&
+ (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
+ i40e_vsi_release(pf->vsi[i]);
+ }
+ }
+
+ /* There's one corner case where the VEB might not have been
+ * removed, so double check it here and remove it if needed.
+ * This case happens if the veb was created from the debugfs
+ * commands and no VSIs were added to it.
+ */
+ if (pf->veb[veb_idx])
+ i40e_veb_release(pf->veb[veb_idx]);
+}
+
+/**
+ * i40e_veb_clear - remove veb struct
+ * @veb: the veb to remove
+ **/
+static void i40e_veb_clear(struct i40e_veb *veb)
+{
+ if (!veb)
+ return;
+
+ if (veb->pf) {
+ struct i40e_pf *pf = veb->pf;
+
+ mutex_lock(&pf->switch_mutex);
+ if (pf->veb[veb->idx] == veb)
+ pf->veb[veb->idx] = NULL;
+ mutex_unlock(&pf->switch_mutex);
+ }
+
+ kfree(veb);
+}
+
+/**
+ * i40e_veb_release - Delete a VEB and free its resources
+ * @veb: the VEB being removed
+ **/
+void i40e_veb_release(struct i40e_veb *veb)
+{
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_pf *pf;
+ int i, n = 0;
+
+ pf = veb->pf;
+
+ /* find the remaining VSI and check for extras */
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
+ n++;
+ vsi = pf->vsi[i];
+ }
+ }
+ if (n != 1) {
+ dev_info(&pf->pdev->dev,
+ "can't remove VEB %d with %d VSIs left\n",
+ veb->seid, n);
+ return;
+ }
+
+ /* move the remaining VSI to uplink veb */
+ vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
+ if (veb->uplink_seid) {
+ vsi->uplink_seid = veb->uplink_seid;
+ if (veb->uplink_seid == pf->mac_seid)
+ vsi->veb_idx = I40E_NO_VEB;
+ else
+ vsi->veb_idx = veb->veb_idx;
+ } else {
+ /* floating VEB */
+ vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
+ }
+
+ i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
+ i40e_veb_clear(veb);
+
+ return;
+}
+
+/**
+ * i40e_add_veb - create the VEB in the switch
+ * @veb: the VEB to be instantiated
+ * @vsi: the controlling VSI
+ **/
+static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
+{
+ bool is_default = (vsi->idx == vsi->back->lan_vsi);
+ int ret;
+
+ /* get a VEB from the hardware */
+ ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+ veb->enabled_tc, is_default, &veb->seid, NULL);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't add VEB, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ return -EPERM;
+ }
+
+ /* get statistics counter */
+ ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+ &veb->stats_idx, NULL, NULL, NULL);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't get VEB statistics idx, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ return -EPERM;
+ }
+ ret = i40e_veb_get_bw_info(veb);
+ if (ret) {
+ dev_info(&veb->pf->pdev->dev,
+ "couldn't get VEB bw info, err %d, aq_err %d\n",
+ ret, veb->pf->hw.aq.asq_last_status);
+ i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+ return -ENOENT;
+ }
+
+ vsi->uplink_seid = veb->seid;
+ vsi->veb_idx = veb->idx;
+ vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
+
+ return 0;
+}
+
+/**
+ * i40e_veb_setup - Set up a VEB
+ * @pf: board private structure
+ * @flags: VEB setup flags
+ * @uplink_seid: the switch element to link to
+ * @vsi_seid: the initial VSI seid
+ * @enabled_tc: Enabled TC bit-map
+ *
+ * This allocates the sw VEB structure and links it into the switch.
+ * It is possible and legal for this to be a duplicate of an already
+ * existing VEB. It is also possible for both uplink and vsi seids
+ * to be zero, in order to create a floating VEB.
+ *
+ * Returns pointer to the successfully allocated VEB sw struct on
+ * success, otherwise returns NULL on failure.
+ **/
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
+ u16 uplink_seid, u16 vsi_seid,
+ u8 enabled_tc)
+{
+ struct i40e_veb *veb, *uplink_veb = NULL;
+ int vsi_idx, veb_idx;
+ int ret;
+
+ /* if one seid is 0, the other must be 0 to create a floating relay */
+ if ((uplink_seid == 0 || vsi_seid == 0) &&
+ (uplink_seid + vsi_seid != 0)) {
+ dev_info(&pf->pdev->dev,
+ "one, not both seid's are 0: uplink=%d vsi=%d\n",
+ uplink_seid, vsi_seid);
+ return NULL;
+ }
+
+ /* make sure there is such a vsi and uplink */
+ for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
+ break;
+ if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
+ vsi_seid);
+ return NULL;
+ }
+
+ if (uplink_seid && uplink_seid != pf->mac_seid) {
+ for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
+ if (pf->veb[veb_idx] &&
+ pf->veb[veb_idx]->seid == uplink_seid) {
+ uplink_veb = pf->veb[veb_idx];
+ break;
+ }
+ }
+ if (!uplink_veb) {
+ dev_info(&pf->pdev->dev,
+ "uplink seid %d not found\n", uplink_seid);
+ return NULL;
+ }
+ }
+
+ /* get veb sw struct */
+ veb_idx = i40e_veb_mem_alloc(pf);
+ if (veb_idx < 0)
+ goto err_alloc;
+ veb = pf->veb[veb_idx];
+ veb->flags = flags;
+ veb->uplink_seid = uplink_seid;
+ veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
+ veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
+
+ /* create the VEB in the switch */
+ ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
+ if (ret)
+ goto err_veb;
+
+ return veb;
+
+err_veb:
+ i40e_veb_clear(veb);
+err_alloc:
+ return NULL;
+}
+
+/**
+ * i40e_setup_pf_switch_element - set pf vars based on switch type
+ * @pf: board private structure
+ * @ele: element we are building info from
+ * @num_reported: total number of elements
+ * @printconfig: should we print the contents
+ *
+ * helper function to assist in extracting a few useful SEID values.
+ **/
+static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
+ struct i40e_aqc_switch_config_element_resp *ele,
+ u16 num_reported, bool printconfig)
+{
+ u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
+ u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
+ u8 element_type = ele->element_type;
+ u16 seid = le16_to_cpu(ele->seid);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "type=%d seid=%d uplink=%d downlink=%d\n",
+ element_type, seid, uplink_seid, downlink_seid);
+
+ switch (element_type) {
+ case I40E_SWITCH_ELEMENT_TYPE_MAC:
+ pf->mac_seid = seid;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VEB:
+ /* Main VEB? */
+ if (uplink_seid != pf->mac_seid)
+ break;
+ if (pf->lan_veb == I40E_NO_VEB) {
+ int v;
+
+ /* find existing or else empty VEB */
+ for (v = 0; v < I40E_MAX_VEB; v++) {
+ if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
+ pf->lan_veb = v;
+ break;
+ }
+ }
+ if (pf->lan_veb == I40E_NO_VEB) {
+ v = i40e_veb_mem_alloc(pf);
+ if (v < 0)
+ break;
+ pf->lan_veb = v;
+ }
+ }
+
+ pf->veb[pf->lan_veb]->seid = seid;
+ pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
+ pf->veb[pf->lan_veb]->pf = pf;
+ pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_VSI:
+ if (num_reported != 1)
+ break;
+ /* This is immediately after a reset so we can assume this is
+ * the PF's VSI
+ */
+ pf->mac_seid = uplink_seid;
+ pf->pf_seid = downlink_seid;
+ pf->main_vsi_seid = seid;
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "pf_seid=%d main_vsi_seid=%d\n",
+ pf->pf_seid, pf->main_vsi_seid);
+ break;
+ case I40E_SWITCH_ELEMENT_TYPE_PF:
+ case I40E_SWITCH_ELEMENT_TYPE_VF:
+ case I40E_SWITCH_ELEMENT_TYPE_EMP:
+ case I40E_SWITCH_ELEMENT_TYPE_BMC:
+ case I40E_SWITCH_ELEMENT_TYPE_PE:
+ case I40E_SWITCH_ELEMENT_TYPE_PA:
+ /* ignore these for now */
+ break;
+ default:
+ dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
+ element_type, seid);
+ break;
+ }
+}
+
+/**
+ * i40e_fetch_switch_configuration - Get switch config from firmware
+ * @pf: board private structure
+ * @printconfig: should we print the contents
+ *
+ * Get the current switch configuration from the device and
+ * extract a few useful SEID values.
+ **/
+int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
+{
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ u16 next_seid = 0;
+ int ret = 0;
+ u8 *aq_buf;
+ int i;
+
+ aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
+ if (!aq_buf)
+ return -ENOMEM;
+
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
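+	/* the switch config is returned in chunks; next_seid is the
+	 * continuation cookie and reads back as zero on the last chunk
+	 */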
+ do {
+ u16 num_reported, num_total;
+
+ ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
+ I40E_AQ_LARGE_BUF,
+ &next_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "get switch config failed %d aq_err=%x\n",
+ ret, pf->hw.aq.asq_last_status);
+ kfree(aq_buf);
+ return -ENOENT;
+ }
+
+ num_reported = le16_to_cpu(sw_config->header.num_reported);
+ num_total = le16_to_cpu(sw_config->header.num_total);
+
+ if (printconfig)
+ dev_info(&pf->pdev->dev,
+ "header: %d reported %d total\n",
+ num_reported, num_total);
+
+ if (num_reported) {
+ int sz = sizeof(*sw_config) * num_reported;
+
+ kfree(pf->sw_config);
+ pf->sw_config = kzalloc(sz, GFP_KERNEL);
+ if (pf->sw_config)
+ memcpy(pf->sw_config, sw_config, sz);
+ }
+
+ for (i = 0; i < num_reported; i++) {
+ struct i40e_aqc_switch_config_element_resp *ele =
+ &sw_config->element[i];
+
+ i40e_setup_pf_switch_element(pf, ele, num_reported,
+ printconfig);
+ }
+ } while (next_seid != 0);
+
+ kfree(aq_buf);
+ return ret;
+}
+
+/**
+ * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
+ * @pf: board private structure
+ *
+ * Returns 0 on success, negative value on failure
+ **/
+static int i40e_setup_pf_switch(struct i40e_pf *pf)
+{
+ int ret;
+
+ /* find out what's out there already */
+ ret = i40e_fetch_switch_configuration(pf, false);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "couldn't fetch switch config, err %d, aq_err %d\n",
+ ret, pf->hw.aq.asq_last_status);
+ return ret;
+ }
+ i40e_pf_reset_stats(pf);
+
+ /* fdir VSI must happen first to be sure it gets queue 0, but only
+ * if there is enough room for the fdir VSI
+ */
+ if (pf->num_lan_qps > 1)
+ i40e_fdir_setup(pf);
+
+ /* first time setup */
+ if (pf->lan_vsi == I40E_NO_VSI) {
+ struct i40e_vsi *vsi = NULL;
+ u16 uplink_seid;
+
+ /* Set up the PF VSI associated with the PF's main VSI
+ * that is already in the HW switch
+ */
+ if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
+ uplink_seid = pf->veb[pf->lan_veb]->seid;
+ else
+ uplink_seid = pf->mac_seid;
+
+ vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!vsi) {
+ dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
+ i40e_fdir_teardown(pf);
+ return -EAGAIN;
+ }
+ /* accommodate kcompat by copying the main VSI queue count
+ * into the pf, since this newer code pushes the pf queue
+ * info down a level into a VSI
+ */
+ pf->num_rx_queues = vsi->alloc_queue_pairs;
+ pf->num_tx_queues = vsi->alloc_queue_pairs;
+ } else {
+ /* force a reset of TC and queue layout configurations */
+ u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
+ pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
+ pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
+ i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ }
+ i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+
+ /* Setup static PF queue filter control settings */
+ ret = i40e_setup_pf_filter_control(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
+ ret);
+ /* Failure here should not stop continuing other steps */
+ }
+
+ /* enable RSS in the HW, even for only one queue, as the stack can use
+ * the hash
+ */
+ if ((pf->flags & I40E_FLAG_RSS_ENABLED))
+ i40e_config_rss(pf);
+
+ /* fill in link information and enable LSE reporting */
+ i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+ i40e_link_event(pf);
+
+	/* Initialize user-specific link properties */
+ pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
+ I40E_AQ_AN_COMPLETED) ? true : false);
+ pf->hw.fc.requested_mode = I40E_FC_DEFAULT;
+ if (pf->hw.phy.link_info.an_info &
+ (I40E_AQ_LINK_PAUSE_TX | I40E_AQ_LINK_PAUSE_RX))
+ pf->hw.fc.current_mode = I40E_FC_FULL;
+ else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
+ else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
+ else
+ pf->hw.fc.current_mode = I40E_FC_DEFAULT;
+
+ return ret;
+}
+
+/**
+ * i40e_set_rss_size - helper to set rss_size
+ * @pf: board private structure
+ * @queues_left: how many queues are available for use
+ **/
+static u16 i40e_set_rss_size(struct i40e_pf *pf, int queues_left)
+{
+ int num_tc0;
+
+ num_tc0 = min_t(int, queues_left, pf->rss_size_max);
+ num_tc0 = min_t(int, num_tc0, nr_cpus_node(numa_node_id()));
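+	/* RSS spreads traffic evenly only over a power-of-2 queue count */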
+ num_tc0 = rounddown_pow_of_two(num_tc0);
+
+ return num_tc0;
+}
+
+/**
+ * i40e_determine_queue_usage - Work out queue distribution
+ * @pf: board private structure
+ **/
+static void i40e_determine_queue_usage(struct i40e_pf *pf)
+{
+ int accum_tc_size;
+ int queues_left;
+
+ pf->num_lan_qps = 0;
+ pf->num_tc_qps = rounddown_pow_of_two(pf->num_tc_qps);
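+	/* queues held back for TCs 1 thru 7 when DCB is enabled */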
+ accum_tc_size = (I40E_MAX_TRAFFIC_CLASS - 1) * pf->num_tc_qps;
+
+ /* Find the max queues to be put into basic use. We'll always be
+ * using TC0, whether or not DCB is running, and TC0 will get the
+ * big RSS set.
+ */
+ queues_left = pf->hw.func_caps.num_tx_qp;
+
+ if (!((pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+ (pf->flags & I40E_FLAG_MQ_ENABLED)) ||
+ !(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FDIR_ENABLED | I40E_FLAG_DCB_ENABLED)) ||
+ (queues_left == 1)) {
+
+ /* one qp for PF, no queues for anything else */
+ queues_left = 0;
+ pf->rss_size = pf->num_lan_qps = 1;
+
+ /* make sure all the fancies are disabled */
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_MQ_ENABLED |
+ I40E_FLAG_FDIR_ENABLED |
+ I40E_FLAG_FDIR_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_SRIOV_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ pf->num_lan_qps = pf->rss_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ !(pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ /* save num_tc_qps queues for TCs 1 thru 7 and the rest
+ * are set up for RSS in TC0
+ */
+ queues_left -= accum_tc_size;
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for DCB\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ queues_left -= 1; /* save 1 queue for FD */
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for Flow Director\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size;
+
+ } else if (pf->flags & I40E_FLAG_RSS_ENABLED &&
+ (pf->flags & I40E_FLAG_FDIR_ENABLED) &&
+ (pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ /* save 1 queue for TCs 1 thru 7,
+ * 1 queue for flow director,
+ * and the rest are set up for RSS in TC0
+ */
+ queues_left -= 1;
+ queues_left -= accum_tc_size;
+
+ pf->rss_size = i40e_set_rss_size(pf, queues_left);
+ queues_left -= pf->rss_size;
+ if (queues_left < 0) {
+ dev_info(&pf->pdev->dev, "not enough queues for DCB and Flow Director\n");
+ return;
+ }
+
+ pf->num_lan_qps = pf->rss_size + accum_tc_size;
+
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Invalid configuration, flags=0x%08llx\n", pf->flags);
+ return;
+ }
+
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ pf->num_vf_qps && pf->num_req_vfs && queues_left) {
+ pf->num_req_vfs = min_t(int, pf->num_req_vfs, (queues_left /
+ pf->num_vf_qps));
+ queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
+ }
+
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
+ pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
+ (queues_left / pf->num_vmdq_qps));
+ queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
+ }
+
+ return;
+}
+
+/**
+ * i40e_setup_pf_filter_control - Setup PF static filter control
+ * @pf: PF to be setup
+ *
+ * i40e_setup_pf_filter_control sets up a PF's initial filter control
+ * settings. If PE/FCoE are enabled, it also sets the per-PF filter
+ * sizes required for them, and it enables the Flow Director, ethertype
+ * and macvlan filters for the PF.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
+{
+ struct i40e_filter_control_settings *settings = &pf->filter_settings;
+
+ settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
+
+ /* Flow Director is enabled */
+ if (pf->flags & (I40E_FLAG_FDIR_ENABLED | I40E_FLAG_FDIR_ATR_ENABLED))
+ settings->enable_fdir = true;
+
+ /* Ethtype and MACVLAN filters enabled for PF */
+ settings->enable_ethtype = true;
+ settings->enable_macvlan = true;
+
+ if (i40e_set_filter_control(&pf->hw, settings))
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * i40e_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in i40e_pci_tbl
+ *
+ * i40e_probe initializes a PF identified by a pci_dev structure.
+ * It performs the OS initialization, configures the PF private
+ * structure, and issues a hardware reset.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct i40e_driver_version dv;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int err = 0;
+ u32 len;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+ return err;
+
+ /* set up for high or low dma */
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /* coherent mask for the same size will always succeed if
+ * dma_set_mask does
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	} else {
+		dev_err(&pdev->dev, "DMA configuration failed\n");
+		err = -EIO;
+		goto err_dma;
+ }
+
+ /* set up pci connections */
+ err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+ IORESOURCE_MEM), i40e_driver_name);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_request_selected_regions failed %d\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_set_master(pdev);
+
+ /* Now that we have a PCI connection, we need to do the
+ * low level device setup. This is primarily setting up
+ * the Admin Queue structures and then querying for the
+ * device's current profile information.
+ */
+ pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ if (!pf) {
+ err = -ENOMEM;
+ goto err_pf_alloc;
+ }
+ pf->next_vsi = 0;
+ pf->pdev = pdev;
+ set_bit(__I40E_DOWN, &pf->state);
+
+ hw = &pf->hw;
+ hw->back = pf;
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
+ (unsigned int)pci_resource_start(pdev, 0),
+ (unsigned int)pci_resource_len(pdev, 0), err);
+ goto err_ioremap;
+ }
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+
+ /* Reset here to make sure all is clean and to define PF 'n' */
+ err = i40e_pf_reset(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
+ }
+ pf->pfr_count++;
+
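+	/* size the admin send/receive queues and set the AQ work limit */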
+ hw->aq.num_arq_entries = I40E_AQ_LEN;
+ hw->aq.num_asq_entries = I40E_AQ_LEN;
+ hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
+ pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
+ snprintf(pf->misc_int_name, sizeof(pf->misc_int_name) - 1,
+ "%s-pf%d:misc",
+ dev_driver_string(&pf->pdev->dev), pf->hw.pf_id);
+
+ err = i40e_init_shared_code(hw);
+ if (err) {
+ dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+ goto err_pf_reset;
+ }
+
+ err = i40e_init_adminq(hw);
+ dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
+ if (err) {
+ dev_info(&pdev->dev,
+ "init_adminq failed: %d expecting API %02x.%02x\n",
+ err,
+ I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+ goto err_pf_reset;
+ }
+
+ err = i40e_get_capabilities(pf);
+ if (err)
+ goto err_adminq_setup;
+
+ err = i40e_sw_init(pf);
+ if (err) {
+ dev_info(&pdev->dev, "sw_init failed: %d\n", err);
+ goto err_sw_init;
+ }
+
+ err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+ if (err) {
+ dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
+ goto err_init_lan_hmc;
+ }
+
+ err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (err) {
+ dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
+ err = -ENOENT;
+ goto err_configure_lan_hmc;
+ }
+
+ i40e_get_mac_addr(hw, hw->mac.addr);
+ if (i40e_validate_mac_addr(hw->mac.addr)) {
+ dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
+ err = -EIO;
+ goto err_mac_addr;
+ }
+ dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
+ memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+
+ pci_set_drvdata(pdev, pf);
+ pci_save_state(pdev);
+
+ /* set up periodic task facility */
+ setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, &pf->state);
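+	/* flag a link status refresh for the service path */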
+ pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
+ pf->link_check_timeout = jiffies;
+
+ /* set up the main switch operations */
+ i40e_determine_queue_usage(pf);
+ i40e_init_interrupt_scheme(pf);
+
+ /* Set up the *vsi struct based on the number of VSIs in the HW,
+ * and set up our local tracking of the MAIN PF vsi.
+ */
+ len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+	pf->vsi = kzalloc(len, GFP_KERNEL);
+	if (!pf->vsi) {
+		err = -ENOMEM;
+		goto err_switch_setup;
+	}
+
+ err = i40e_setup_pf_switch(pf);
+ if (err) {
+ dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
+ goto err_vsis;
+ }
+
+ /* The main driver is (mostly) up and happy. We need to set this state
+ * before setting up the misc vector or we get a race and the vector
+ * ends up disabled forever.
+ */
+ clear_bit(__I40E_DOWN, &pf->state);
+
+ /* In case of MSIX we are going to setup the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets setup at open.
+ */
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+ if (err) {
+ dev_info(&pdev->dev,
+ "setup of misc vector failed: %d\n", err);
+ goto err_vsis;
+ }
+ }
+
+ /* prep for VF support */
+ if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+ (pf->flags & I40E_FLAG_MSIX_ENABLED)) {
+ u32 val;
+
+ /* disable link interrupts for VFs */
+ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
+ val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
+ wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
+ i40e_flush(hw);
+ }
+
+ i40e_dbg_pf_init(pf);
+
+ /* tell the firmware that we're starting */
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+ /* Unwind what we've done if something failed in the setup */
+err_vsis:
+ set_bit(__I40E_DOWN, &pf->state);
+err_switch_setup:
+ i40e_clear_interrupt_scheme(pf);
+ kfree(pf->vsi);
+ del_timer_sync(&pf->service_timer);
+err_mac_addr:
+err_configure_lan_hmc:
+ (void)i40e_shutdown_lan_hmc(hw);
+err_init_lan_hmc:
+ kfree(pf->qp_pile);
+ kfree(pf->irq_pile);
+err_sw_init:
+err_adminq_setup:
+ (void)i40e_shutdown_adminq(hw);
+err_pf_reset:
+ iounmap(hw->hw_addr);
+err_ioremap:
+ kfree(pf);
+err_pf_alloc:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * i40e_remove - Device removal routine
+ * @pdev: PCI device information struct
+ *
+ * i40e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void i40e_remove(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ i40e_status ret_code;
+ u32 reg;
+ int i;
+
+ i40e_dbg_pf_exit(pf);
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
+ /* no more scheduling of any task */
+ set_bit(__I40E_DOWN, &pf->state);
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
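+	/* tear down the Flow Director resources before releasing the VSIs */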
+ i40e_fdir_teardown(pf);
+
+ /* If there is a switch structure or any orphans, remove them.
+ * This will leave only the PF's VSI remaining.
+ */
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ if (!pf->veb[i])
+ continue;
+
+ if (pf->veb[i]->uplink_seid == pf->mac_seid ||
+ pf->veb[i]->uplink_seid == 0)
+ i40e_switch_branch_release(pf->veb[i]);
+ }
+
+ /* Now we can shutdown the PF's VSI, just before we kill
+ * adminq and hmc.
+ */
+ if (pf->vsi[pf->lan_vsi])
+ i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+
+ i40e_stop_misc_vector(pf);
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ synchronize_irq(pf->msix_entries[0].vector);
+ free_irq(pf->msix_entries[0].vector, pf);
+ }
+
+ /* shutdown and destroy the HMC */
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n", ret_code);
+
+ /* shutdown the adminq */
+ i40e_aq_queue_shutdown(&pf->hw, true);
+ ret_code = i40e_shutdown_adminq(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the Admin Queue resources: %d\n",
+ ret_code);
+
+ /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
+ i40e_clear_interrupt_scheme(pf);
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ if (pf->vsi[i]) {
+ i40e_vsi_clear_rings(pf->vsi[i]);
+ i40e_vsi_clear(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < I40E_MAX_VEB; i++) {
+ kfree(pf->veb[i]);
+ pf->veb[i] = NULL;
+ }
+
+ kfree(pf->qp_pile);
+ kfree(pf->irq_pile);
+ kfree(pf->sw_config);
+ kfree(pf->vsi);
+
+ /* force a PF reset to clean anything leftover */
+ reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
+ wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+ i40e_flush(&pf->hw);
+
+ iounmap(pf->hw.hw_addr);
+ kfree(pf);
+ pci_release_selected_regions(pdev,
+ pci_select_bars(pdev, IORESOURCE_MEM));
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * i40e_pci_error_detected - warning that something funky happened in PCI land
+ * @pdev: PCI device information struct
+ *
+ * Called to warn that something happened and the error handling steps
+ * are in progress. Allows the driver to quiesce things and be ready
+ * for remediation.
+ **/
+static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state error)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
+
+ /* shutdown all operations */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * i40e_pci_error_slot_reset - a PCI slot reset just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to determine whether the driver can work with the device now
+ * that the PCI slot has been reset. If a basic connection seems good
+ * (registers are readable and have sane content) then return a
+ * happy little PCI_ERS_RESULT_xxx.
+ **/
+static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ if (pci_enable_device_mem(pdev)) {
+ dev_info(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
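+		/* a cleared global reset trigger register indicates the
+		 * device came through the slot reset in a usable state
+		 */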
+ reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
+ if (reg == 0)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err) {
+ dev_info(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+ err);
+ /* non-fatal, continue */
+ }
+
+ return result;
+}
+
+/**
+ * i40e_pci_error_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error
+ * and/or reset recovery has finished.
+ **/
+static void i40e_pci_error_resume(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "%s\n", __func__);
+ i40e_handle_reset_warning(pf);
+}
+
+static const struct pci_error_handlers i40e_err_handler = {
+ .error_detected = i40e_pci_error_detected,
+ .slot_reset = i40e_pci_error_slot_reset,
+ .resume = i40e_pci_error_resume,
+};
+
+static struct pci_driver i40e_driver = {
+ .name = i40e_driver_name,
+ .id_table = i40e_pci_tbl,
+ .probe = i40e_probe,
+ .remove = i40e_remove,
+ .err_handler = &i40e_err_handler,
+ .sriov_configure = i40e_pci_sriov_configure,
+};
+
+/**
+ * i40e_init_module - Driver registration routine
+ *
+ * i40e_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init i40e_init_module(void)
+{
+ pr_info("%s: %s - version %s\n", i40e_driver_name,
+ i40e_driver_string, i40e_driver_version_str);
+ pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
+ i40e_dbg_init();
+ return pci_register_driver(&i40e_driver);
+}
+module_init(i40e_init_module);
+
+/**
+ * i40e_exit_module - Driver exit cleanup routine
+ *
+ * i40e_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit i40e_exit_module(void)
+{
+ pci_unregister_driver(&i40e_driver);
+ i40e_dbg_exit();
+}
+module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
new file mode 100644
index 00000000000..97e1bb30ef8
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -0,0 +1,391 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e_prototype.h"
+
+/**
+ * i40e_init_nvm - Initialize NVM function pointers.
+ * @hw: pointer to the HW structure.
+ *
+ * Sets up the function pointers and the NVM info structure. Should be called
+ * once per NVM initialization, e.g. inside the i40e_init_shared_code().
+ * Note that the NVM term is used here (and in all functions covered
+ * in this file) as an equivalent of the FLASH part mapped into the SR.
+ * The FLASH is always accessed through the Shadow RAM.
+ **/
+i40e_status i40e_init_nvm(struct i40e_hw *hw)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+ i40e_status ret_code = 0;
+ u32 fla, gens;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the nvm programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens = rd32(hw, I40E_GLNVM_GENS);
+ sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+ I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+	/* The SR size is 2^sr_size KB; convert it to a word count. */
+ nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode. */
+ fla = rd32(hw, I40E_GLNVM_FLA);
+ if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */
+ /* Max NVM timeout. */
+ nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+ nvm->blank_nvm_mode = false;
+ } else { /* Blank programming mode. */
+ nvm->blank_nvm_mode = true;
+ ret_code = I40E_ERR_NVM_BLANK_MODE;
+ hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership.
+ * @hw: pointer to the HW structure.
+ * @access: NVM access type (read or write).
+ *
+ * This function will request NVM ownership, for reading or writing,
+ * via the proper Admin Command.
+ **/
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
+{
+ i40e_status ret_code = 0;
+ u64 gtime, timeout;
+ u64 time = 0;
+
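+	/* in blank NVM programming mode there is no ownership to acquire */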
+ if (hw->nvm.blank_nvm_mode)
+ goto i40e_i40e_acquire_nvm_exit;
+
+ ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+ 0, &time, NULL);
+ /* Reading the Global Device Timer. */
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+ /* Store the timeout. */
+ hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
+
+ if (ret_code) {
+ /* Set the polling timeout. */
+ if (time > I40E_MAX_NVM_TIMEOUT)
+ timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
+ + gtime;
+ else
+ timeout = hw->nvm.hw_semaphore_timeout;
+		/* Poll until the current NVM owner times out. */
+ while (gtime < timeout) {
+ usleep_range(10000, 20000);
+ ret_code = i40e_aq_request_resource(hw,
+ I40E_NVM_RESOURCE_ID,
+ access, 0, &time,
+ NULL);
+ if (!ret_code) {
+ hw->nvm.hw_semaphore_timeout =
+ I40E_MS_TO_GTIME(time) + gtime;
+ break;
+ }
+ gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+ }
+ if (ret_code) {
+ hw->nvm.hw_semaphore_timeout = 0;
+ hw->nvm.hw_semaphore_wait =
+ I40E_MS_TO_GTIME(time) + gtime;
+ hw_dbg(hw, "NVM acquire timed out, wait %llu ms before trying again.\n",
+ time);
+ }
+ }
+
+i40e_i40e_acquire_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership.
+ * @hw: pointer to the HW structure.
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+ if (!hw->nvm.blank_nvm_mode)
+ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit.
+ * @hw: pointer to the HW structure.
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+ u32 srctl, wait_cnt;
+
+ /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */
+ for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+ srctl = rd32(hw, I40E_GLNVM_SRCTL);
+ if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+ ret_code = 0;
+ break;
+ }
+ udelay(5);
+ }
+ if (ret_code == I40E_ERR_TIMEOUT)
+		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_srctl - Reads Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @data: word read from the Shadow RAM.
+ *
+ * Reads a 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+static i40e_status i40e_read_nvm_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = I40E_ERR_TIMEOUT;
+ u32 sr_reg;
+
+ if (offset >= hw->nvm.sr_size) {
+ hw_dbg(hw, "NVM read error: Offset beyond Shadow RAM limit.\n");
+ ret_code = I40E_ERR_PARAM;
+ goto read_nvm_exit;
+ }
+
+ /* Poll the done bit first. */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ /* Write the address and start reading. */
+ sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+ (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+ wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+ /* Poll I40E_GLNVM_SRCTL until the done bit is set. */
+ ret_code = i40e_poll_sr_srctl_done_bit(hw);
+ if (!ret_code) {
+ sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+ *data = (u16)((sr_reg &
+ I40E_GLNVM_SRDATA_RDDATA_MASK)
+ >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+ }
+ }
+ if (ret_code)
+ hw_dbg(hw, "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+ offset);
+
+read_nvm_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM word.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @data: word read from the Shadow RAM.
+ *
+ * Reads a 16 bit word from the Shadow RAM. Each read is preceded by
+ * acquiring NVM ownership and followed by releasing it.
+ **/
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
+{
+ i40e_status ret_code = 0;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer.
+ * @hw: pointer to the HW structure.
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: number of words to read (in) &
+ * number of words read before the NVM ownership timeout (out).
+ * @data: words read from the Shadow RAM.
+ *
+ * Reads 16 bit words (data buffer) from the SR using the
+ * i40e_read_nvm_srctl() method. The buffer read is preceded by acquiring
+ * NVM ownership and followed by releasing it.
+ **/
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
+{
+ i40e_status ret_code = 0;
+ u16 index, word;
+ u32 time;
+
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+		/* Loop through the selected region. */
+ for (word = 0; word < *words; word++) {
+ index = offset + word;
+ ret_code = i40e_read_nvm_srctl(hw, index, &data[word]);
+ if (ret_code)
+ break;
+			/* Check that we haven't exceeded the semaphore timeout. */
+ time = rd32(hw, I40E_GLVFGEN_TIMER);
+ if (time >= hw->nvm.hw_semaphore_timeout) {
+ ret_code = I40E_ERR_TIMEOUT;
+ hw_dbg(hw, "NVM read error: timeout.\n");
+ break;
+ }
+ }
+ /* Update the number of words read from the Shadow RAM. */
+ *words = word;
+ /* Release the NVM ownership. */
+ i40e_release_nvm(hw);
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the calculated checksum
+ *
+ * This function calculates a SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
+ * VPD module are customer specific and unknown, so this function skips the
+ * maximum possible size of the VPD module (1kB).
+ **/
+static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ i40e_status ret_code = 0;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 vpd_module = 0;
+ u16 word = 0;
+ u32 i = 0;
+
+ /* read pointer to VPD area */
+ ret_code = i40e_read_nvm_srctl(hw, I40E_SR_VPD_PTR, &vpd_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* read pointer to PCIe Alt Auto-load module */
+ ret_code = i40e_read_nvm_srctl(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->nvm.sr_size; i++) {
+ /* Skip Checksum word */
+ if (i == I40E_SR_SW_CHECKSUM_WORD)
+ i++;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i == (u32)vpd_module) {
+ i += (I40E_SR_VPD_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i == (u32)pcie_alt_module) {
+ i += (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2);
+ if (i >= hw->nvm.sr_size)
+ break;
+ }
+
+ ret_code = i40e_read_nvm_srctl(hw, (u16)i, &word);
+ if (ret_code) {
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+ goto i40e_calc_nvm_checksum_exit;
+ }
+ checksum_local += word;
+ }
+
+ *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, @checksum can be NULL.
+ **/
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
+{
+ i40e_status ret_code = 0;
+ u16 checksum_sr = 0;
+ u16 checksum_local;
+
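+	/* hold NVM ownership across the whole calculation and the SR read */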
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (ret_code)
+ goto i40e_validate_nvm_checksum_exit;
+
+ ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+ if (ret_code)
+ goto i40e_validate_nvm_checksum_free;
+
+ /* Do not use i40e_read_nvm_word() because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ i40e_read_nvm_srctl(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+ /* Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (checksum_local != checksum_sr)
+ ret_code = I40E_ERR_NVM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum)
+ *checksum = checksum_local;
+
+i40e_validate_nvm_checksum_free:
+ i40e_release_nvm(hw);
+
+i40e_validate_nvm_checksum_exit:
+ return ret_code;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
new file mode 100644
index 00000000000..702c81ba86e
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/tcp.h>
+#include <linux/pci.h>
+#include <linux/highuid.h>
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+/* This file is the glue between the shared code and
+ * the actual OS primitives.
+ */
+
+#define hw_dbg(hw, S, A...) do {} while (0)
+
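+/* register accessors for the shared code; hw_addr is the BAR 0 mapping
+ * set up by the driver at probe time
+ */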
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
+
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_dma_mem(h, m, unused, s, a) \
+ i40e_allocate_dma_mem_d(h, m, s, a)
+#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
+#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ pr_info("i40e %02x.%x " s, \
+ (h)->bus.device, (h)->bus.func, \
+ ##__VA_ARGS__); \
+} while (0)
+
+typedef enum i40e_status_code i40e_status;
+#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
new file mode 100644
index 00000000000..f75bb9ccc90
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -0,0 +1,239 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These are mostly
+ * here because they are needed even before init has happened,
+ * and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+i40e_status i40e_init_adminq(struct i40e_hw *hw);
+i40e_status i40e_shutdown_adminq(struct i40e_hw *hw);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw,
+ enum i40e_debug_mask mask,
+ void *desc,
+ void *buffer);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode);
+
+/* admin send queue commands */
+
+i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading);
+i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+ struct i40e_driver_version *dv,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_add_remove_vlan_element_data *v_list,
+ u8 count, struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+ enum i40e_aq_hmc_profile profile,
+ u8 pe_vf_enabled_count,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+/* i40e_common */
+i40e_status i40e_init_shared_code(struct i40e_hw *hw);
+i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+bool i40e_get_link_status(struct i40e_hw *hw);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
+ u8 *mac_addr);
+i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+/* prototypes for functions used for NVM access */
+i40e_status i40e_init_nvm(struct i40e_hw *hw);
+i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+i40e_status i40e_read_nvm_srrd(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+
+/* prototype for functions used for SW locks */
+
+/* i40e_common for VF drivers */
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+ struct i40e_virtchnl_vf_resource *msg);
+i40e_status i40e_vf_reset(struct i40e_hw *hw);
+i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
new file mode 100644
index 00000000000..6bd333cde28
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -0,0 +1,4688 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_MAX_INDEX 127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_MAX_INDEX 127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_MAX_INDEX 127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_MAX_INDEX 127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_MAX_INDEX 127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_MAX_INDEX 127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_MAX_INDEX 127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_MAX_INDEX 127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_MAX_INDEX 127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQT_MAX_INDEX 127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TDPUC 0x00044100
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
+#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
+#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT 9
+#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
+#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
+#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
+#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT 29
+#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT 31
+#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MAX_INDEX 3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
+#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
+#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
+#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
+#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSRWD_MAX_INDEX 3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_PE_ENA 0x000B81A0
+#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
+#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
+#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
+#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
+#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT 3
+#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
+#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
+#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000
+#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
+#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
+#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
+#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x3FFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
+#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_SDPART_MAX_INDEX 15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
+#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_UFUSE 0x00094008
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT 31
+#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ITR0_MAX_INDEX 2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITRN_MAX_INDEX 2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATEN_MAX_INDEX 511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_MAX_INDEX 1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_TQCTL_MAX_INDEX 1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
+#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_MAX_INDEX 127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT 31
+#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ITR0_MAX_INDEX 2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN_MAX_INDEX 2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_AEQCTL_MAX_INDEX 127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_CEQCTL_MAX_INDEX 511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLST0_MAX_INDEX 127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_RATE0_MAX_INDEX 127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATEN_MAX_INDEX 511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
+#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
+#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_MAX_INDEX 1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_MAX_INDEX 1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_MAX_INDEX 1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
+#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT 2
+#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_MAX_INDEX 1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
+#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_MAX_INDEX 1535
+#define I40E_QTX_HEAD_HEAD_SHIFT 0
+#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_TAIL_MAX_INDEX 1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPLAN_MAPENA_MAX_INDEX 127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_QTABLE_MAX_INDEX 15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSILAN_QBASE_MAX_INDEX 383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QTABLE_MAX_INDEX 15
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT 16
+#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HLCTLA 0x001E4760
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
+#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
+#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSECTL1 0x001E3560
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
+#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
+#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
+#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
+#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
+#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
+#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x3FF << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_METF_MAX_INDEX 3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
+#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_MSIX_PBA_MAX_INDEX 5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MAX_INDEX 128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MAX_INDEX 128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MAX_INDEX 128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TVCTRL_MAX_INDEX 128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_VFMSIX_PBA1_MAX_INDEX 19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
+#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
+#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
+#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
+#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
+#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
+#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
+#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
+#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
+#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT 30
+#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
+#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
+#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SUBSYSID 0x000BE48C
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
+#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
+#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
+#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
+#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PFDEVID 0x000BE080
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_VFDEVID 0x000BE100
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
+#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
+#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
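A note on the statistics blocks above: each counter is split into a ..LO/..HI register pair, and the per-VF variants take an index that selects the VF slot. The sketch below is illustrative only, assuming the HI half carries the bits above 31 (as the 0xFFFF field mask suggests); rd32() is a hypothetical MMIO read helper, not part of this header or the driver API.

/*
 * Illustrative sketch: combining a paired ..LO/..HI statistic into one
 * 64-bit counter for a given VF, using the macros defined above.
 * rd32() is a hypothetical placeholder for whatever register-access
 * primitive the driver actually uses.
 */
#include <stdint.h>

static inline uint32_t rd32(uint32_t reg)
{
	/* placeholder: a real driver would read the mapped BAR here */
	(void)reg;
	return 0;
}

static inline uint64_t read_vf_udp_tx_pkts(unsigned int vf)
{
	/* the index parameter of the address macros selects the VF slot */
	uint32_t lo = rd32(I40E_GLPES_VFUDPTXPKTSLO(vf));
	uint32_t hi = rd32(I40E_GLPES_VFUDPTXPKTSHI(vf)) &
		      I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK;

	/* assumption: HI holds the upper bits of the 64-bit count */
	return ((uint64_t)hi << 32) | lo;
}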
+#define I40E_GLPM_DMACR 0x000881F4
+#define I40E_GLPM_DMACR_DMACWT_SHIFT 0
+#define I40E_GLPM_DMACR_DMACWT_MASK (0xFFFF << I40E_GLPM_DMACR_DMACWT_SHIFT)
+#define I40E_GLPM_DMACR_EXIT_DC_SHIFT 29
+#define I40E_GLPM_DMACR_EXIT_DC_MASK (0x1 << I40E_GLPM_DMACR_EXIT_DC_SHIFT)
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT 30
+#define I40E_GLPM_DMACR_LX_COALESCING_INDICATION_MASK (0x1 << I40E_GLPM_DMACR_LX_COALESCING_INDICATION_SHIFT)
+#define I40E_GLPM_DMACR_DMAC_EN_SHIFT 31
+#define I40E_GLPM_DMACR_DMAC_EN_MASK (0x1 << I40E_GLPM_DMACR_DMAC_EN_SHIFT)
+#define I40E_GLPM_LTRC 0x000BE500
+#define I40E_GLPM_LTRC_SLTRV_SHIFT 0
+#define I40E_GLPM_LTRC_SLTRV_MASK (0x3FF << I40E_GLPM_LTRC_SLTRV_SHIFT)
+#define I40E_GLPM_LTRC_SSCALE_SHIFT 10
+#define I40E_GLPM_LTRC_SSCALE_MASK (0x7 << I40E_GLPM_LTRC_SSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT 15
+#define I40E_GLPM_LTRC_LTRS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRS_REQUIREMENT_SHIFT)
+#define I40E_GLPM_LTRC_NSLTRV_SHIFT 16
+#define I40E_GLPM_LTRC_NSLTRV_MASK (0x3FF << I40E_GLPM_LTRC_NSLTRV_SHIFT)
+#define I40E_GLPM_LTRC_NSSCALE_SHIFT 26
+#define I40E_GLPM_LTRC_NSSCALE_MASK (0x7 << I40E_GLPM_LTRC_NSSCALE_SHIFT)
+#define I40E_GLPM_LTRC_LTR_SEND_SHIFT 30
+#define I40E_GLPM_LTRC_LTR_SEND_MASK (0x1 << I40E_GLPM_LTRC_LTR_SEND_SHIFT)
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT 31
+#define I40E_GLPM_LTRC_LTRNS_REQUIREMENT_MASK (0x1 << I40E_GLPM_LTRC_LTRNS_REQUIREMENT_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
+#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT 2
+#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT 3
+#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_HPTC 0x000AC800
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT 0
+#define I40E_PRTPM_HPTC_HIGH_PRI_TC_MASK (0xFF << I40E_PRTPM_HPTC_HIGH_PRI_TC_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_MAX_INDEX 7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_MAX_INDEX 7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_MAX_INDEX 7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHT_MAX_INDEX 7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SLT_MAX_INDEX 7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_GLQF_CTL_HTOEP_SHIFT 1
+#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_DDPLPEN_SHIFT 7
+#define I40E_GLQF_CTL_DDPLPEN_MASK (0x1 << I40E_GLQF_CTL_DDPLPEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT 17
+#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
+#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
+#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_HSYM_MAX_INDEX 63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_PCNT_MAX_INDEX 511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_SWAP_MAX_INDEX 1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
+#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
+#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_HENA_MAX_INDEX 1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HKEY_MAX_INDEX 12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HLUT_MAX_INDEX 127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
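The SHIFT/MASK pairs throughout this header follow one convention: mask the field in place, then shift it down. As a small sketch only, this is how the four RSS LUT entries packed into one I40E_PFQF_HLUT word would typically be unpacked; "regval" is a hypothetical value already read from the register, and no real driver helper is implied.

/*
 * Illustrative sketch: extracting the packed LUT entries from one
 * I40E_PFQF_HLUT register word using the field macros defined above.
 */
#include <stdint.h>

static inline void unpack_hlut_word(uint32_t regval, uint8_t lut[4])
{
	lut[0] = (regval & I40E_PFQF_HLUT_LUT0_MASK) >> I40E_PFQF_HLUT_LUT0_SHIFT;
	lut[1] = (regval & I40E_PFQF_HLUT_LUT1_MASK) >> I40E_PFQF_HLUT_LUT1_SHIFT;
	lut[2] = (regval & I40E_PFQF_HLUT_LUT2_MASK) >> I40E_PFQF_HLUT_LUT2_SHIFT;
	lut[3] = (regval & I40E_PFQF_HLUT_LUT3_MASK) >> I40E_PFQF_HLUT_LUT3_SHIFT;
}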
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
+#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 6
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0xF << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
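Composing a register value is the mirror image of extraction. The sketch below assembles a PRTQF_FLX_PIT word from its three fields with the (value << SHIFT) & MASK pattern; the field meanings assumed here (source offset, field size, destination offset) are inferred from the macro names only, so treat it as a hedged illustration rather than the driver's actual programming sequence.

/*
 * Illustrative sketch: building one flexible-payload extraction entry
 * from the PRTQF_FLX_PIT field macros defined above.
 */
#include <stdint.h>

static inline uint32_t build_flx_pit(uint32_t src_off, uint32_t fsize,
				     uint32_t dest_off)
{
	return ((src_off << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) &
		I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) |
	       ((fsize << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) &
		I40E_PRTQF_FLX_PIT_FSIZE_MASK) |
	       ((dest_off << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) &
		I40E_PRTQF_FLX_PIT_DEST_OFF_MASK);
}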
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HENA1_MAX_INDEX 1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HKEY1_MAX_INDEX 12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HREGION1_MAX_INDEX 7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */
+#define I40E_VPQF_CTL_MAX_INDEX 127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */
+#define I40E_VSIQF_CTL_MAX_INDEX 383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_TCREGION_MAX_INDEX 7
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_MAX_INDEX 143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDDPEC(_i) (0x00314900 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPEC_MAX_INDEX 143
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT 0
+#define I40E_GL_FCOEDDPEC_CFOEDDPEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPEC_CFOEDDPEC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
+#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_MAX_INDEX 143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_MAX_INDEX 143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_MAX_INDEX 143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOERPDC_MAX_INDEX 143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_MAX_INDEX 3
+#define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_MAX_INDEX 3
+#define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_MAX_INDEX 3
+#define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_MAX_INDEX 3
+#define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
+#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_MAX_INDEX 3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_MAX_INDEX 3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_MAX_INDEX 3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_MAX_INDEX 3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_MAX_INDEX 3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MAX_INDEX 3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MAX_INDEX 3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MAX_INDEX 3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MAX_INDEX 3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MAX_INDEX 3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MAX_INDEX 3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_MAX_INDEX 3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_MAX_INDEX 3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_MAX_INDEX 3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_MAX_INDEX 3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_MAX_INDEX 3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_MAX_INDEX 3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_MAX_INDEX 3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_MAX_INDEX 3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_MAX_INDEX 3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_MAX_INDEX 3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_MAX_INDEX 3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_MAX_INDEX 3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_MAX_INDEX 3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_MAX_INDEX 3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_MAX_INDEX 3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_MAX_INDEX 3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_MAX_INDEX 3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_MAX_INDEX 3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUPP_MAX_INDEX 3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_STDC_MAX_INDEX 3
+#define I40E_GLPRT_STDC_STDC_SHIFT 0
+#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_MAX_INDEX 3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_MAX_INDEX 3
+#define I40E_GLPRT_TDPC_TDPC_SHIFT 0
+#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_MAX_INDEX 3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_MAX_INDEX 3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_MAX_INDEX 3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCL_MAX_INDEX 3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_MAX_INDEX 15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_MAX_INDEX 15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_MAX_INDEX 15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_MAX_INDEX 15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_MAX_INDEX 15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_MAX_INDEX 15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_MAX_INDEX 15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_MAX_INDEX 15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MAX_INDEX 15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MAX_INDEX 15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MAX_INDEX 15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MAX_INDEX 15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_MAX_INDEX 15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_MAX_INDEX 15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_MAX_INDEX 15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_MAX_INDEX 15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_MAX_INDEX 15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCL_MAX_INDEX 15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_MAX_INDEX 383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_MAX_INDEX 383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_MAX_INDEX 383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_MAX_INDEX 383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_MAX_INDEX 383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_MAX_INDEX 383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_MAX_INDEX 383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_MAX_INDEX 383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MAX_INDEX 383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MAX_INDEX 383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MAX_INDEX 383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MAX_INDEX 383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_MAX_INDEX 383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_MAX_INDEX 383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
+#define I40E_GLV_TEPC_MAX_INDEX 383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_MAX_INDEX 383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_MAX_INDEX 383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_MAX_INDEX 383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCL_MAX_INDEX 383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRT_MSCCNT 0x00256BA0
+#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
+#define I40E_PRT_SCSTS 0x00256C20
+#define I40E_PRT_SCSTS_BSCA_SHIFT 0
+#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
+#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
+#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
+#define I40E_PRT_SCSTS_MSCA_SHIFT 2
+#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
+#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
+#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
+#define I40E_PRT_SWT_BSCCNT 0x00256C60
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
+#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT 8
+#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT 31
+#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480
+#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 8
+#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
+#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT 31
+#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
+#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_MAX_INDEX 127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_TX_MAX_INDEX 127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
+#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_DATA(_i, _j) (0x00060000 + ((_i) * 4096 + (_j) * 128))
+#define I40E_PFPM_FHFT_DATA_MAX_INDEX 7
+#define I40E_PFPM_FHFT_DATA_DWORD_SHIFT 0
+#define I40E_PFPM_FHFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PFPM_FHFT_DATA_DWORD_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_FHFT_MASK(_i, _j) (0x00068000 + ((_i) * 1024 + (_j) * 128))
+#define I40E_PFPM_FHFT_MASK_MAX_INDEX 7
+#define I40E_PFPM_FHFT_MASK_MASK_SHIFT 0
+#define I40E_PFPM_FHFT_MASK_MASK_MASK (0xFFFF << I40E_PFPM_FHFT_MASK_MASK_SHIFT)
+#define I40E_PFPM_PROXYFC 0x00245A80
+#define I40E_PFPM_PROXYFC_PPROXYE_SHIFT 0
+#define I40E_PFPM_PROXYFC_PPROXYE_MASK (0x1 << I40E_PFPM_PROXYFC_PPROXYE_SHIFT)
+#define I40E_PFPM_PROXYFC_EX_SHIFT 1
+#define I40E_PFPM_PROXYFC_EX_MASK (0x1 << I40E_PFPM_PROXYFC_EX_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_SHIFT 4
+#define I40E_PFPM_PROXYFC_ARP_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_SHIFT)
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYFC_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_SHIFT 9
+#define I40E_PFPM_PROXYFC_NS_MASK (0x1 << I40E_PFPM_PROXYFC_NS_SHIFT)
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYFC_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYFC_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYFC_MLD_SHIFT 12
+#define I40E_PFPM_PROXYFC_MLD_MASK (0x1 << I40E_PFPM_PROXYFC_MLD_SHIFT)
+#define I40E_PFPM_PROXYS 0x00245B80
+#define I40E_PFPM_PROXYS_EX_SHIFT 1
+#define I40E_PFPM_PROXYS_EX_MASK (0x1 << I40E_PFPM_PROXYS_EX_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_SHIFT 4
+#define I40E_PFPM_PROXYS_ARP_MASK (0x1 << I40E_PFPM_PROXYS_ARP_SHIFT)
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT 5
+#define I40E_PFPM_PROXYS_ARP_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_ARP_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_NS_SHIFT 9
+#define I40E_PFPM_PROXYS_NS_MASK (0x1 << I40E_PFPM_PROXYS_NS_SHIFT)
+#define I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT 10
+#define I40E_PFPM_PROXYS_NS_DIRECTED_MASK (0x1 << I40E_PFPM_PROXYS_NS_DIRECTED_SHIFT)
+#define I40E_PFPM_PROXYS_MLD_SHIFT 12
+#define I40E_PFPM_PROXYS_MLD_MASK (0x1 << I40E_PFPM_PROXYS_MLD_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUFC_LNKC_SHIFT 0
+#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT 1
+#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT 3
+#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT 16
+#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT 17
+#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT 18
+#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT 19
+#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT 20
+#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT 21
+#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT 22
+#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT 23
+#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUS_LNKC_SHIFT 0
+#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT 1
+#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT 3
+#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT 16
+#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT 17
+#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT 18
+#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT 19
+#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT 20
+#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT 21
+#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT 22
+#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT 23
+#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_MAX_INDEX 3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
+#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT 31
+#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAL_MAX_INDEX 3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
+#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT 31
+#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ITR01_MAX_INDEX 2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITRN1_MAX_INDEX 2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_MAX_INDEX 15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QTX_TAIL1_MAX_INDEX 15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MAX_INDEX 16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MAX_INDEX 16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MAX_INDEX 16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT 17
+#define I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEUPPERIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFQF_HENA_MAX_INDEX 1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HKEY_MAX_INDEX 12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HLUT_MAX_INDEX 15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HREGION_MAX_INDEX 7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
new file mode 100644
index 00000000000..5e5bcddac57
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+ I40E_SUCCESS = 0,
+ I40E_ERR_NVM = -1,
+ I40E_ERR_NVM_CHECKSUM = -2,
+ I40E_ERR_PHY = -3,
+ I40E_ERR_CONFIG = -4,
+ I40E_ERR_PARAM = -5,
+ I40E_ERR_MAC_TYPE = -6,
+ I40E_ERR_UNKNOWN_PHY = -7,
+ I40E_ERR_LINK_SETUP = -8,
+ I40E_ERR_ADAPTER_STOPPED = -9,
+ I40E_ERR_INVALID_MAC_ADDR = -10,
+ I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
+ I40E_ERR_MASTER_REQUESTS_PENDING = -12,
+ I40E_ERR_INVALID_LINK_SETTINGS = -13,
+ I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
+ I40E_ERR_RESET_FAILED = -15,
+ I40E_ERR_SWFW_SYNC = -16,
+ I40E_ERR_NO_AVAILABLE_VSI = -17,
+ I40E_ERR_NO_MEMORY = -18,
+ I40E_ERR_BAD_PTR = -19,
+ I40E_ERR_RING_FULL = -20,
+ I40E_ERR_INVALID_PD_ID = -21,
+ I40E_ERR_INVALID_QP_ID = -22,
+ I40E_ERR_INVALID_CQ_ID = -23,
+ I40E_ERR_INVALID_CEQ_ID = -24,
+ I40E_ERR_INVALID_AEQ_ID = -25,
+ I40E_ERR_INVALID_SIZE = -26,
+ I40E_ERR_INVALID_ARP_INDEX = -27,
+ I40E_ERR_INVALID_FPM_FUNC_ID = -28,
+ I40E_ERR_QP_INVALID_MSG_SIZE = -29,
+ I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
+ I40E_ERR_INVALID_FRAG_COUNT = -31,
+ I40E_ERR_QUEUE_EMPTY = -32,
+ I40E_ERR_INVALID_ALIGNMENT = -33,
+ I40E_ERR_FLUSHED_QUEUE = -34,
+ I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
+ I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
+ I40E_ERR_TIMEOUT = -37,
+ I40E_ERR_OPCODE_MISMATCH = -38,
+ I40E_ERR_CQP_COMPL_ERROR = -39,
+ I40E_ERR_INVALID_VF_ID = -40,
+ I40E_ERR_INVALID_HMCFN_ID = -41,
+ I40E_ERR_BACKING_PAGE_ERROR = -42,
+ I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
+ I40E_ERR_INVALID_PBLE_INDEX = -44,
+ I40E_ERR_INVALID_SD_INDEX = -45,
+ I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
+ I40E_ERR_INVALID_SD_TYPE = -47,
+ I40E_ERR_MEMCPY_FAILED = -48,
+ I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
+ I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
+ I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
+ I40E_ERR_SRQ_ENABLED = -52,
+ I40E_ERR_ADMIN_QUEUE_ERROR = -53,
+ I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
+ I40E_ERR_BUF_TOO_SHORT = -55,
+ I40E_ERR_ADMIN_QUEUE_FULL = -56,
+ I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
+ I40E_ERR_BAD_IWARP_CQE = -58,
+ I40E_ERR_NVM_BLANK_MODE = -59,
+ I40E_ERR_NOT_IMPLEMENTED = -60,
+ I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
+ I40E_ERR_DIAG_TEST_FAILED = -62,
+ I40E_ERR_NOT_READY = -63,
+ I40E_NOT_SUPPORTED = -64,
+ I40E_ERR_FIRMWARE_API_VERSION = -65,
+};
+
+#endif /* _I40E_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
new file mode 100644
index 00000000000..49d2cfa9b0c
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -0,0 +1,1817 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
+{
+ return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
+ ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
+ ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
+ ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
+ ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
+}
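+
+/* Illustrative use (hypothetical values): for a 1514 byte single-buffer
+ * frame sent with EOP and RS set and no VLAN tag, the data descriptor's
+ * second quad word could be built as
+ *	build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS, 0, 1514, 0)
+ * which ORs the DATA dtype with the command, offset, buffer size and L2
+ * tag fields, each shifted into its QW1 position, and returns the result
+ * as a little-endian u64.
+ */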
+
+/**
+ * i40e_program_fdir_filter - Program a Flow Director filter
+ * @fdir_data: raw packet buffer and the filter parameters to program
+ * @pf: The pf pointer
+ * @add: True for add/update, False for remove
+ **/
+int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
+ struct i40e_pf *pf, bool add)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ struct i40e_ring *tx_ring;
+ struct i40e_vsi *vsi;
+ struct device *dev;
+ dma_addr_t dma;
+ u32 td_cmd = 0;
+ u16 i;
+
+ /* find existing FDIR VSI */
+ vsi = NULL;
+ for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
+ vsi = pf->vsi[i];
+ if (!vsi)
+ return -ENOENT;
+
+ tx_ring = &vsi->tx_rings[0];
+ dev = tx_ring->dev;
+
+ dma = dma_map_single(dev, fdir_data->raw_packet,
+ I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_fail;
+
+ /* grab the next descriptor */
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
+ << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+ & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+
+ fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
+ << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+ & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+
+ fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
+ << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+ & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+
+ /* Use LAN VSI Id if not programmed by user */
+ if (fdir_data->dest_vsi == 0)
+ fdir_desc->qindex_flex_ptype_vsi |=
+ cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
+ << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+ else
+ fdir_desc->qindex_flex_ptype_vsi |=
+ cpu_to_le32((fdir_data->dest_vsi
+ << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+ & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+
+ fdir_desc->dtype_cmd_cntindex =
+ cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+
+ if (add)
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
+ << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+ else
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
+ << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
+ << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+ & I40E_TXD_FLTR_QW1_DEST_MASK);
+
+ fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
+ (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+ & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+
+ if (fdir_data->cnt_index != 0) {
+ fdir_desc->dtype_cmd_cntindex |=
+ cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
+ fdir_desc->dtype_cmd_cntindex |=
+ cpu_to_le32((fdir_data->cnt_index
+ << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+ & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+ }
+
+ fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
+
+ /* Now program a dummy descriptor */
+ tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
+ tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ td_cmd = I40E_TX_DESC_CMD_EOP |
+ I40E_TX_DESC_CMD_RS |
+ I40E_TX_DESC_CMD_DUMMY;
+
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
+
+ /* Mark the data descriptor to be watched */
+ tx_buf->next_to_watch = tx_desc;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(tx_ring->next_to_use, tx_ring->tail);
+ return 0;
+
+dma_fail:
+ return -1;
+}
+
+/**
+ * i40e_fd_handle_status - check the Programming Status for FD
+ * @rx_ring: the Rx ring for this descriptor
+ * @qw: the descriptor data
+ * @prog_id: the id originally used for programming
+ *
+ * This is used to verify whether the FD programming or invalidation
+ * requested by SW of the HW succeeded, and to take action accordingly.
+ **/
+static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
+{
+ struct pci_dev *pdev = rx_ring->vsi->back->pdev;
+ u32 error;
+
+ error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+ /* for now just print the Status */
+ dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n",
+ prog_id, error);
+}
+
+/**
+ * i40e_unmap_tx_resource - Release a Tx buffer
+ * @ring: the ring that owns the buffer
+ * @tx_buffer: the buffer to free
+ **/
+static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
+ struct i40e_tx_buffer *tx_buffer)
+{
+ if (tx_buffer->dma) {
+ if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
+ dma_unmap_page(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(ring->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
+ }
+ tx_buffer->dma = 0;
+ tx_buffer->time_stamp = 0;
+}
+
+/**
+ * i40e_clean_tx_ring - Free any empty Tx buffers
+ * @tx_ring: ring to be cleaned
+ **/
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
+{
+ struct i40e_tx_buffer *tx_buffer;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!tx_ring->tx_bi)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer = &tx_ring->tx_bi[i];
+ i40e_unmap_tx_resource(tx_ring, tx_buffer);
+ if (tx_buffer->skb)
+ dev_kfree_skb_any(tx_buffer->skb);
+ tx_buffer->skb = NULL;
+ }
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * i40e_free_tx_resources - Free Tx resources per queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void i40e_free_tx_resources(struct i40e_ring *tx_ring)
+{
+ i40e_clean_tx_ring(tx_ring);
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+
+ if (tx_ring->desc) {
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_get_tx_pending - how many tx descriptors not processed
+ * @ring: the ring of descriptors
+ *
+ * Since there is no access to the ring head register
+ * in XL710, we need to use our local copies
+ **/
+static u32 i40e_get_tx_pending(struct i40e_ring *ring)
+{
+ u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
+ ? ring->next_to_use
+ : ring->next_to_use + ring->count);
+ return ntu - ring->next_to_clean;
+}
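+
+/* Worked example (illustrative numbers): on a 512-descriptor ring with
+ * next_to_clean = 500 and next_to_use = 10 the ring has wrapped, so the
+ * count becomes (10 + 512) - 500 = 22 pending descriptors; without a wrap
+ * (say clean = 100, use = 120) it is simply 120 - 100 = 20.
+ */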
+
+/**
+ * i40e_check_tx_hang - Is there a hang in the Tx queue
+ * @tx_ring: the ring of descriptors
+ **/
+static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
+{
+ u32 tx_pending = i40e_get_tx_pending(tx_ring);
+ bool ret = false;
+
+ clear_check_for_tx_hang(tx_ring);
+
+ /* Check for a hung queue, but be thorough. This verifies
+ * that a transmit has been completed since the previous
+ * check AND there is at least one packet pending. The
+ * ARMED bit is set to indicate a potential hang. The
+ * bit is cleared if a pause frame is received to remove
+ * false hang detection due to PFC or 802.3x frames. By
+ * requiring this to fail twice we avoid races with
+ * PFC clearing the ARMED bit and conditions where we
+ * run the check_tx_hang logic with a transmit completion
+ * pending but without time to complete it yet.
+ */
+ if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+ tx_pending) {
+ /* make sure it is true for two checks in a row */
+ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
+ &tx_ring->state);
+ } else {
+ /* update completed stats and disarm the hang check */
+ tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+ clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_clean_tx_irq - Reclaim resources after transmit completes
+ * @tx_ring: tx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+{
+ u16 i = tx_ring->next_to_clean;
+ struct i40e_tx_buffer *tx_buf;
+ struct i40e_tx_desc *tx_desc;
+ unsigned int total_packets = 0;
+ unsigned int total_bytes = 0;
+
+ tx_buf = &tx_ring->tx_bi[i];
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+
+ for (; budget; budget--) {
+ struct i40e_tx_desc *eop_desc;
+
+ eop_desc = tx_buf->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->cmd_type_offset_bsz &
+ cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* count the packet as being completed */
+ tx_ring->tx_stats.completed++;
+ tx_buf->next_to_watch = NULL;
+ tx_buf->time_stamp = 0;
+
+ /* set memory barrier before eop_desc is verified */
+ rmb();
+
+ do {
+ i40e_unmap_tx_resource(tx_ring, tx_buf);
+
+ /* clear dtype status */
+ tx_desc->cmd_type_offset_bsz &=
+ ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+
+ if (likely(tx_desc == eop_desc)) {
+ eop_desc = NULL;
+
+ dev_kfree_skb_any(tx_buf->skb);
+ tx_buf->skb = NULL;
+
+ total_bytes += tx_buf->bytecount;
+ total_packets += tx_buf->gso_segs;
+ }
+
+ tx_buf++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buf = tx_ring->tx_bi;
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ }
+ } while (eop_desc);
+ }
+
+ tx_ring->next_to_clean = i;
+ tx_ring->tx_stats.bytes += total_bytes;
+ tx_ring->tx_stats.packets += total_packets;
+ tx_ring->q_vector->tx.total_bytes += total_bytes;
+ tx_ring->q_vector->tx.total_packets += total_packets;
+ if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
+ /* schedule immediate reset if we believe we hung */
+ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
+ " VSI <%d>\n"
+ " Tx Queue <%d>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n",
+ tx_ring->vsi->seid,
+ tx_ring->queue_index,
+ tx_ring->next_to_use, i);
+ dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->tx_bi[i].time_stamp, jiffies);
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ dev_info(tx_ring->dev,
+ "tx hang detected on queue %d, resetting adapter\n",
+ tx_ring->queue_index);
+
+ tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+
+ /* the adapter is about to reset, no point in enabling stuff */
+ return true;
+ }
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
+ }
+
+ return budget > 0;
+}
+
+/**
+ * i40e_set_new_dynamic_itr - Find new ITR level
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte counts during
+ * the last interrupt. The advantage of per interrupt computation
+ * is faster updates and more accurate ITR for the current traffic
+ * pattern. Constants in this function were computed based on
+ * theoretical maximum wire speed and thresholds were set based on
+ * testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+{
+ enum i40e_latency_range new_latency_range = rc->latency_range;
+ u32 new_itr = rc->itr;
+ int bytes_per_int;
+
+ if (rc->total_packets == 0 || !rc->itr)
+ return;
+
+ /* simple throttlerate management
+ * 0-10MB/s lowest (100000 ints/s)
+ * 10-20MB/s low (20000 ints/s)
+ * 20-1249MB/s bulk (8000 ints/s)
+ */
+ bytes_per_int = rc->total_bytes / rc->itr;
+	switch (rc->latency_range) {
+ case I40E_LOWEST_LATENCY:
+ if (bytes_per_int > 10)
+ new_latency_range = I40E_LOW_LATENCY;
+ break;
+ case I40E_LOW_LATENCY:
+ if (bytes_per_int > 20)
+ new_latency_range = I40E_BULK_LATENCY;
+ else if (bytes_per_int <= 10)
+ new_latency_range = I40E_LOWEST_LATENCY;
+ break;
+ case I40E_BULK_LATENCY:
+ if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	}
+	rc->latency_range = new_latency_range;
+
+ switch (new_latency_range) {
+ case I40E_LOWEST_LATENCY:
+ new_itr = I40E_ITR_100K;
+ break;
+ case I40E_LOW_LATENCY:
+ new_itr = I40E_ITR_20K;
+ break;
+ case I40E_BULK_LATENCY:
+ new_itr = I40E_ITR_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_itr != rc->itr) {
+ /* do an exponential smoothing */
+ new_itr = (10 * new_itr * rc->itr) /
+ ((9 * new_itr) + rc->itr);
+ rc->itr = new_itr & I40E_MAX_ITR;
+ }
+
+ rc->total_bytes = 0;
+ rc->total_packets = 0;
+}
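+
+/* Worked example (illustrative numbers): if the ring is currently at
+ * I40E_ITR_8K (0x3E = 62) and the new latency range asks for I40E_ITR_100K
+ * (0x05 = 5), the smoothing step above yields (10 * 5 * 62) / ((9 * 5) + 62)
+ * = 3100 / 107 = 28, so the ITR moves toward the target gradually instead
+ * of jumping straight to 5.
+ */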
+
+/**
+ * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
+ * @q_vector: the vector to adjust
+ **/
+static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
+{
+ u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
+ struct i40e_hw *hw = &q_vector->vsi->back->hw;
+ u32 reg_addr;
+ u16 old_itr;
+
+ reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
+ old_itr = q_vector->rx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->rx);
+ if (old_itr != q_vector->rx.itr)
+ wr32(hw, reg_addr, q_vector->rx.itr);
+
+ reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
+ old_itr = q_vector->tx.itr;
+ i40e_set_new_dynamic_itr(&q_vector->tx);
+ if (old_itr != q_vector->tx.itr)
+ wr32(hw, reg_addr, q_vector->tx.itr);
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_clean_programming_status - clean the programming status descriptor
+ * @rx_ring: the rx ring that has this descriptor
+ * @rx_desc: the rx descriptor written back by HW
+ *
+ * Flow director should handle FD_FILTER_STATUS to check whether its filter
+ * programming succeeded and take actions accordingly. FCoE should handle
+ * its context/filter programming/invalidation status and take actions.
+ *
+ **/
+static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ u64 qw;
+ u8 id;
+
+ qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
+
+ if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
+ i40e_fd_handle_status(rx_ring, qw, id);
+}
+
+/**
+ * i40e_setup_tx_descriptors - Allocate the Tx descriptors
+ * @tx_ring: the tx ring to set up
+ *
+ * Return 0 on success, negative on error
+ **/
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int bi_size;
+
+ if (!dev)
+ return -ENOMEM;
+
+ bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
+ tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!tx_ring->tx_bi)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_ring->size);
+ goto err;
+ }
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ return 0;
+
+err:
+ kfree(tx_ring->tx_bi);
+ tx_ring->tx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_clean_rx_ring - Free Rx buffers
+ * @rx_ring: ring to be cleaned
+ **/
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ struct i40e_rx_buffer *rx_bi;
+ unsigned long bi_size;
+ u16 i;
+
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_bi)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ rx_bi = &rx_ring->rx_bi[i];
+ if (rx_bi->dma) {
+ dma_unmap_single(dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+ if (rx_bi->skb) {
+ dev_kfree_skb(rx_bi->skb);
+ rx_bi->skb = NULL;
+ }
+ if (rx_bi->page) {
+ if (rx_bi->page_dma) {
+ dma_unmap_page(dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ __free_page(rx_bi->page);
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
+ }
+ }
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_bi, 0, bi_size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * i40e_free_rx_resources - Free Rx resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+{
+ i40e_clean_rx_ring(rx_ring);
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+
+ if (rx_ring->desc) {
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+ rx_ring->desc = NULL;
+ }
+}
+
+/**
+ * i40e_setup_rx_descriptors - Allocate Rx descriptors
+ * @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int bi_size;
+
+ bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
+ rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
+ ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
+ : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+
+ if (!rx_ring->desc) {
+ dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rx_ring->size);
+ goto err;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+err:
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * i40e_release_rx_desc - Store the new tail value and notify the HW
+ * @rx_ring: ring to bump
+ * @val: new tail index
+ **/
+static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
+{
+ rx_ring->next_to_use = val;
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(val, rx_ring->tail);
+}
+
+/**
+ * i40e_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union i40e_rx_desc *rx_desc;
+ struct i40e_rx_buffer *bi;
+ struct sk_buff *skb;
+
+ /* do nothing if no valid netdev defined */
+ if (!rx_ring->netdev || !cleaned_count)
+ return;
+
+ while (cleaned_count--) {
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_bi[i];
+ skb = bi->skb;
+
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+ /* initialize queue mapping */
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
+ }
+
+ if (!bi->dma) {
+ bi->dma = dma_map_single(rx_ring->dev,
+ skb->data,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ if (!bi->page) {
+ bi->page = alloc_page(GFP_ATOMIC);
+ if (!bi->page) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(rx_ring->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_ring->dev,
+ bi->page_dma)) {
+ rx_ring->rx_stats.alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ rx_desc->read.hdr_addr = 0;
+ }
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i)
+ i40e_release_rx_desc(rx_ring, i);
+}
+
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u64 flags = vsi->back->flags;
+
+ if (vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ if (flags & I40E_FLAG_IN_NETPOLL)
+ netif_rx(skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+}
+
+/**
+ * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
+ * @vsi: the VSI we care about
+ * @skb: skb currently being received and modified
+ * @rx_status: status value of last descriptor in packet
+ * @rx_error: error value of last descriptor in packet
+ **/
+static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
+ struct sk_buff *skb,
+ u32 rx_status,
+ u32 rx_error)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum enabled and ip headers found? */
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* IP or L4 checksum error */
+ if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+ vsi->back->hw_csum_rx_error++;
+ return;
+ }
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/**
+ * i40e_rx_hash - returns the hash value from the Rx descriptor
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline u32 i40e_rx_hash(struct i40e_ring *ring,
+ union i40e_rx_desc *rx_desc)
+{
+ if (ring->netdev->features & NETIF_F_RXHASH) {
+ if ((le64_to_cpu(rx_desc->wb.qword1.status_error_len) >>
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+ I40E_RX_DESC_FLTSTAT_RSS_HASH)
+ return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+ }
+ return 0;
+}
+
+/**
+ * i40e_clean_rx_irq - Reclaim resources after receive completes
+ * @rx_ring: rx ring to clean
+ * @budget: how many cleans we're allowed
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
+ u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ const int current_node = numa_node_id();
+ struct i40e_vsi *vsi = rx_ring->vsi;
+ u16 i = rx_ring->next_to_clean;
+ union i40e_rx_desc *rx_desc;
+ u32 rx_error, rx_status;
+ u64 qword;
+
+ rx_desc = I40E_RX_DESC(rx_ring, i);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+
+ while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
+ union i40e_rx_desc *next_rxd;
+ struct i40e_rx_buffer *rx_bi;
+ struct sk_buff *skb;
+ u16 vlan_tag;
+ if (i40e_rx_is_programming_status(qword)) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+ goto next_desc;
+ }
+ rx_bi = &rx_ring->rx_bi[i];
+ skb = rx_bi->skb;
+ prefetch(skb->data);
+
+ rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
+ >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+ rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK)
+ >> I40E_RXD_QW1_LENGTH_SPH_SHIFT;
+
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK)
+ >> I40E_RXD_QW1_ERROR_SHIFT;
+ rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+ rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+
+ rx_bi->skb = NULL;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * STATUS_DD bit is set
+ */
+ rmb();
+
+ /* Get the header and possibly the whole packet
+ * If this is an skb from previous receive dma will be 0
+ */
+ if (rx_bi->dma) {
+ u16 len;
+
+ if (rx_hbo)
+ len = I40E_RX_HDR_SIZE;
+ else if (rx_sph)
+ len = rx_header_len;
+ else if (rx_packet_len)
+ len = rx_packet_len; /* 1buf/no split found */
+ else
+ len = rx_header_len; /* split always mode */
+
+ skb_put(skb, len);
+ dma_unmap_single(rx_ring->dev,
+ rx_bi->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ rx_bi->dma = 0;
+ }
+
+ /* Get the rest of the data if this was a header split */
+ if (ring_is_ps_enabled(rx_ring) && rx_packet_len) {
+
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_bi->page,
+ rx_bi->page_offset,
+ rx_packet_len);
+
+ skb->len += rx_packet_len;
+ skb->data_len += rx_packet_len;
+ skb->truesize += rx_packet_len;
+
+ if ((page_count(rx_bi->page) == 1) &&
+ (page_to_nid(rx_bi->page) == current_node))
+ get_page(rx_bi->page);
+ else
+ rx_bi->page = NULL;
+
+ dma_unmap_page(rx_ring->dev,
+ rx_bi->page_dma,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ rx_bi->page_dma = 0;
+ }
+ I40E_RX_NEXT_DESC_PREFETCH(rx_ring, i, next_rxd);
+
+ if (unlikely(
+ !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+ struct i40e_rx_buffer *next_buffer;
+
+ next_buffer = &rx_ring->rx_bi[i];
+
+ if (ring_is_ps_enabled(rx_ring)) {
+ rx_bi->skb = next_buffer->skb;
+ rx_bi->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ }
+ rx_ring->rx_stats.non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ dev_kfree_skb_any(skb);
+ goto next_desc;
+ }
+
+ skb->rxhash = i40e_rx_hash(rx_ring, rx_desc);
+ i40e_rx_checksum(vsi, skb, rx_status, rx_error);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+ ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
+ : 0;
+ i40e_receive_skb(rx_ring, skb, vlan_tag);
+
+ rx_ring->netdev->last_rx = jiffies;
+ budget--;
+next_desc:
+ rx_desc->wb.qword1.status_error_len = 0;
+ if (!budget)
+ break;
+
+ cleaned_count++;
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
+ >> I40E_RXD_QW1_STATUS_SHIFT;
+ }
+
+ rx_ring->next_to_clean = i;
+ rx_ring->rx_stats.packets += total_rx_packets;
+ rx_ring->rx_stats.bytes += total_rx_bytes;
+ rx_ring->q_vector->rx.total_packets += total_rx_packets;
+ rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+
+ if (cleaned_count)
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return budget > 0;
+}
+
+/**
+ * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ *
+ * Returns the amount of work done
+ **/
+int i40e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct i40e_q_vector *q_vector =
+ container_of(napi, struct i40e_q_vector, napi);
+ struct i40e_vsi *vsi = q_vector->vsi;
+ bool clean_complete = true;
+ int budget_per_ring;
+ int i;
+
+ if (test_bit(__I40E_DOWN, &vsi->state)) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ * Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+ for (i = 0; i < q_vector->num_ringpairs; i++) {
+ clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
+ vsi->work_limit);
+ clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
+ budget_per_ring);
+ }
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ /* Work is done so exit the polling mode and re-enable the interrupt */
+ napi_complete(napi);
+ if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
+ ITR_IS_DYNAMIC(vsi->tx_itr_setting))
+ i40e_update_dynamic_itr(q_vector);
+
+ if (!test_bit(__I40E_DOWN, &vsi->state)) {
+ if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+ i40e_irq_dynamic_enable(vsi,
+ q_vector->v_idx + vsi->base_vector);
+ } else {
+ struct i40e_hw *hw = &vsi->back->hw;
+ /* We re-enable the queue 0 cause, but
+ * don't worry about dynamic_enable
+ * because we left it on for the other
+ * possible interrupts during napi
+ */
+ u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
+ qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), qval);
+
+ qval = rd32(hw, I40E_QINT_TQCTL(0));
+ qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), qval);
+ i40e_flush(hw);
+ }
+ }
+
+ return 0;
+}
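+
+/* Worked example (illustrative numbers): with a NAPI budget of 64 and a
+ * q_vector driving 4 ring pairs, budget_per_ring = max(64 / 4, 1) = 16
+ * packets per Rx ring, while each Tx ring is cleaned for up to
+ * vsi->work_limit completions.
+ */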
+
+/**
+ * i40e_atr - Add a Flow Director ATR filter
+ * @tx_ring: ring to add programming descriptor to
+ * @skb: send buffer
+ * @flags: send flags
+ * @protocol: wire protocol
+ **/
+static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 flags, __be16 protocol)
+{
+ struct i40e_filter_program_desc *fdir_desc;
+ struct i40e_pf *pf = tx_ring->vsi->back;
+ union {
+ unsigned char *network;
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ struct tcphdr *th;
+ unsigned int hlen;
+ u32 flex_ptype, dtype_cmd;
+
+ /* make sure ATR is enabled */
+ if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
+ return;
+
+ /* if sampling is disabled do nothing */
+ if (!tx_ring->atr_sample_rate)
+ return;
+
+ tx_ring->atr_count++;
+
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ if (protocol == htons(ETH_P_IP)) {
+ if (hdr.ipv4->protocol != IPPROTO_TCP)
+ return;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ return;
+
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return;
+ }
+
+ th = (struct tcphdr *)(hdr.network + hlen);
+
+ /* sample on all syn/fin packets or once every atr sample rate */
+ if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
+ return;
+
+ tx_ring->atr_count = 0;
+
+ /* grab the next descriptor */
+ fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW0_QINDEX_MASK;
+ flex_ptype |= (protocol == htons(ETH_P_IP)) ?
+ (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+ (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+ flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+ dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+ dtype_cmd |= th->fin ?
+ (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+ (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+ I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+ I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+ dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+ fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+ fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+}
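+
+/* Worked example (illustrative): for a plain IPv4 header the first byte is
+ * 0x45 (version 4, ihl 5), so hlen = (0x45 & 0x0F) << 2 = 20 bytes, which
+ * places th at the start of the TCP header without dereferencing the iphdr
+ * bitfields on alignment-sensitive architectures.
+ */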
+
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+/**
+ * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ * @flags: the tx flags to be set
+ *
+ * Checks the skb and sets up the generic transmit flags related to VLAN
+ * tagging for the HW, such as VLAN, DCB, etc.
+ *
+ * Returns an error code if the frame should be dropped, otherwise returns 0
+ * to indicate the flags have been set properly.
+ **/
+static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
+{
+ __be16 protocol = skb->protocol;
+ u32 tx_flags = 0;
+
+ /* if we have a HW VLAN tag being added, default to the HW one */
+ if (vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ /* else if it is a SW VLAN, check the next protocol and store the tag */
+ } else if (protocol == __constant_htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vhdr, _vhdr;
+ vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
+ if (!vhdr)
+ return -EINVAL;
+
+ protocol = vhdr->h_vlan_encapsulated_proto;
+ tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= I40E_TX_FLAGS_SW_VLAN;
+ }
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
+ ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+ (skb->priority != TC_PRIO_CONTROL))) {
+ tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
+ tx_flags |= (skb->priority & 0x7) <<
+ I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
+ if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ if (skb_header_cloned(skb) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return -ENOMEM;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(tx_flags >>
+ I40E_TX_FLAGS_VLAN_SHIFT);
+ } else {
+ tx_flags |= I40E_TX_FLAGS_HW_VLAN;
+ }
+ }
+ *flags = tx_flags;
+ return 0;
+}
+
+/**
+ * i40e_tx_csum - is checksum offload requested
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ *
+ * Returns true if checksum offload is requested
+ **/
+static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol)
+{
+ if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
+ !(tx_flags & I40E_TX_FLAGS_TXSW)) {
+ if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
+ return false;
+ }
+
+ return skb->ip_summed == CHECKSUM_PARTIAL;
+}
+
+/**
+ * i40e_tso - set up the tso context descriptor
+ * @tx_ring: ptr to the ring to send
+ * @skb: ptr to the skb we're sending
+ * @tx_flags: the collected send information
+ * @protocol: the send protocol
+ * @hdr_len: ptr to the size of the packet header
+ * @cd_type_cmd_tso_mss: ptr to Quad Word 1 of the context descriptor
+ * @cd_tunneling: ptr to context descriptor bits
+ *
+ * Returns 0 if no TSO is needed, 1 if TSO is set up, or a negative error code
+ **/
+static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len,
+ u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+{
+ u32 cd_cmd, cd_tso_len, cd_mss;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u32 l4len;
+ int err;
+ struct ipv6hdr *ipv6h;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ } else if (skb_is_gso_v6(skb)) {
+
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
+ : ipv6_hdr(skb);
+ tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
+ ipv6h->payload_len = 0;
+ tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
+ *hdr_len = (skb->encapsulation
+ ? (skb_inner_transport_header(skb) - skb->data)
+ : skb_transport_offset(skb)) + l4len;
+
+ /* find the field values */
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = skb->len - *hdr_len;
+ cd_mss = skb_shinfo(skb)->gso_size;
+ *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT)
+ | ((u64)cd_tso_len
+ << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+ | ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+ return 1;
+}
+
+/**
+ * i40e_tx_enable_csum - Enable Tx checksum offloads
+ * @skb: send buffer
+ * @tx_flags: Tx flags currently set
+ * @td_cmd: Tx descriptor command bits to set
+ * @td_offset: Tx descriptor header offsets to set
+ * @tx_ring: Tx descriptor ring
+ * @cd_tunneling: ptr to context desc bits
+ **/
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+ u32 *td_cmd, u32 *td_offset,
+ struct i40e_ring *tx_ring,
+ u32 *cd_tunneling)
+{
+ struct ipv6hdr *this_ipv6_hdr;
+ unsigned int this_tcp_hdrlen;
+ struct iphdr *this_ip_hdr;
+ u32 network_hdr_len;
+ u8 l4_hdr = 0;
+
+ if (skb->encapsulation) {
+ network_hdr_len = skb_inner_network_header_len(skb);
+ this_ip_hdr = inner_ip_hdr(skb);
+ this_ipv6_hdr = inner_ipv6_hdr(skb);
+ this_tcp_hdrlen = inner_tcp_hdrlen(skb);
+
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ ip_hdr(skb)->check = 0;
+ } else {
+ *cd_tunneling |=
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ }
+ }
+
+ /* Now set the ctx descriptor fields */
+ *cd_tunneling |= (skb_network_header_len(skb) >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
+ I40E_TXD_CTX_UDP_TUNNELING |
+ ((skb_inner_network_offset(skb) -
+ skb_transport_offset(skb)) >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+
+ } else {
+ network_hdr_len = skb_network_header_len(skb);
+ this_ip_hdr = ip_hdr(skb);
+ this_ipv6_hdr = ipv6_hdr(skb);
+ this_tcp_hdrlen = tcp_hdrlen(skb);
+ }
+
+ /* Enable IP checksum offloads */
+ if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ l4_hdr = this_ip_hdr->protocol;
+ /* the stack computes the IP header already, the only time we
+ * need the hardware to recompute it is in the case of TSO.
+ */
+ if (tx_flags & I40E_TX_FLAGS_TSO) {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ this_ip_hdr->check = 0;
+ } else {
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+ }
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ l4_hdr = this_ipv6_hdr->nexthdr;
+ *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+ /* Now set the td_offset for IP header length */
+ *td_offset = (network_hdr_len >> 2) <<
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+ /* words in MACLEN + dwords in IPLEN + dwords in L4Len */
+ *td_offset |= (skb_network_offset(skb) >> 1) <<
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (this_tcp_hdrlen >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+ *td_offset |= (sizeof(struct sctphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+ *td_offset |= (sizeof(struct udphdr) >> 2) <<
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ break;
+ default:
+ break;
+ }
+}
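+
+/* Worked example (illustrative): for an untagged Ethernet + IPv4 + TCP frame
+ * with no options, td_offset carries MACLEN = 14 >> 1 = 7 (in words),
+ * IPLEN = 20 >> 2 = 5 (in dwords) and L4LEN = 20 >> 2 = 5 (in dwords),
+ * each shifted into its field as computed above.
+ */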
+
+/**
+ * i40e_create_tx_ctx - Build the Tx context descriptor
+ * @tx_ring: ring to create the descriptor on
+ * @cd_type_cmd_tso_mss: Quad Word 1
+ * @cd_tunneling: Quad Word 0 - bits 0-31
+ * @cd_l2tag2: Quad Word 0 - bits 32-63
+ **/
+static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
+ const u64 cd_type_cmd_tso_mss,
+ const u32 cd_tunneling, const u32 cd_l2tag2)
+{
+ struct i40e_tx_context_desc *context_desc;
+
+ if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
+ return;
+
+ /* grab the next descriptor */
+ context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+
+ /* cpu_to_le32 and assign to struct fields */
+ context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
+ context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+ context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+}
+
+/**
+ * i40e_tx_map - Build the Tx descriptor
+ * @tx_ring: ring to send buffer on
+ * @skb: send buffer
+ * @first: first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ * @td_cmd: the command field in the descriptor
+ * @td_offset: offset for checksum or crc
+ **/
+static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int data_len = skb->data_len;
+ unsigned int size = skb_headlen(skb);
+ struct device *dev = tx_ring->dev;
+ u32 paylen = skb->len - hdr_len;
+ u16 i = tx_ring->next_to_use;
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_tx_desc *tx_desc;
+ u32 buf_offset = 0;
+ u32 td_tag = 0;
+ dma_addr_t dma;
+ u16 gso_segs;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
+ td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+ td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
+ I40E_TX_FLAGS_VLAN_SHIFT;
+ }
+
+ tx_desc = I40E_TX_DESC(tx_ring, i);
+ for (;;) {
+ while (size > I40E_MAX_DATA_PER_TXD) {
+ tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(td_cmd, td_offset,
+ I40E_MAX_DATA_PER_TXD, td_tag);
+
+ buf_offset += I40E_MAX_DATA_PER_TXD;
+ size -= I40E_MAX_DATA_PER_TXD;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ }
+
+ tx_bi = &tx_ring->tx_bi[i];
+ tx_bi->length = buf_offset + size;
+ tx_bi->tx_flags = tx_flags;
+ tx_bi->dma = dma;
+
+ tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+ tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+ size, td_tag);
+
+ if (likely(!data_len))
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+ buf_offset = 0;
+ tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ tx_desc++;
+ i++;
+ if (i == tx_ring->count) {
+ tx_desc = I40E_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+
+ frag++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+ gso_segs = skb_shinfo(skb)->gso_segs;
+ else
+ gso_segs = 1;
+
+ /* multiply data chunks by size of headers */
+ tx_bi->bytecount = paylen + (gso_segs * hdr_len);
+ tx_bi->gso_segs = gso_segs;
+ tx_bi->skb = skb;
+
+ /* set the timestamp and next to watch values */
+ first->time_stamp = jiffies;
+ first->next_to_watch = tx_desc;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(i, tx_ring->tail);
+ return;
+
+dma_error:
+ dev_info(dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_bi map */
+ for (;;) {
+ tx_bi = &tx_ring->tx_bi[i];
+ i40e_unmap_tx_resource(tx_ring, tx_bi);
+ if (tx_bi == first)
+ break;
+ if (i == 0)
+ i = tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ tx_ring->next_to_use = i;
+}
+
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ smp_mb();
+
+ /* Check again in a case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns the number of data descriptors needed for this skb. Returns 0 to
+ * indicate there are not enough descriptors available in this ring since we
+ * need at least one descriptor.
+ **/
+static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ unsigned int f;
+#endif
+ int count = 0;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ if (i40e_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return 0;
+ }
+ return count;
+}
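+
+/* Worked example (illustrative, assuming TXD_USE_COUNT() rounds a length up
+ * to I40E_MAX_DATA_PER_TXD sized chunks): a non-TSO skb with a 1514 byte
+ * linear area and no frags needs count = 1 data descriptor, so the
+ * i40e_maybe_stop_tx(tx_ring, count + 3) check above requires 4 free
+ * descriptors: 1 data + 1 context + 2 gap.
+ */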
+
+/**
+ * i40e_xmit_frame_ring - Sends buffer on Tx ring
+ * @skb: send buffer
+ * @tx_ring: ring to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
+{
+ u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
+ u32 cd_tunneling = 0, cd_l2tag2 = 0;
+ struct i40e_tx_buffer *first;
+ u32 td_offset = 0;
+ u32 tx_flags = 0;
+ __be16 protocol;
+ u32 td_cmd = 0;
+ u8 hdr_len = 0;
+	int tso;
+
+	if (!i40e_xmit_descriptor_count(skb, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ /* prepare the xmit flags */
+ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ goto out_drop;
+
+ /* obtain protocol of skb */
+ protocol = skb->protocol;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+ /* setup IPv4/IPv6 offloads */
+ if (protocol == __constant_htons(ETH_P_IP))
+ tx_flags |= I40E_TX_FLAGS_IPV4;
+ else if (protocol == __constant_htons(ETH_P_IPV6))
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+
+ tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ &cd_type_cmd_tso_mss, &cd_tunneling);
+
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
+ tx_flags |= I40E_TX_FLAGS_TSO;
+
+ skb_tx_timestamp(skb);
+
+ /* Always offload the checksum, since it's in the data descriptor */
+ if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+ tx_flags |= I40E_TX_FLAGS_CSUM;
+
+ /* always enable offload insertion */
+ td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
+ if (tx_flags & I40E_TX_FLAGS_CSUM)
+ i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ tx_ring, &cd_tunneling);
+
+ i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
+ cd_tunneling, cd_l2tag2);
+
+ /* Add Flow Director ATR if it's enabled.
+ *
+ * NOTE: this must always be directly before the data descriptor.
+ */
+ i40e_atr(tx_ring, skb, tx_flags, protocol);
+
+ i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
+
+ i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ **/
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
+ if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = I40E_MIN_TX_LEN;
+ skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
+ }
+
+ return i40e_xmit_frame_ring(skb, tx_ring);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
new file mode 100644
index 00000000000..b1d7722d98a
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -0,0 +1,259 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+
+#define I40E_MAX_ITR 0x07FF
+#define I40E_MIN_ITR 0x0001
+#define I40E_ITR_USEC_RESOLUTION 2
+#define I40E_MAX_IRATE 0x03F
+#define I40E_MIN_IRATE 0x001
+#define I40E_IRATE_USEC_RESOLUTION 4
+#define I40E_ITR_100K 0x0005
+#define I40E_ITR_20K 0x0019
+#define I40E_ITR_8K 0x003E
+#define I40E_ITR_4K 0x007A
+#define I40E_ITR_RX_DEF I40E_ITR_8K
+#define I40E_ITR_TX_DEF I40E_ITR_4K
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
+#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
+#define I40E_DEFAULT_IRQ_WORK 256
+#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
+#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
+#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
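/* Editor's illustrative sketch, not part of the patch: converting a target
 * interrupt rate to an ITR register value with the macros above, assuming
 * the stored setting is in microseconds with the top bit reserved for the
 * dynamic flag.
 */
static inline u16 example_rate_to_itr_reg(u32 ints_per_sec)
{
	u16 usecs = 1000000 / ints_per_sec;	/* e.g. 8000 -> 125 usec */

	return ITR_TO_REG(usecs);	/* 125 >> 1 == 62 == I40E_ITR_8K */
}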
+
+#define I40E_QUEUE_END_OF_LIST 0x7FF
+
+#define I40E_ITR_NONE 3
+#define I40E_RX_ITR 0
+#define I40E_TX_ITR 1
+#define I40E_PE_ITR 2
+/* Supported Rx Buffer Sizes */
+#define I40E_RXBUFFER_512 512 /* Used for packet split */
+#define I40E_RXBUFFER_2048 2048
+#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
+#define I40E_RXBUFFER_4096 4096
+#define I40E_RXBUFFER_8192 8192
+#define I40E_MAX_RXBUFFER 9728 /* largest size for single descriptor */
+
+/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
+ * this adds up to 512 bytes of extra data, meaning the smallest allocation
+ * we could have is 1K.
+ * i.e. RXBUFFER_512 --> size-1024 slab
+ */
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_NEXT_DESC(r, i, n) \
+ do { \
+ (i)++; \
+ if ((i) == (r)->count) \
+ i = 0; \
+ (n) = I40E_RX_DESC((r), (i)); \
+ } while (0)
+
+#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \
+ do { \
+ I40E_RX_NEXT_DESC((r), (i), (n)); \
+ prefetch((n)); \
+ } while (0)
+
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+#define I40E_MIN_TX_LEN 17
+#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
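/* Editor's illustrative sketch, not part of the patch: with 4 KiB pages,
 * TXD_USE_COUNT(PAGE_SIZE) == DIV_ROUND_UP(4096, 16383) == 1, and with the
 * usual MAX_SKB_FRAGS of 17 that makes DESC_NEEDED == 17 * 1 + 4 == 21:
 * one data descriptor per fragment, plus the head, one context descriptor
 * and the two-descriptor gap reserved in i40e_xmit_descriptor_count().
 */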
+
+#define I40E_TX_FLAGS_CSUM (u32)(1)
+#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
+#define I40E_TX_FLAGS_SW_VLAN (u32)(1 << 2)
+#define I40E_TX_FLAGS_TSO (u32)(1 << 3)
+#define I40E_TX_FLAGS_IPV4 (u32)(1 << 4)
+#define I40E_TX_FLAGS_IPV6 (u32)(1 << 5)
+#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
+#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
+#define I40E_TX_FLAGS_TXSW (u32)(1 << 8)
+#define I40E_TX_FLAGS_MAPPED_AS_PAGE (u32)(1 << 9)
+#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
+#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define I40E_TX_FLAGS_VLAN_SHIFT 16
+
+struct i40e_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u32 tx_flags;
+ struct i40e_tx_desc *next_to_watch;
+ unsigned int bytecount;
+ u16 gso_segs;
+ u8 mapped_as_page;
+};
+
+struct i40e_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct i40e_tx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 completed;
+ u64 tx_done_old;
+};
+
+struct i40e_rx_queue_stats {
+ u64 packets;
+ u64 bytes;
+ u64 non_eop_descs;
+ u64 alloc_rx_page_failed;
+ u64 alloc_rx_buff_failed;
+};
+
+enum i40e_ring_state_t {
+ __I40E_TX_FDIR_INIT_DONE,
+ __I40E_TX_XPS_INIT_DONE,
+ __I40E_TX_DETECT_HANG,
+ __I40E_HANG_CHECK_ARMED,
+ __I40E_RX_PS_ENABLED,
+ __I40E_RX_LRO_ENABLED,
+ __I40E_RX_16BYTE_DESC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+ test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+ set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+ clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+ test_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_lro_enabled(ring) \
+ test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define set_ring_lro_enabled(ring) \
+ set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define clear_ring_lro_enabled(ring) \
+ clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
+#define ring_is_16byte_desc_enabled(ring) \
+ test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define set_ring_16byte_desc_enabled(ring) \
+ set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+#define clear_ring_16byte_desc_enabled(ring) \
+ clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+
+/* struct that defines a descriptor ring, associated with a VSI */
+struct i40e_ring {
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ struct net_device *netdev; /* netdev ring maps to */
+ union {
+ struct i40e_tx_buffer *tx_bi;
+ struct i40e_rx_buffer *rx_bi;
+ };
+ unsigned long state;
+ u16 queue_index; /* Queue number of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 __iomem *tail;
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 rx_hdr_len;
+ u16 rx_buf_len;
+ u8 dtype;
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 1
+#define I40E_RX_DTYPE_HEADER_SPLIT 2
+ u8 hsplit;
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
+
+ /* used in interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ u8 atr_sample_rate;
+ u8 atr_count;
+
+ bool ring_active; /* is ring online or not */
+
+ /* stats structs */
+ union {
+ struct i40e_tx_queue_stats tx_stats;
+ struct i40e_rx_queue_stats rx_stats;
+ };
+
+ unsigned int size; /* length of descriptor ring in bytes */
+ dma_addr_t dma; /* physical address of ring */
+
+ struct i40e_vsi *vsi; /* Backreference to associated VSI */
+ struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+} ____cacheline_internodealigned_in_smp;
+
+enum i40e_latency_range {
+ I40E_LOWEST_LATENCY = 0,
+ I40E_LOW_LATENCY = 1,
+ I40E_BULK_LATENCY = 2,
+};
+
+struct i40e_ring_container {
+#define I40E_MAX_RINGPAIR_PER_VECTOR 8
+ /* array of pointers to rings */
+ struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 count;
+ enum i40e_latency_range latency_range;
+ u16 itr;
+};
+
+void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
+netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
+void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
+int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
+int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
+void i40e_free_tx_resources(struct i40e_ring *tx_ring);
+void i40e_free_rx_resources(struct i40e_ring *rx_ring);
+int i40e_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
new file mode 100644
index 00000000000..f3f22b20f02
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -0,0 +1,1154 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+
+/* Device IDs */
+#define I40E_SFP_XL710_DEVICE_ID 0x1572
+#define I40E_SFP_X710_DEVICE_ID 0x1573
+#define I40E_QEMU_DEVICE_ID 0x1574
+#define I40E_KX_A_DEVICE_ID 0x157F
+#define I40E_KX_B_DEVICE_ID 0x1580
+#define I40E_KX_C_DEVICE_ID 0x1581
+#define I40E_KX_D_DEVICE_ID 0x1582
+#define I40E_QSFP_A_DEVICE_ID 0x1583
+#define I40E_QSFP_B_DEVICE_ID 0x1584
+#define I40E_QSFP_C_DEVICE_ID 0x1585
+#define I40E_VF_DEVICE_ID 0x154C
+#define I40E_VF_HV_DEVICE_ID 0x1571
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0000
+
+#define I40E_MAX_VSI_QP 16
+#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_CHAINED_RX_BUFFERS 5
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT 18000
+
+/* Check whether an address is multicast. This is a little-endian-specific
+ * check.
+ */
+#define I40E_IS_MULTICAST(address) \
+ (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address) \
+ ((((u8 *)(address))[0] == ((u8)0xff)) && \
+ (((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 2usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS 6
+
+/* Data type manipulation macros. */
+
+#define I40E_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
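/* Editor's illustrative worked example, not part of the patch: with
 * count == 512, next_to_use == 10 and next_to_clean == 5 the macro above
 * evaluates to
 *	((5 > 10) ? 0 : 512) + 5 - 10 - 1 == 506
 * i.e. everything except the five in-flight descriptors and the one slot
 * that is always kept empty so a full ring is distinguishable from an
 * empty one.
 */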
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE 0x0
+#define I40E_QTX_CTL_PF_QUEUE 0x2
+
+/* debug masks */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000, /* for i40e_debug() */
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000, /* for i40e_debug_aq() */
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with. This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed. For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+ I40E_MAC_UNKNOWN = 0,
+ I40E_MAC_X710,
+ I40E_MAC_XL710,
+ I40E_MAC_VF,
+ I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+ I40E_MEDIA_TYPE_UNKNOWN = 0,
+ I40E_MEDIA_TYPE_FIBER,
+ I40E_MEDIA_TYPE_BASET,
+ I40E_MEDIA_TYPE_BACKPLANE,
+ I40E_MEDIA_TYPE_CX4,
+ I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+ I40E_FC_NONE = 0,
+ I40E_FC_RX_PAUSE,
+ I40E_FC_TX_PAUSE,
+ I40E_FC_FULL,
+ I40E_FC_PFC,
+ I40E_FC_DEFAULT
+};
+
+enum i40e_vsi_type {
+ I40E_VSI_MAIN = 0,
+ I40E_VSI_VMDQ1,
+ I40E_VSI_VMDQ2,
+ I40E_VSI_CTRL,
+ I40E_VSI_FCOE,
+ I40E_VSI_MIRROR,
+ I40E_VSI_SRIOV,
+ I40E_VSI_FDIR,
+ I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+ I40E_QUEUE_TYPE_RX = 0,
+ I40E_QUEUE_TYPE_TX,
+ I40E_QUEUE_TYPE_PE_CEQ,
+ I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+ enum i40e_aq_phy_type phy_type;
+ enum i40e_aq_link_speed link_speed;
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ /* is Link Status Event notification to SW enabled */
+ bool lse_enable;
+};
+
+struct i40e_phy_info {
+ struct i40e_link_status link_info;
+ struct i40e_link_status link_info_old;
+ u32 autoneg_advertised;
+ u32 phy_id;
+ u32 module_type;
+ bool get_link_info;
+ enum i40e_media_type media_type;
+};
+
+#define I40E_HW_CAP_MAX_GPIO 30
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+ u32 switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB 0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
+
+ u32 management_mode;
+ u32 npar_enable;
+ u32 os2bmc;
+ u32 valid_functions;
+ bool sr_iov_1_1;
+ bool vmdq;
+ bool evb_802_1_qbg; /* Edge Virtual Bridging */
+ bool evb_802_1_qbh; /* Bridge Port Extension */
+ bool dcb;
+ bool fcoe;
+ bool mfp_mode_1;
+ bool mgmt_cem;
+ bool ieee_1588;
+ bool iwarp;
+ bool fd;
+ u32 fd_filters_guaranteed;
+ u32 fd_filters_best_effort;
+ bool rss;
+ u32 rss_table_size;
+ u32 rss_table_entry_width;
+ bool led[I40E_HW_CAP_MAX_GPIO];
+ bool sdp[I40E_HW_CAP_MAX_GPIO];
+ u32 nvm_image_type;
+ u32 num_flow_director_filters;
+ u32 num_vfs;
+ u32 vf_base_id;
+ u32 num_vsis;
+ u32 num_rx_qp;
+ u32 num_tx_qp;
+ u32 base_queue;
+ u32 num_msix_vectors;
+ u32 num_msix_vectors_vf;
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+ u32 mdio_port_num;
+ u32 mdio_port_mode;
+ u8 rx_buf_chain_len;
+ u32 enabled_tcmap;
+ u32 maxtc;
+};
+
+struct i40e_mac_info {
+ enum i40e_mac_type type;
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+ I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+ I40E_RESOURCE_READ = 1,
+ I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+ u64 hw_semaphore_timeout; /* 2usec global time (GTIME resolution) */
+ u64 hw_semaphore_wait; /* - || - */
+ u32 timeout; /* [ms] */
+ u16 sr_size; /* Shadow RAM size in words */
+ bool blank_nvm_mode; /* is NVM empty (no FW present) */
+ u16 version; /* NVM package version */
+ u32 eetrack; /* NVM data version */
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+ i40e_bus_type_unknown = 0,
+ i40e_bus_type_pci,
+ i40e_bus_type_pcix,
+ i40e_bus_type_pci_express,
+ i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+ i40e_bus_speed_unknown = 0,
+ i40e_bus_speed_33 = 33,
+ i40e_bus_speed_66 = 66,
+ i40e_bus_speed_100 = 100,
+ i40e_bus_speed_120 = 120,
+ i40e_bus_speed_133 = 133,
+ i40e_bus_speed_2500 = 2500,
+ i40e_bus_speed_5000 = 5000,
+ i40e_bus_speed_8000 = 8000,
+ i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+ i40e_bus_width_unknown = 0,
+ i40e_bus_width_pcie_x1 = 1,
+ i40e_bus_width_pcie_x2 = 2,
+ i40e_bus_width_pcie_x4 = 4,
+ i40e_bus_width_pcie_x8 = 8,
+ i40e_bus_width_32 = 32,
+ i40e_bus_width_64 = 64,
+ i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+ enum i40e_bus_speed speed;
+ enum i40e_bus_width width;
+ enum i40e_bus_type type;
+
+ u16 func;
+ u16 device;
+ u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+ enum i40e_fc_mode current_mode; /* FC mode in effect */
+ enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS 8
+#define I40E_MAX_USER_PRIORITY 8
+#define I40E_DCBX_MAX_APPS 32
+#define I40E_LLDPDU_SIZE 1500
+
+/* IEEE 802.1Qaz ETS Configuration data */
+struct i40e_ieee_ets_config {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz ETS Recommendation data */
+struct i40e_ieee_ets_recommend {
+ u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+ u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* IEEE 802.1Qaz PFC Configuration data */
+struct i40e_ieee_pfc_config {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcenable;
+};
+
+/* IEEE 802.1Qaz Application Priority data */
+struct i40e_ieee_app_priority_table {
+ u8 priority;
+ u8 selector;
+ u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+ u32 numapps;
+ struct i40e_ieee_ets_config etscfg;
+ struct i40e_ieee_ets_recommend etsrec;
+ struct i40e_ieee_pfc_config pfc;
+ struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+ u8 __iomem *hw_addr;
+ void *back;
+
+ /* function pointer structs */
+ struct i40e_phy_info phy;
+ struct i40e_mac_info mac;
+ struct i40e_bus_info bus;
+ struct i40e_nvm_info nvm;
+ struct i40e_fc_info fc;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ u8 port;
+ bool adapter_stopped;
+
+ /* capabilities for entire device and PCI func */
+ struct i40e_hw_capabilities dev_caps;
+ struct i40e_hw_capabilities func_caps;
+
+ /* Flow Director shared filter space */
+ u16 fdir_shared_filter_count;
+
+ /* device profile info */
+ u8 pf_id;
+ u16 main_vsi_seid;
+
+ /* Closest numa node to the device */
+ u16 numa_node;
+
+ /* Admin Queue info */
+ struct i40e_adminq_info aq;
+
+ /* HMC info */
+ struct i40e_hmc_info hmc; /* HMC info struct */
+
+ /* LLDP/DCBX Status */
+ u16 dcbx_status;
+
+ /* DCBX info */
+ struct i40e_dcbx_config local_dcbx_config;
+ struct i40e_dcbx_config remote_dcbx_config;
+
+ /* debug mask */
+ u32 debug_mask;
+};
+
+struct i40e_driver_version {
+ u8 major_version;
+ u8 minor_version;
+ u8 build_version;
+ u8 subbuild_version;
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fd_id; /* Flow director filter id */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* ext status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ } wb; /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_buffer_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
+ } read;
+ struct {
+ struct {
+ struct {
+ union {
+ __le16 mirroring_status;
+ __le16 fcoe_ctx_id;
+ } mirr_fcoe;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ __le32 fcoe_param; /* FCoE DDP Context id */
+ } hi_dword;
+ } qword0;
+ struct {
+ /* status/error/pktype/length */
+ __le64 status_error_len;
+ } qword1;
+ struct {
+ __le16 ext_status; /* extended status */
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ union {
+ __le32 flex_bytes_lo;
+ __le32 pe_status;
+ } lo_dword;
+ union {
+ __le32 flex_bytes_hi;
+ __le32 fd_id;
+ } hi_dword;
+ } qword3;
+ } wb; /* writeback */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_STATUS_DD_SHIFT = 0,
+ I40E_RX_DESC_STATUS_EOF_SHIFT = 1,
+ I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2,
+ I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3,
+ I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
+ I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 3 BITS */
+ I40E_RX_DESC_STATUS_PIF_SHIFT = 8,
+ I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
+ I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
+ I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */
+ I40E_RX_DESC_STATUS_LPBK_SHIFT = 14
+};
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x7UL << \
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+ I40E_RX_DESC_FLTSTAT_NO_DATA = 0,
+ I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */
+ I40E_RX_DESC_FLTSTAT_RSV = 2,
+ I40E_RX_DESC_FLTSTAT_RSS_HASH = 3,
+};
+
+#define I40E_RXD_QW1_ERROR_SHIFT 19
+#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_ERROR_RXE_SHIFT = 0,
+ I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1,
+ I40E_RX_DESC_ERROR_HBO_SHIFT = 2,
+ I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */
+ I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
+ I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
+ I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+ I40E_RX_DESC_ERROR_L3L4E_NONE = 0,
+ I40E_RX_DESC_ERROR_L3L4E_PROT = 1,
+ I40E_RX_DESC_ERROR_L3L4E_FC = 2,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3,
+ I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT 30
+#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+ I40E_RX_PTYPE_L2_RESERVED = 0,
+ I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
+ I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
+ I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
+ I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
+ I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
+ I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
+ I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ I40E_RX_PTYPE_L2_ARP = 11,
+ I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
+ I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
+ I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
+ I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
+ I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
+ I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
+ I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21
+};
+
+struct i40e_rx_ptype_decoded {
+ u32 ptype:8;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:1;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+ I40E_RX_PTYPE_OUTER_L2 = 0,
+ I40E_RX_PTYPE_OUTER_IP = 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+ I40E_RX_PTYPE_OUTER_NONE = 0,
+ I40E_RX_PTYPE_OUTER_IPV4 = 0,
+ I40E_RX_PTYPE_OUTER_IPV6 = 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+ I40E_RX_PTYPE_NOT_FRAG = 0,
+ I40E_RX_PTYPE_FRAG = 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+ I40E_RX_PTYPE_TUNNEL_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+ I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
+ I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+ I40E_RX_PTYPE_INNER_PROT_NONE = 0,
+ I40E_RX_PTYPE_INNER_PROT_UDP = 1,
+ I40E_RX_PTYPE_INNER_PROT_TCP = 2,
+ I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
+ I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
+ I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+ I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \
+ I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK (0x1ULL << \
+ I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0,
+ I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
+ I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
+ I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
+ I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
+ I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
+ I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
+};
+
+enum i40e_rx_desc_pe_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */
+ I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */
+ I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */
+ I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24,
+ I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25,
+ I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26,
+ I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27,
+ I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28,
+ I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \
+ I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+ I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+ /* Note: These are predefined bit offsets */
+ I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
+ I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1,
+ I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
+ I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
+};
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+ I40E_TX_DESC_DTYPE_DATA = 0x0,
+ I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */
+ I40E_TX_DESC_DTYPE_CONTEXT = 0x1,
+ I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2,
+ I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8,
+ I40E_TX_DESC_DTYPE_DDP_CTX = 0x9,
+ I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC,
+ I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD,
+ I40E_TX_DESC_DTYPE_DESC_DONE = 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT 4
+#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+ I40E_TX_DESC_CMD_EOP = 0x0001,
+ I40E_TX_DESC_CMD_RS = 0x0002,
+ I40E_TX_DESC_CMD_ICRC = 0x0004,
+ I40E_TX_DESC_CMD_IL2TAG1 = 0x0008,
+ I40E_TX_DESC_CMD_DUMMY = 0x0010,
+ I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
+ I40E_TX_DESC_CMD_FCOET = 0x0080,
+ I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */
+ I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT 16
+#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \
+ I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+ /* Note: These are predefined bit offsets */
+ I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */
+ I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \
+ I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT 48
+#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd;
+ __le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT 4
+#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+ I40E_TX_CTX_DESC_TSO = 0x01,
+ I40E_TX_CTX_DESC_TSYN = 0x02,
+ I40E_TX_CTX_DESC_IL2TAG2 = 0x04,
+ I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08,
+ I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ I40E_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ I40E_TX_CTX_DESC_SWPE = 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT 50
+#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \
+ I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT 50
+#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \
+ I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+ I40E_TX_CTX_EXT_IP_NONE = 0x0,
+ I40E_TX_CTX_EXT_IP_IPV6 = 0x1,
+ I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ I40E_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT 9
+#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK (0x1ULL << \
+ I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
+ I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+struct i40e_filter_program_desc {
+ __le32 qindex_flex_ptype_vsi;
+ __le32 rsvd;
+ __le32 dtype_cmd_cntindex;
+ __le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \
+ I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \
+ I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \
+ I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+ /* Note: Value 0-25 are reserved for future use */
+ I40E_FILTER_PCTYPE_IPV4_TEREDO_UDP = 26,
+ I40E_FILTER_PCTYPE_IPV6_TEREDO_UDP = 27,
+ I40E_FILTER_PCTYPE_NONF_IPV4_1588_UDP = 28,
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
+ I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
+ I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
+ /* Note: Value 37 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV6_1588_UDP = 38,
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
+ I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
+ I40E_FILTER_PCTYPE_FRAG_IPV6 = 46,
+ /* Note: Value 47 is reserved for future use */
+ I40E_FILTER_PCTYPE_FCOE_OX = 48,
+ I40E_FILTER_PCTYPE_FCOE_RX = 49,
+ /* Note: Value 50-62 are reserved for future use */
+ I40E_FILTER_PCTYPE_L2_PAYLOAD = 63,
+};
+
+enum i40e_filter_program_desc_dest {
+ I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1,
+ I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2,
+ I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4
+#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+ I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1,
+ I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
+ I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+ I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+ I40E_FLOW_DIRECTOR_FLTR = 0,
+ I40E_PE_QUAD_HASH_FLTR = 1,
+ I40E_ETHERTYPE_FLTR,
+ I40E_FCOE_CTX_FLTR,
+ I40E_MAC_VLAN_FLTR,
+ I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 vsi_number;
+ u16 vsis_allocated;
+ u16 vsis_unallocated;
+ u16 flags;
+ u8 pf_num;
+ u8 vf_num;
+ u8 connection_type;
+ struct i40e_aqc_vsi_properties_data info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+ u64 rx_bytes; /* gorc */
+ u64 rx_unicast; /* uprc */
+ u64 rx_multicast; /* mprc */
+ u64 rx_broadcast; /* bprc */
+ u64 rx_discards; /* rdpc */
+ u64 rx_errors; /* repc */
+ u64 rx_missed; /* rmpc */
+ u64 rx_unknown_protocol; /* rupp */
+ u64 tx_bytes; /* gotc */
+ u64 tx_unicast; /* uptc */
+ u64 tx_multicast; /* mptc */
+ u64 tx_broadcast; /* bptc */
+ u64 tx_discards; /* tdpc */
+ u64 tx_errors; /* tepc */
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+ /* eth stats collected by the port */
+ struct i40e_eth_stats eth;
+
+ /* additional port specific stats */
+ u64 tx_dropped_link_down; /* tdold */
+ u64 crc_errors; /* crcerrs */
+ u64 illegal_bytes; /* illerrc */
+ u64 error_bytes; /* errbc */
+ u64 mac_local_faults; /* mlfc */
+ u64 mac_remote_faults; /* mrfc */
+ u64 rx_length_errors; /* rlec */
+ u64 link_xon_rx; /* lxonrxc */
+ u64 link_xoff_rx; /* lxoffrxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 link_xon_tx; /* lxontxc */
+ u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
+ u64 rx_size_64; /* prc64 */
+ u64 rx_size_127; /* prc127 */
+ u64 rx_size_255; /* prc255 */
+ u64 rx_size_511; /* prc511 */
+ u64 rx_size_1023; /* prc1023 */
+ u64 rx_size_1522; /* prc1522 */
+ u64 rx_size_big; /* prc9522 */
+ u64 rx_undersize; /* ruc */
+ u64 rx_fragments; /* rfc */
+ u64 rx_oversize; /* roc */
+ u64 rx_jabber; /* rjc */
+ u64 tx_size_64; /* ptc64 */
+ u64 tx_size_127; /* ptc127 */
+ u64 tx_size_255; /* ptc255 */
+ u64 tx_size_511; /* ptc511 */
+ u64 tx_size_1023; /* ptc1023 */
+ u64 tx_size_1522; /* ptc1522 */
+ u64 tx_size_big; /* ptc9522 */
+ u64 mac_short_packet_dropped; /* mspdc */
+ u64 checksum_error; /* xec */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD 0x00
+#define I40E_SR_EMP_MODULE_PTR 0x0F
+#define I40E_SR_NVM_IMAGE_VERSION 0x18
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27
+#define I40E_SR_NVM_EETRACK_LO 0x2D
+#define I40E_SR_NVM_EETRACK_HI 0x2E
+#define I40E_SR_VPD_PTR 0x2F
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define I40E_SR_SW_CHECKSUM_WORD 0x3F
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE 1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06
+#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define I40E_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE 0xBABA
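/* Editor's illustrative sketch, not part of the patch: the checksum rule
 * above applied to an in-memory copy of the Shadow RAM words.  The driver
 * itself reads words through the SRCTL interface; this only shows the
 * arithmetic.
 */
static u16 example_sr_sw_checksum(const u16 *sr_words, u32 nwords)
{
	u16 sum = 0;
	u32 i;

	for (i = 0; i < nwords; i++)
		if (i != I40E_SR_SW_CHECKSUM_WORD)
			sum += sr_words[i];

	/* choose the checksum word so that the full sum comes to 0xBABA */
	return (u16)(I40E_SR_SW_CHECKSUM_BASE - sum);
}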
+
+#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+
+enum i40e_switch_element_types {
+ I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
+ I40E_SWITCH_ELEMENT_TYPE_PF = 2,
+ I40E_SWITCH_ELEMENT_TYPE_VF = 3,
+ I40E_SWITCH_ELEMENT_TYPE_EMP = 4,
+ I40E_SWITCH_ELEMENT_TYPE_BMC = 6,
+ I40E_SWITCH_ELEMENT_TYPE_PE = 16,
+ I40E_SWITCH_ELEMENT_TYPE_VEB = 17,
+ I40E_SWITCH_ELEMENT_TYPE_PA = 18,
+ I40E_SWITCH_ELEMENT_TYPE_VSI = 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+ I40E_ETHER_TYPE_1588 = 0,
+ I40E_ETHER_TYPE_FIP = 1,
+ I40E_ETHER_TYPE_OUI_EXTENDED = 2,
+ I40E_ETHER_TYPE_MAC_CONTROL = 3,
+ I40E_ETHER_TYPE_LLDP = 4,
+ I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5,
+ I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6,
+ I40E_ETHER_TYPE_QCN_CNM = 7,
+ I40E_ETHER_TYPE_8021X = 8,
+ I40E_ETHER_TYPE_ARP = 9,
+ I40E_ETHER_TYPE_RSV1 = 10,
+ I40E_ETHER_TYPE_RSV2 = 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE 1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+ I40E_HASH_FILTER_SIZE_1K = 0,
+ I40E_HASH_FILTER_SIZE_2K = 1,
+ I40E_HASH_FILTER_SIZE_4K = 2,
+ I40E_HASH_FILTER_SIZE_8K = 3,
+ I40E_HASH_FILTER_SIZE_16K = 4,
+ I40E_HASH_FILTER_SIZE_32K = 5,
+ I40E_HASH_FILTER_SIZE_64K = 6,
+ I40E_HASH_FILTER_SIZE_128K = 7,
+ I40E_HASH_FILTER_SIZE_256K = 8,
+ I40E_HASH_FILTER_SIZE_512K = 9,
+ I40E_HASH_FILTER_SIZE_1M = 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE 512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+ I40E_DMA_CNTX_SIZE_512 = 0,
+ I40E_DMA_CNTX_SIZE_1K = 1,
+ I40E_DMA_CNTX_SIZE_2K = 2,
+ I40E_DMA_CNTX_SIZE_4K = 3,
+ I40E_DMA_CNTX_SIZE_8K = 4,
+ I40E_DMA_CNTX_SIZE_16K = 5,
+ I40E_DMA_CNTX_SIZE_32K = 6,
+ I40E_DMA_CNTX_SIZE_64K = 7,
+ I40E_DMA_CNTX_SIZE_128K = 8,
+ I40E_DMA_CNTX_SIZE_256K = 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+ I40E_HASH_LUT_SIZE_128 = 0,
+ I40E_HASH_LUT_SIZE_512 = 1,
+};
+
+/* Structure to hold per-PF filter control settings */
+struct i40e_filter_control_settings {
+ /* number of PE Quad Hash filter buckets */
+ enum i40e_hash_filter_size pe_filt_num;
+ /* number of PE Quad Hash contexts */
+ enum i40e_dma_cntx_size pe_cntx_num;
+ /* number of FCoE filter buckets */
+ enum i40e_hash_filter_size fcoe_filt_num;
+ /* number of FCoE DDP contexts */
+ enum i40e_dma_cntx_size fcoe_cntx_num;
+ /* size of the Hash LUT */
+ enum i40e_hash_lut_size hash_lut_size;
+ /* enable FDIR filters for PF and its VFs */
+ bool enable_fdir;
+ /* enable Ethertype filters for PF and its VFs */
+ bool enable_ethtype;
+ /* enable MAC/VLAN filters for PF and its VFs */
+ bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+ u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */
+ u16 etype_used; /* Used perfect EtherType filters */
+ u16 mac_etype_free; /* Un-used perfect match MAC/EtherType filters */
+ u16 etype_free; /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+ I40E_RESET_POR = 0,
+ I40E_RESET_CORER = 1,
+ I40E_RESET_GLOBR = 2,
+ I40E_RESET_EMPR = 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR 0xF
+struct i40e_lldp_variables {
+ u16 length;
+ u16 adminstatus;
+ u16 msgfasttx;
+ u16 msgtxinterval;
+ u16 txparams;
+ u16 timers;
+ u16 crc8;
+};
+
+#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
new file mode 100644
index 00000000000..cc6654f1dac
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -0,0 +1,368 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum of
+ * three VSIs. All the queue indexes are relative to the VSI. Each VF can
+ * have a maximum of sixteen queues for all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value is of
+ * i40e_status_code type, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The VF sends requests to the PF for the following ops. */
+ I40E_VIRTCHNL_OP_UNKNOWN = 0,
+ I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+ I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_VIRTCHNL_OP_FCOE,
+/* The PF sends status change events to VFs using the following op. */
+ I40E_VIRTCHNL_OP_EVENT,
+};
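/* Editor's illustrative sketch, not part of the patch: the rough VF bring-up
 * order described in the header comment above, written out as opcodes.
 */
static const enum i40e_virtchnl_ops example_vf_init_order[] = {
	I40E_VIRTCHNL_OP_VERSION,		/* validate API version first */
	I40E_VIRTCHNL_OP_RESET_VF,		/* then request a reset */
	I40E_VIRTCHNL_OP_GET_VF_RESOURCES,	/* learn VSIs, queues, vectors */
	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,	/* configure Tx/Rx queues */
	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,	/* map queues to vectors */
	I40E_VIRTCHNL_OP_ENABLE_QUEUES,		/* start the queues */
	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,	/* optionally add filters */
};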
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+ u8 pad[8]; /* AQ flags/opcode/len/retval fields */
+ enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+ i40e_status v_retval; /* ditto for desc->retval */
+ u32 vfid; /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures.*/
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR 1
+#define I40E_VIRTCHNL_VERSION_MINOR 0
+struct i40e_virtchnl_version_info {
+ u32 major;
+ u32 minor;
+};
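/* Editor's illustrative sketch, not part of the patch: the compatibility
 * rule described above.  A major mismatch means the VF cannot operate; a
 * minor mismatch only deserves a warning in the system log.
 */
static bool example_vf_version_compatible(const struct i40e_virtchnl_version_info *pf_ver)
{
	if (pf_ver->major != I40E_VIRTCHNL_VERSION_MAJOR)
		return false;
	if (pf_ver->minor != I40E_VIRTCHNL_VERSION_MINOR)
		pr_warn("i40e virtchnl: minor version mismatch\n");
	return true;
}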
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to PF with no parameters
+ * PF does NOT respond! VF driver must delay, then poll VFGEN_RSTAT register
+ * until reset completion is indicated. The admin queue must be reinitialized
+ * after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * VF sends this request to PF with no parameters
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ enum i40e_vsi_type vsi_type;
+ u16 qset_handle;
+ u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2 0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE 0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
+
+struct i40e_virtchnl_vf_resource {
+ u16 num_vsis;
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+
+ u32 vf_offload_flags;
+ u32 max_fcoe_contexts;
+ u32 max_fcoe_filters;
+
+ struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u16 ring_len; /* number of descriptors, multiple of 8 */
+ u16 headwb_enabled;
+ u64 dma_ring_addr;
+ u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+ u16 vsi_id;
+ u16 queue_id;
+ u32 ring_len; /* number of descriptors, multiple of 32 */
+ u16 hdr_size;
+ u16 splithdr_enabled;
+ u32 databuffer_size;
+ u32 max_pkt_size;
+ u64 dma_ring_addr;
+ enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+ /* NOTE: vsi_id and queue_id should be identical for both queues. */
+ struct i40e_virtchnl_txq_info txq;
+ struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+ u16 vsi_id;
+ u16 num_queue_pairs;
+ struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+ u16 vsi_id;
+ u16 vector_id;
+ u16 rxq_map;
+ u16 txq_map;
+ u16 rxitr_idx;
+ u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+ u16 num_vectors;
+ struct i40e_virtchnl_vector_map vecmap[1];
+};
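/* Editor's illustrative sketch, not part of the patch: mapping Rx/Tx queues
 * 0 and 1 of a VSI to MSI-X vector 1 using the bitmaps above (vector 0 is
 * left for the "other" causes).  The ITR index values used here are an
 * assumption, matching the I40E_RX_ITR/I40E_TX_ITR definitions in
 * i40e_txrx.h.
 */
static void example_fill_vector_map(struct i40e_virtchnl_vector_map *vecmap,
				    u16 vsi_id)
{
	vecmap->vsi_id = vsi_id;
	vecmap->vector_id = 1;
	vecmap->rxq_map = BIT(0) | BIT(1);	/* Rx queues 0 and 1 */
	vecmap->txq_map = BIT(0) | BIT(1);	/* Tx queues 0 and 1 */
	vecmap->rxitr_idx = 0;			/* assumed: I40E_RX_ITR */
	vecmap->txitr_idx = 1;			/* assumed: I40E_TX_ITR */
}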
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+ u16 vsi_id;
+ u16 pad;
+ u32 rx_queues;
+ u32 tx_queues;
+};
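/* Editor's illustrative sketch, not part of the patch: selecting the first
 * four queue pairs of a VSI for I40E_VIRTCHNL_OP_ENABLE_QUEUES.
 */
static void example_fill_queue_select(struct i40e_virtchnl_queue_select *qsel,
				      u16 vsi_id)
{
	qsel->vsi_id = vsi_id;
	qsel->pad = 0;
	qsel->rx_queues = 0xf;	/* Rx queues 0-3 */
	qsel->tx_queues = 0xf;	/* Tx queues 0-3 */
}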
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+ u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+ u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+ u16 vsi_id;
+ u16 num_elements;
+ struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+ u16 vsi_id;
+ u16 num_elements;
+ u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+ u16 vsi_id;
+ u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC 0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC 0x00000002
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+ I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+ I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+ I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+ I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO 0
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM 255
+
+struct i40e_virtchnl_pf_event {
+ enum i40e_virtchnl_event_codes event;
+ union {
+ struct {
+ enum i40e_aq_link_speed link_speed;
+ bool link_status;
+ } link_event;
+ } event_data;
+
+ int severity;
+};
+
+/* The following are TBD, not necessary for LAN functionality.
+ * I40E_VIRTCHNL_OP_FCOE
+ */
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF
+ * I40E_VFGEN_RSTAT on the VF
+ * When the PF initiates a reset, it writes 0
+ * When the reset is complete, it writes 1
+ * When the PF detects that the VF has recovered, it writes 2
+ * VF checks this register periodically to determine if a reset has occurred,
+ * then polls it to know when the reset is complete.
+ * If either the PF or VF reads the register while the hardware
+ * is in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * will result in 3 (I40E_VFR_UNKNOWN).
+ */
+enum i40e_vfr_states {
+ I40E_VFR_INPROGRESS = 0,
+ I40E_VFR_COMPLETED,
+ I40E_VFR_VFACTIVE,
+ I40E_VFR_UNKNOWN,
+};
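/* Editor's illustrative sketch, not part of the patch: reading the reset
 * state as described above.  The two-bit mask is an assumption chosen to
 * show why a 0xDEADBEEF read collapses to I40E_VFR_UNKNOWN
 * (0xDEADBEEF & 0x3 == 3).
 */
static enum i40e_vfr_states example_read_vfr_state(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_VFGEN_RSTAT);

	return (enum i40e_vfr_states)(reg & 0x3);
}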
+
+#endif /* _I40E_VIRTCHNL_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
new file mode 100644
index 00000000000..8967e58e240
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -0,0 +1,2335 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include "i40e.h"
+
+/***********************misc routines*****************************/
+
+/**
+ * i40e_vc_isvalid_vsi_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vf relative vsi id
+ *
+ * check for the valid vsi id
+ **/
+static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u8 vsi_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return pf->vsi[vsi_id]->vf_id == vf->vf_id;
+}
+
+/**
+ * i40e_vc_isvalid_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_id: vsi id
+ * @qid: vsi relative queue id
+ *
+ * check for the valid queue id
+ **/
+static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u8 vsi_id,
+ u8 qid)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return qid < pf->vsi[vsi_id]->num_queue_pairs;
+}
+
+/**
+ * i40e_vc_isvalid_vector_id
+ * @vf: pointer to the vf info
+ * @vector_id: vf relative vector id
+ *
+ * check for the valid vector id
+ **/
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
+}
+
+/***********************vf resource mgmt routines*****************/
+
+/**
+ * i40e_vc_get_pf_queue_id
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue id
+ *
+ * return pf relative queue id
+ **/
+static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u8 vsi_idx,
+ u8 vsi_queue_id)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vsi_idx];
+ u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
+
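+ /* the VSI maps its queues either individually (noncontiguous map)
+ * or as a contiguous block starting at queue_mapping[0]
+ */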
+ if (le16_to_cpu(vsi->info.mapping_flags) &
+ I40E_AQ_VSI_QUE_MAP_NONCONTIG)
+ pf_queue_id =
+ le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
+ else
+ pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
+ vsi_queue_id;
+
+ return pf_queue_id;
+}
+
+/**
+ * i40e_ctrl_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static int i40e_ctrl_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ enum i40e_queue_ctrl ctrl)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool writeback = false;
+ u16 pf_queue_id;
+ int ret = 0;
+ u32 reg;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = rd32(hw, I40E_QTX_ENA(pf_queue_id));
+
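+ /* QENA_REQ requests a queue state change; QENA_STAT reflects the
+ * state the hardware has actually reached, so the *CHECK cases
+ * read it back instead of writing
+ */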
+ switch (ctrl) {
+ case I40E_QUEUE_CTRL_ENABLE:
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_ENABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+ break;
+ case I40E_QUEUE_CTRL_DISABLE:
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_DISABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLE:
+ reg |= I40E_QTX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+ ret = (reg & I40E_QTX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ if (!ret) {
+ reg &= ~I40E_QTX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (writeback) {
+ wr32(hw, I40E_QTX_ENA(pf_queue_id), reg);
+ i40e_flush(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_ctrl_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @ctrl: control flags
+ *
+ * enable/disable/enable check/disable check
+ **/
+static int i40e_ctrl_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ enum i40e_queue_ctrl ctrl)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool writeback = false;
+ u16 pf_queue_id;
+ int ret = 0;
+ u32 reg;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = rd32(hw, I40E_QRX_ENA(pf_queue_id));
+
+ switch (ctrl) {
+ case I40E_QUEUE_CTRL_ENABLE:
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_ENABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? 0 : -EPERM;
+ break;
+ case I40E_QUEUE_CTRL_DISABLE:
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_DISABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLE:
+ reg |= I40E_QRX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ break;
+ case I40E_QUEUE_CTRL_FASTDISABLECHECK:
+ ret = (reg & I40E_QRX_ENA_QENA_STAT_MASK) ? -EPERM : 0;
+ if (!ret) {
+ reg &= ~I40E_QRX_ENA_FAST_QDIS_MASK;
+ writeback = true;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (writeback) {
+ wr32(hw, I40E_QRX_ENA(pf_queue_id), reg);
+ i40e_flush(hw);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_config_irq_link_list
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vecmap: irq map info
+ *
+ * configure irq link list from the map
+ **/
+static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
+ struct i40e_virtchnl_vector_map *vecmap)
+{
+ unsigned long linklistmap = 0, tempmap;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u16 vsi_queue_id, pf_queue_id;
+ enum i40e_queue_type qtype;
+ u16 next_q, vector_id;
+ u32 reg, reg_idx;
+ u16 itr_idx = 0;
+
+ vector_id = vecmap->vector_id;
+ /* setup the head */
+ if (0 == vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+ * vf->vf_id) + (vector_id - 1));
+
+ if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
+ /* Special case - No queues mapped on this vector */
+ wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto irq_list_done;
+ }
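+ /* build an interleaved bitmap with one rx bit and one tx bit per
+ * VSI queue so the link list can be walked in queue order
+ */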
+ tempmap = vecmap->rxq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ linklistmap |= (1 <<
+ (I40E_VIRTCHNL_SUPPORTED_QTYPES *
+ vsi_queue_id));
+ vsi_queue_id =
+ find_next_bit(&tempmap, I40E_MAX_VSI_QP, vsi_queue_id + 1);
+ }
+
+ tempmap = vecmap->txq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ linklistmap |= (1 <<
+ (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
+ + 1));
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
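+ /* the first queue in the bitmap becomes the head of the link list */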
+ next_q = find_first_bit(&linklistmap,
+ (I40E_MAX_VSI_QP *
+ I40E_VIRTCHNL_SUPPORTED_QTYPES));
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+ reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
+
+ wr32(hw, reg_idx, reg);
+
+ while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(pf_queue_id);
+ itr_idx = vecmap->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(pf_queue_id);
+ itr_idx = vecmap->txitr_idx;
+ break;
+ default:
+ break;
+ }
+
+ next_q = find_next_bit(&linklistmap,
+ (I40E_MAX_VSI_QP *
+ I40E_VIRTCHNL_SUPPORTED_QTYPES),
+ next_q + 1);
+ if (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx,
+ vsi_queue_id);
+ } else {
+ pf_queue_id = I40E_QUEUE_END_OF_LIST;
+ qtype = 0;
+ }
+
+ /* format for the RQCTL & TQCTL regs is same */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ wr32(hw, reg_idx, reg);
+ }
+
+irq_list_done:
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_config_vsi_tx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure tx queue
+ **/
+static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ struct i40e_virtchnl_txq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_txq tx_ctx;
+ u16 pf_queue_id;
+ u32 qtx_ctl;
+ int ret = 0;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+ /* clear the context structure first */
+ memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));
+
+ /* only set the required fields */
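+ /* the queue context stores the ring base address in 128-byte units */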
+ tx_ctx.base = info->dma_ring_addr / 128;
+ tx_ctx.qlen = info->ring_len;
+ tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
+ tx_ctx.rdylist_act = 0;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Tx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Tx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_context;
+ }
+
+ /* associate this queue with the PCI VF function */
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
+ qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+ & I40E_QTX_CTL_PF_INDX_MASK);
+ qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
+ << I40E_QTX_CTL_VFVM_INDX_SHIFT)
+ & I40E_QTX_CTL_VFVM_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
+ i40e_flush(hw);
+
+error_context:
+ return ret;
+}
+
+/**
+ * i40e_config_vsi_rx_queue
+ * @vf: pointer to the vf info
+ * @vsi_idx: index of VSI in PF struct
+ * @vsi_queue_id: vsi relative queue index
+ * @info: config. info
+ *
+ * configure rx queue
+ **/
+static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
+ u16 vsi_queue_id,
+ struct i40e_virtchnl_rxq_info *info)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
+ u16 pf_queue_id;
+ int ret = 0;
+
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_idx, vsi_queue_id);
+
+ /* clear the context structure first */
+ memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+
+ /* only set the required fields */
+ rx_ctx.base = info->dma_ring_addr / 128;
+ rx_ctx.qlen = info->ring_len;
+
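+ /* header split puts packet headers in a separate buffer; only
+ * program it if the VF asked for it
+ */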
+ if (info->splithdr_enabled) {
+ rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
+ I40E_RX_SPLIT_IP |
+ I40E_RX_SPLIT_TCP_UDP |
+ I40E_RX_SPLIT_SCTP;
+ /* header length validation */
+ if (info->hdr_size > ((2 * 1024) - 64)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ /* set splitalways mode 10b */
+ rx_ctx.dtype = 0x2;
+ }
+
+ /* databuffer length validation */
+ if (info->databuffer_size > ((16 * 1024) - 128)) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+ /* max pkt. length validation */
+ if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
+ ret = -EINVAL;
+ goto error_param;
+ }
+ rx_ctx.rxmax = info->max_pkt_size;
+
+ /* enable 32bytes desc always */
+ rx_ctx.dsize = 1;
+
+ /* default values */
+ rx_ctx.tphrdesc_ena = 1;
+ rx_ctx.tphwdesc_ena = 1;
+ rx_ctx.tphdata_ena = 1;
+ rx_ctx.tphhead_ena = 1;
+ rx_ctx.lrxqthresh = 2;
+ rx_ctx.crcstrip = 1;
+
+ /* clear the context in the HMC */
+ ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to clear VF LAN Rx queue context %d, error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+ /* set the context in the HMC */
+ ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set VF LAN Rx queue context %d error: %d\n",
+ pf_queue_id, ret);
+ ret = -ENOENT;
+ goto error_param;
+ }
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_alloc_vsi_res
+ * @vf: pointer to the vf info
+ * @type: type of VSI to allocate
+ *
+ * alloc vf vsi context & resources
+ **/
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+{
+ struct i40e_mac_filter *f = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vsi *vsi;
+ int ret = 0;
+
+ vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev,
+ "add vsi failed for vf %d, aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ ret = -ENOENT;
+ goto error_alloc_vsi_res;
+ }
+ if (type == I40E_VSI_SRIOV) {
+ vf->lan_vsi_index = vsi->idx;
+ vf->lan_vsi_id = vsi->id;
+ dev_info(&pf->pdev->dev,
+ "LAN VSI index %d, VSI id %d\n",
+ vsi->idx, vsi->id);
+ f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+ 0, true, false);
+ }
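+ /* without a default MAC filter the VSI is unusable, so treat a
+ * missing filter as a fatal allocation error
+ */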
+ if (!f) {
+ dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
+ ret = -ENOMEM;
+ goto error_alloc_vsi_res;
+ }
+
+ /* program mac filter */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ goto error_alloc_vsi_res;
+ }
+
+ /* accept bcast pkts. by default */
+ ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "set vsi bcast failed for vf %d, vsi %d, aq_err %d\n",
+ vf->vf_id, vsi->idx, pf->hw.aq.asq_last_status);
+ ret = -EINVAL;
+ }
+
+error_alloc_vsi_res:
+ return ret;
+}
+
+/**
+ * i40e_reset_vf
+ * @vf: pointer to the vf structure
+ * @flr: VFLR was issued or not
+ *
+ * reset the vf
+ **/
+int i40e_reset_vf(struct i40e_vf *vf, bool flr)
+{
+ int ret = -ENOENT;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, msix_vf;
+ bool rsd = false;
+ u16 pf_queue_id;
+ int i, j;
+
+ /* warn the VF */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_INPROGRESS);
+
+ clear_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+ /* The PF triggers a VFR only when the VF requests it; in the
+ * case of VFLR, the HW triggers the VFR itself
+ */
+ if (!flr) {
+ /* reset vf using VPGEN_VFRTRIG reg */
+ reg = I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+ }
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ for (i = 0; i < 4; i++) {
+ /* vf reset requires driver to first reset the
+ * vf & then poll the status register to make sure
+ * that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
+ rsd = true;
+ break;
+ }
+ }
+
+ if (!rsd)
+ dev_err(&pf->pdev->dev, "VF reset check timeout %d\n",
+ vf->vf_id);
+
+ /* fast disable qps */
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLE);
+ ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLE);
+ }
+
+ /* Queue enable/disable requires driver to
+ * first reset the vf & then poll the status register
+ * to make sure that the requested op was completed
+ * successfully
+ */
+ udelay(10);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ ret = i40e_ctrl_vsi_tx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Queue control check failed on Tx queue %d of VSI %d VF %d\n",
+ j, vf->lan_vsi_index, vf->vf_id);
+ ret = i40e_ctrl_vsi_rx_queue(vf, vf->lan_vsi_index, j,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK);
+ if (ret)
+ dev_info(&pf->pdev->dev,
+ "Queue control check failed on Rx queue %d of VSI %d VF %d\n",
+ j, vf->lan_vsi_index, vf->vf_id);
+ }
+
+ /* clear the irq settings */
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ wr32(hw, reg_idx, reg);
+ i40e_flush(hw);
+ }
+ /* disable interrupts so the VF starts in a known state */
+ for (i = 0; i < msix_vf; i++) {
+ /* format is same for both registers */
+ if (0 == i)
+ reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
+ else
+ reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
+ (vf->vf_id))
+ + (i - 1));
+ wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ i40e_flush(hw);
+ }
+
+ /* set the defaults for the rqctl & tqctl registers */
+ reg = (I40E_QINT_RQCTL_NEXTQ_INDX_MASK | I40E_QINT_RQCTL_ITR_INDX_MASK |
+ I40E_QINT_RQCTL_NEXTQ_TYPE_MASK);
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ pf_queue_id = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ wr32(hw, I40E_QINT_RQCTL(pf_queue_id), reg);
+ wr32(hw, I40E_QINT_TQCTL(pf_queue_id), reg);
+ }
+
+ /* clear the reset bit in the VPGEN_VFRTRIG reg */
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ /* tell the VF the reset is done */
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_COMPLETED);
+ i40e_flush(hw);
+
+ return ret;
+}
+
+/**
+ * i40e_enable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * enable vf mappings
+ **/
+static void i40e_enable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, total_queue_pairs = 0;
+ int j;
+
+ /* Tell the hardware we're using noncontiguous mapping. HW requires
+ * that VF queues be mapped using this method, even when they are
+ * contiguous in real life
+ */
+ wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* enable VF vplan_qtable mappings */
+ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
+
+ /* map PF queues to VF queues */
+ for (j = 0; j < pf->vsi[vf->lan_vsi_index]->num_queue_pairs; j++) {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index, j);
+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
+ total_queue_pairs++;
+ }
+
+ /* map PF queues to VSI */
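+ /* each VSILAN_QTABLE register packs two PF queue ids; slots past
+ * the VSI's queue count are marked unused with 0x07FF07FF
+ */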
+ for (j = 0; j < 7; j++) {
+ if (j * 2 >= pf->vsi[vf->lan_vsi_index]->num_queue_pairs) {
+ reg = 0x07FF07FF; /* unused */
+ } else {
+ u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ j * 2);
+ reg = qid;
+ qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_index,
+ (j * 2) + 1);
+ reg |= qid << 16;
+ }
+ wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg);
+ }
+
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_disable_vf_mappings
+ * @vf: pointer to the vf info
+ *
+ * disable vf mappings
+ **/
+static void i40e_disable_vf_mappings(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ /* disable qp mappings */
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
+ for (i = 0; i < I40E_MAX_VSI_QP; i++)
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
+ I40E_QUEUE_END_OF_LIST);
+ i40e_flush(hw);
+}
+
+/**
+ * i40e_free_vf_res
+ * @vf: pointer to the vf info
+ *
+ * free vf resources
+ **/
+static void i40e_free_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+
+ /* free vsi & disconnect it from the parent uplink */
+ if (vf->lan_vsi_index) {
+ i40e_vsi_release(pf->vsi[vf->lan_vsi_index]);
+ vf->lan_vsi_index = 0;
+ vf->lan_vsi_id = 0;
+ }
+ /* reset some of the state variables keeping
+ * track of the resources
+ */
+ vf->num_queue_pairs = 0;
+ vf->vf_states = 0;
+}
+
+/**
+ * i40e_alloc_vf_res
+ * @vf: pointer to the vf info
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vf_res(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int total_queue_pairs = 0;
+ int ret;
+
+ /* allocate hw vsi context & associated resources */
+ ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+ if (ret)
+ goto error_alloc;
+ total_queue_pairs += pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ /* store the total qps number for the runtime
+ * vf req validation
+ */
+ vf->num_queue_pairs = total_queue_pairs;
+
+ /* vf is now completely initialized */
+ set_bit(I40E_VF_STAT_INIT, &vf->vf_states);
+
+error_alloc:
+ if (ret)
+ i40e_free_vf_res(vf);
+
+ return ret;
+}
+
+/**
+ * i40e_vfs_are_assigned
+ * @pf: pointer to the pf structure
+ *
+ * Determine if any VFs are assigned to VMs
+ **/
+static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
+{
+ struct pci_dev *pdev = pf->pdev;
+ struct pci_dev *vfdev;
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, I40E_VF_DEVICE_ID, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && pci_physfn(vfdev) == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ I40E_VF_DEVICE_ID,
+ vfdev);
+ }
+
+ return false;
+}
+
+/**
+ * i40e_free_vfs
+ * @pf: pointer to the pf structure
+ *
+ * free vf resources
+ **/
+void i40e_free_vfs(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int i;
+
+ if (!pf->vf)
+ return;
+
+ /* Disable interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, I40E_PFINT_DYN_CTL0, 0);
+ i40e_flush(hw);
+
+ /* free up vf resources */
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
+ i40e_free_vf_res(&pf->vf[i]);
+ /* disable qp mappings */
+ i40e_disable_vf_mappings(&pf->vf[i]);
+ }
+
+ kfree(pf->vf);
+ pf->vf = NULL;
+ pf->num_alloc_vfs = 0;
+
+ if (!i40e_vfs_are_assigned(pf))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev,
+ "unable to disable SR-IOV because VFs are assigned.\n");
+
+ /* Re-enable interrupt 0. */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
+ i40e_flush(hw);
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * i40e_alloc_vfs
+ * @pf: pointer to the pf structure
+ * @num_alloc_vfs: number of vfs to allocate
+ *
+ * allocate vf resources
+ **/
+static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+{
+ struct i40e_vf *vfs;
+ int i, ret = 0;
+
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "pci_enable_sriov failed with error %d!\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
+
+ /* allocate memory */
+ vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
+ if (!vfs) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* apply default profile */
+ for (i = 0; i < num_alloc_vfs; i++) {
+ vfs[i].pf = pf;
+ vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
+ vfs[i].vf_id = i;
+
+ /* assign default capabilities */
+ set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+
+ ret = i40e_alloc_vf_res(&vfs[i]);
+ i40e_reset_vf(&vfs[i], true);
+ if (ret)
+ break;
+
+ /* enable vf vplan_qtable mappings */
+ i40e_enable_vf_mappings(&vfs[i]);
+ }
+ pf->vf = vfs;
+ pf->num_alloc_vfs = num_alloc_vfs;
+
+err_alloc:
+ if (ret)
+ i40e_free_vfs(pf);
+err_iov:
+ return ret;
+}
+
+#endif
+/**
+ * i40e_pci_sriov_enable
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs
+ **/
+static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int pre_existing_vfs = pci_num_vf(pdev);
+ int err = 0;
+
+ dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
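+ /* if a different number of VFs is already enabled, tear them down
+ * first; if the count already matches there is nothing to do
+ */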
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ i40e_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ goto out;
+
+ if (num_vfs > pf->num_req_vfs) {
+ err = -EPERM;
+ goto err_out;
+ }
+
+ err = i40e_alloc_vfs(pf, num_vfs);
+ if (err) {
+ dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
+ goto err_out;
+ }
+
+out:
+ return num_vfs;
+
+err_out:
+ return err;
+#endif
+ return 0;
+}
+
+/**
+ * i40e_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ if (num_vfs)
+ return i40e_pci_sriov_enable(pdev, num_vfs);
+
+ i40e_free_vfs(pf);
+ return 0;
+}
+
+/***********************virtual channel routines******************/
+
+/**
+ * i40e_vc_send_msg_to_vf
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to vf
+ **/
+static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ /* single place to detect unsuccessful return values */
+ if (v_retval) {
+ vf->num_invalid_msgs++;
+ dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
+ v_opcode, v_retval);
+ if (vf->num_invalid_msgs >
+ I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
+ dev_err(&pf->pdev->dev,
+ "Number of invalid messages exceeded for VF %d\n",
+ vf->vf_id);
+ dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
+ set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+ }
+ } else {
+ vf->num_valid_msgs++;
+ }
+
+ aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_vc_send_resp_to_vf
+ * @vf: pointer to the vf info
+ * @opcode: operation code
+ * @retval: return value
+ *
+ * send resp msg to vf
+ **/
+static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
+ enum i40e_virtchnl_ops opcode,
+ i40e_status retval)
+{
+ return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
+}
+
+/**
+ * i40e_vc_get_version_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request the API version used by the PF
+ **/
+static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_version_info info = {
+ I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
+ };
+
+ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS, (u8 *)&info,
+ sizeof(struct
+ i40e_virtchnl_version_info));
+}
+
+/**
+ * i40e_vc_get_vf_resources_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to request its resources
+ **/
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_vf_resource *vfres = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+ int i = 0, len = 0;
+ int num_vsis = 1;
+ int ret;
+
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ len = (sizeof(struct i40e_virtchnl_vf_resource) +
+ sizeof(struct i40e_virtchnl_vsi_resource) * num_vsis);
+
+ vfres = kzalloc(len, GFP_KERNEL);
+ if (!vfres) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = pf->vsi[vf->lan_vsi_index];
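+ /* VLAN offload is only advertised when no port VLAN (pvid) is
+ * forced on the VSI
+ */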
+ if (!vsi->info.pvid)
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->num_vsis = num_vsis;
+ vfres->num_queue_pairs = vf->num_queue_pairs;
+ vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ if (vf->lan_vsi_index) {
+ vfres->vsi_res[i].vsi_id = vf->lan_vsi_index;
+ vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[i].num_queue_pairs =
+ pf->vsi[vf->lan_vsi_index]->num_queue_pairs;
+ memcpy(vfres->vsi_res[i].default_mac_addr,
+ vf->default_lan_addr.addr, ETH_ALEN);
+ i++;
+ }
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+
+err:
+ /* send the response back to the vf */
+ ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ aq_ret, (u8 *)vfres, len);
+
+ kfree(vfres);
+ return ret;
+}
+
+/**
+ * i40e_vc_reset_vf_msg
+ * @vf: pointer to the vf info
+ *
+ * called from the vf to reset itself; unlike other virtchnl
+ * messages, the pf driver doesn't send a response back to the vf
+ **/
+static int i40e_vc_reset_vf_msg(struct i40e_vf *vf)
+{
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+ return -ENOENT;
+
+ return i40e_reset_vf(vf, false);
+}
+
+/**
+ * i40e_vc_config_promiscuous_mode_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the promiscuous mode of
+ * vf vsis
+ **/
+static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
+ u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_promisc_info *info =
+ (struct i40e_virtchnl_promisc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ bool allmulti = false;
+ bool promisc = false;
+ i40e_status aq_ret;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
+ (pf->vsi[info->vsi_id]->type != I40E_VSI_FCOE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ promisc = true;
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
+ promisc, NULL);
+ if (aq_ret)
+ goto error_param;
+
+ if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
+ allmulti, NULL);
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the rx/tx
+ * queues
+ **/
+static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *qci =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ struct i40e_virtchnl_queue_pair_info *qpi;
+ u16 vsi_id, vsi_queue_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi_id = qci->vsi_id;
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ vsi_queue_id = qpi->txq.queue_id;
+ if ((qpi->txq.vsi_id != vsi_id) ||
+ (qpi->rxq.vsi_id != vsi_id) ||
+ (qpi->rxq.queue_id != vsi_queue_id) ||
+ !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->rxq) ||
+ i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
+ &qpi->txq)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_irq_map_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to configure the irq to
+ * queue map
+ **/
+static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_irq_map_info *irqmap_info =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ u16 vsi_id, vsi_queue_id, vector_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < irqmap_info->num_vectors; i++) {
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* validate msg params */
+ if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* lookout for the invalid queue index */
+ tempmap = map->rxq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ tempmap = map->txq_map;
+ vsi_queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (vsi_queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ vsi_queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi_queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ vsi_queue_id + 1);
+ }
+
+ i40e_config_irq_link_list(vf, vsi_id, map);
+ }
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_enable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to enable all or specific queue(s)
+ **/
+static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ u16 vsi_id = vqs->vsi_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ u16 queue_id;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ /* Poll the status register to make sure that the
+ * requested op was completed successfully
+ */
+ udelay(10);
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on RX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_ENABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on TX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_disable_queues_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to disable all or specific
+ * queue(s)
+ **/
+static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ u16 vsi_id = vqs->vsi_id;
+ i40e_status aq_ret = 0;
+ unsigned long tempmap;
+ u16 queue_id;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLE);
+
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ /* Poll the status register to make sure that the
+ * requested op was completed successfully
+ */
+ udelay(10);
+
+ tempmap = vqs->rx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_rx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on RX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+ tempmap = vqs->tx_queues;
+ queue_id = find_first_bit(&tempmap, I40E_MAX_VSI_QP);
+ while (queue_id < I40E_MAX_VSI_QP) {
+ if (i40e_ctrl_vsi_tx_queue(vf, vsi_id, queue_id,
+ I40E_QUEUE_CTRL_DISABLECHECK)) {
+ dev_err(&pf->pdev->dev,
+ "Queue control check failed on TX queue %d of VSI %d VF %d\n",
+ queue_id, vsi_id, vf->vf_id);
+ }
+ queue_id = find_next_bit(&tempmap, I40E_MAX_VSI_QP,
+ queue_id + 1);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_stats_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf to get vsi stats
+ **/
+static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_queue_select *vqs =
+ (struct i40e_virtchnl_queue_select *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_eth_stats stats;
+ i40e_status aq_ret = 0;
+ struct i40e_vsi *vsi;
+
+ memset(&stats, 0, sizeof(struct i40e_eth_stats));
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = pf->vsi[vqs->vsi_id];
+ if (!vsi) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ i40e_update_eth_stats(vsi);
+ memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
+
+error_param:
+ /* send the response back to the vf */
+ return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS, aq_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * i40e_vc_add_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * add guest mac address filter
+ **/
+static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_ether_addr_list *al =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = al->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ if (is_broadcast_ether_addr(al->list[i].addr) ||
+ is_zero_ether_addr(al->list[i].addr)) {
+ dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
+ al->list[i].addr);
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vsi_id];
+
+ /* add new addresses to the list */
+ for (i = 0; i < al->num_elements; i++) {
+ struct i40e_mac_filter *f;
+
+ f = i40e_find_mac(vsi, al->list[i].addr, true, false);
+ if (!f) {
+ if (i40e_is_vsi_in_vlan(vsi))
+ f = i40e_put_mac_in_vlan(vsi, al->list[i].addr,
+ true, false);
+ else
+ f = i40e_add_filter(vsi, al->list[i].addr, -1,
+ true, false);
+ }
+
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF MAC filter\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ /* program the updated filter list */
+ if (i40e_sync_vsi_filters(vsi))
+ dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_del_mac_addr_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove guest mac address filter
+ **/
+static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_ether_addr_list *al =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = al->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ vsi = pf->vsi[vsi_id];
+
+ /* delete addresses from the list */
+ for (i = 0; i < al->num_elements; i++)
+ i40e_del_filter(vsi, al->list[i].addr,
+ I40E_VLAN_ANY, true, false);
+
+ /* program the updated filter list */
+ if (i40e_sync_vsi_filters(vsi))
+ dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n");
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * program guest vlan id
+ **/
+static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vfl->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+ vsi = pf->vsi[vsi_id];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ i40e_vlan_stripping_enable(vsi);
+ for (i = 0; i < vfl->num_elements; i++) {
+ /* add new VLAN filter */
+ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF vlan filter %d, error %d\n",
+ vfl->vlan_id[i], ret);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_remove_vlan_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * remove programmed guest vlan id
+ **/
+static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vfl->vsi_id;
+ i40e_status aq_ret = 0;
+ int i;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi = pf->vsi[vsi_id];
+ if (vsi->info.pvid) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Unable to delete VF vlan filter %d, error %d\n",
+ vfl->vlan_id[i], ret);
+ }
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
+}
+
+/**
+ * i40e_vc_fcoe_msg
+ * @vf: pointer to the vf info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the vf for the fcoe msgs
+ **/
+static int i40e_vc_fcoe_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !test_bit(I40E_VF_STAT_FCOEENA, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ aq_ret = I40E_ERR_NOT_IMPLEMENTED;
+
+error_param:
+ /* send the response to the vf */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_FCOE, aq_ret);
+}
+
+/**
+ * i40e_vc_validate_vf_msg
+ * @vf: pointer to the vf info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * validate msg
+ **/
+static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ bool err_msg_format = false;
+ int valid_len;
+
+ /* Check if VF is disabled. */
+ if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
+ return I40E_ERR_PARAM;
+
+ /* Validate message length. */
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ valid_len = sizeof(struct i40e_virtchnl_version_info);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ valid_len = 0;
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_txq_info);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+ break;
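+ /* the following opcodes carry variable-length arrays: check the
+ * fixed header first, then add the element array to valid_len
+ */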
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vsi_queue_config_info *vqc =
+ (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+ valid_len += (vqc->num_queue_pairs *
+ sizeof(struct
+ i40e_virtchnl_queue_pair_info));
+ if (vqc->num_queue_pairs == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_irq_map_info *vimi =
+ (struct i40e_virtchnl_irq_map_info *)msg;
+ valid_len += (vimi->num_vectors *
+ sizeof(struct i40e_virtchnl_vector_map));
+ if (vimi->num_vectors == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_ether_addr_list *veal =
+ (struct i40e_virtchnl_ether_addr_list *)msg;
+ valid_len += veal->num_elements *
+ sizeof(struct i40e_virtchnl_ether_addr);
+ if (veal->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_vlan_filter_list *vfl =
+ (struct i40e_virtchnl_vlan_filter_list *)msg;
+ valid_len += vfl->num_elements * sizeof(u16);
+ if (vfl->num_elements == 0)
+ err_msg_format = true;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ valid_len = sizeof(struct i40e_virtchnl_queue_select);
+ break;
+ /* These are always errors coming from the VF. */
+ case I40E_VIRTCHNL_OP_EVENT:
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ return -EPERM;
+ }
+ /* few more checks */
+ if ((valid_len != msglen) || (err_msg_format)) {
+ i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+}
+
+/**
+ * i40e_vc_process_vf_msg
+ * @pf: pointer to the pf structure
+ * @vf_id: source vf id
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the common aeq/arq handler to
+ * process request from vf
+ **/
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen)
+{
+ struct i40e_vf *vf = &(pf->vf[vf_id]);
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ pf->vf_aq_requests++;
+ /* perform basic checks on the msg */
+ ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "invalid message from vf %d\n", vf_id);
+ return ret;
+ }
+ wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+ switch (v_opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ ret = i40e_vc_get_version_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ ret = i40e_vc_get_vf_resources_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ ret = i40e_vc_reset_vf_msg(vf);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ret = i40e_vc_config_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ret = i40e_vc_get_stats_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_FCOE:
+ ret = i40e_vc_fcoe_msg(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(&pf->pdev->dev,
+ "Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
+ ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
+ I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_vc_process_vflr_event
+ * @pf: pointer to the pf structure
+ *
+ * called from the vflr irq handler to
+ * free up vf resources and state variables
+ **/
+int i40e_vc_process_vflr_event(struct i40e_pf *pf)
+{
+ u32 reg, reg_idx, bit_idx, vf_id;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+
+ if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
+ return 0;
+
+ clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
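+ /* each GLGEN_VFLRSTAT register tracks 32 VFs; find this VF's
+ * register index and bit within it
+ */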
+ for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
+ reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr vfs */
+ vf = &pf->vf[vf_id];
+ reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
+ if (reg & (1 << bit_idx)) {
+ /* clear the bit in GLGEN_VFLRSTAT */
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+
+ if (i40e_reset_vf(vf, true))
+ dev_err(&pf->pdev->dev,
+ "Unable to reset the VF %d\n", vf_id);
+ /* free up vf resources to destroy vsi state */
+ i40e_free_vf_res(vf);
+
+ /* allocate new vf resources with the default state */
+ if (i40e_alloc_vf_res(vf))
+ dev_err(&pf->pdev->dev,
+ "Unable to allocate VF resources %d\n",
+ vf_id);
+
+ i40e_enable_vf_mappings(vf);
+ }
+ }
+
+ /* re-enable vflr interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ i40e_flush(hw);
+
+ return 0;
+}
+
+/**
+ * i40e_vc_vf_broadcast
+ * @pf: pointer to the pf structure
+ * @opcode: operation code
+ * @retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send a message to all VFs on a given PF
+ **/
+static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
+ enum i40e_virtchnl_ops v_opcode,
+ i40e_status v_retval, u8 *msg,
+ u16 msglen)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++) {
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ vf++;
+ }
+}
+
+/**
+ * i40e_vc_notify_link_state
+ * @pf: pointer to the pf structure
+ *
+ * send a link status message to all VFs on a given PF
+ **/
+void i40e_vc_notify_link_state(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
+ pfe.event_data.link_event.link_status =
+ pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
+ pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed;
+
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_reset
+ * @pf: pointer to the pf structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ **/
+void i40e_vc_notify_reset(struct i40e_pf *pf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS,
+ (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event));
+}
+
+/**
+ * i40e_vc_notify_vf_reset
+ * @vf: pointer to the vf structure
+ *
+ * indicate a pending reset to the given VF
+ **/
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
+{
+ struct i40e_virtchnl_pf_event pfe;
+
+ pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (u8 *)&pfe,
+ sizeof(struct i40e_virtchnl_pf_event), NULL);
+}
+
+/**
+ * i40e_ndo_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @mac: mac address
+ *
+ * program vf mac address
+ **/
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_mac_filter *f;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev,
+ "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&pf->pdev->dev,
+ "Invalid VF ethernet address\n");
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ /* delete the temporary mac address */
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+
+ /* add the new mac address */
+ f = i40e_add_filter(vsi, mac, 0, true, false);
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add VF ucast filter\n");
+ ret = -ENOMEM;
+ goto error_param;
+ }
+
+ dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
+ /* program mac filter */
+ if (i40e_sync_vsi_filters(vsi)) {
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ ret = -EIO;
+ goto error_param;
+ }
+ memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
+ ret = 0;
+
+error_param:
+ return ret;
+}
+
+/**
+ * i40e_ndo_set_vf_port_vlan
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @vlan_id: vlan identifier
+ * @qos: priority setting
+ *
+ * program vf vlan id and/or qos
+ **/
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+ int vf_id, u16 vlan_id, u8 qos)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
+ dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_pvid;
+ }
+
+ if (vsi->info.pvid) {
+ /* kill old VLAN */
+ ret = i40e_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
+ VLAN_VID_MASK));
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "remove VLAN failed, ret=%d, aq_err=%d\n",
+ ret, pf->hw.aq.asq_last_status);
+ }
+ }
+ if (vlan_id || qos)
+ ret = i40e_vsi_add_pvid(vsi,
+ vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
+ else
+ i40e_vlan_stripping_disable(vsi);
+
+ if (vlan_id) {
+ dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
+ vlan_id, qos, vf_id);
+
+ /* add new VLAN filter */
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
+ vsi->back->hw.aq.asq_last_status);
+ goto error_pvid;
+ }
+ }
+
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
+ goto error_pvid;
+ }
+ ret = 0;
+
+error_pvid:
+ return ret;
+}
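The PVID programmed here packs the VLAN id and the QoS priority into a single 16-bit word; i40e_ndo_get_vf_config() below unpacks it with the masks from i40e_virtchnl_pf.h. A standalone sketch of that packing with illustrative values (constants copied from the new header, not driver code):

#include <stdio.h>

#define I40E_VLAN_PRIORITY_SHIFT 12
#define I40E_VLAN_MASK           0xFFF
#define I40E_PRIORITY_MASK       0x7000

int main(void)
{
	unsigned int vlan_id = 100, qos = 5;
	unsigned int pvid = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);

	printf("pvid = 0x%04x\n", pvid);              /* 0x5064 */
	printf("vlan = %u\n", pvid & I40E_VLAN_MASK); /* 100 */
	printf("qos  = %u\n", (pvid & I40E_PRIORITY_MASK) >>
	       I40E_VLAN_PRIORITY_SHIFT);             /* 5 */
	return 0;
}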
+
+/**
+ * i40e_ndo_set_vf_bw
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @tx_rate: tx rate
+ *
+ * configure vf tx rate
+ **/
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * i40e_ndo_get_vf_config
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @ivi: vf configuration structure
+ *
+ * return vf configuration
+ **/
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_mac_filter *f, *ftmp;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ /* first vsi is always the LAN vsi */
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id);
+ ret = -EINVAL;
+ goto error_param;
+ }
+
+ ivi->vf = vf_id;
+
+ /* first entry of the list is the default ethernet address */
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
+ memcpy(&ivi->mac, f->macaddr, I40E_ETH_LENGTH_OF_ADDRESS);
+ break;
+ }
+
+ ivi->tx_rate = 0;
+ ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
+ ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
+ I40E_VLAN_PRIORITY_SHIFT;
+ ret = 0;
+
+error_param:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
new file mode 100644
index 00000000000..360382cf304
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_VIRTCHNL_PF_H_
+#define _I40E_VIRTCHNL_PF_H_
+
+#include "i40e.h"
+
+#define I40E_MAX_MACVLAN_FILTERS 256
+#define I40E_MAX_VLAN_FILTERS 256
+#define I40E_MAX_VLANID 4095
+
+#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
+
+#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
+#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
+
+#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_MASK 0xFFF
+#define I40E_PRIORITY_MASK 0x7000
+
+/* Various queue ctrls */
+enum i40e_queue_ctrl {
+ I40E_QUEUE_CTRL_UNKNOWN = 0,
+ I40E_QUEUE_CTRL_ENABLE,
+ I40E_QUEUE_CTRL_ENABLECHECK,
+ I40E_QUEUE_CTRL_DISABLE,
+ I40E_QUEUE_CTRL_DISABLECHECK,
+ I40E_QUEUE_CTRL_FASTDISABLE,
+ I40E_QUEUE_CTRL_FASTDISABLECHECK,
+};
+
+/* VF states */
+enum i40e_vf_states {
+ I40E_VF_STAT_INIT = 0,
+ I40E_VF_STAT_ACTIVE,
+ I40E_VF_STAT_FCOEENA,
+ I40E_VF_STAT_DISABLED,
+};
+
+/* VF capabilities */
+enum i40e_vf_capabilities {
+ I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+ I40E_VIRTCHNL_VF_CAP_L2,
+};
+
+/* VF information structure */
+struct i40e_vf {
+ struct i40e_pf *pf;
+
+ /* vf id in the pf space */
+ u16 vf_id;
+ /* all vf vsis connect to the same parent */
+ enum i40e_switch_element_types parent_type;
+
+ /* vf Port Extender (PE) stag if used */
+ u16 stag;
+
+ struct i40e_virtchnl_ether_addr default_lan_addr;
+ struct i40e_virtchnl_ether_addr default_fcoe_addr;
+
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u8 lan_vsi_index; /* index into PF struct */
+ u8 lan_vsi_id; /* ID as used by firmware */
+
+ u8 num_queue_pairs; /* num of qps assigned to vf vsis */
+ u64 num_mdd_events; /* num of mdd events detected */
+ u64 num_invalid_msgs; /* num of malformed or invalid msgs detected */
+ u64 num_valid_msgs; /* num of valid msgs detected */
+
+ unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_states; /* vf's runtime states */
+};
+
+void i40e_free_vfs(struct i40e_pf *pf);
+int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+ u32 v_retval, u8 *msg, u16 msglen);
+int i40e_vc_process_vflr_event(struct i40e_pf *pf);
+int i40e_reset_vf(struct i40e_vf *vf, bool flr);
+void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
+
+/* vf configuration related iplink handlers */
+int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
+ int vf_id, u16 vlan_id, u8 qos);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_get_vf_config(struct net_device *netdev,
+ int vf_id, struct ifla_vf_info *ivi);
+void i40e_vc_notify_link_state(struct i40e_pf *pf);
+void i40e_vc_notify_reset(struct i40e_pf *pf);
+
+#endif /* _I40E_VIRTCHNL_PF_H_ */
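vf_states is consumed as an atomic bitmap indexed by enum i40e_vf_states; the .c file above tests it with test_bit(I40E_VF_STAT_INIT, &vf->vf_states). A small sketch of that pattern (the helper name is hypothetical):

#include <linux/types.h>
#include <linux/bitops.h>
#include "i40e_virtchnl_pf.h"

/* hypothetical helper: a VF is usable once init has completed and it has
 * not been administratively disabled
 */
static bool i40e_vf_is_usable(struct i40e_vf *vf)
{
	return test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
	       !test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
}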
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index f21a91a299a..47c2d10df82 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -176,7 +176,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
@@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
+
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
@@ -250,86 +251,52 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = 15;
nvm->word_size = 1 << size;
- if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
-
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
- 16 : 8;
- break;
- }
- if (nvm->word_size == (1 << 15))
- nvm->page_size = 128;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
- nvm->type = e1000_nvm_eeprom_spi;
- } else {
- nvm->type = e1000_nvm_flash_hw;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
}
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
/* NVM Function Pointers */
+ nvm->ops.acquire = igb_acquire_nvm_82575;
+ nvm->ops.release = igb_release_nvm_82575;
+ nvm->ops.write = igb_write_nvm_spi;
+ nvm->ops.validate = igb_validate_nvm_checksum;
+ nvm->ops.update = igb_update_nvm_checksum;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = igb_read_nvm_eerd;
+ else
+ nvm->ops.read = igb_read_nvm_spi;
+
+ /* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i354:
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
- break;
- case e1000_i210:
- nvm->ops.validate = igb_validate_nvm_checksum_i210;
- nvm->ops.update = igb_update_nvm_checksum_i210;
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_srrd_i210;
- nvm->ops.write = igb_write_nvm_srwr_i210;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- break;
- case e1000_i211:
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_i211;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- nvm->ops.validate = NULL;
- nvm->ops.update = NULL;
- nvm->ops.write = NULL;
break;
default:
- nvm->ops.validate = igb_validate_nvm_checksum;
- nvm->ops.update = igb_update_nvm_checksum;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
}
@@ -516,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
@@ -601,6 +570,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* NVM initialization */
ret_val = igb_init_nvm_params_82575(hw);
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = igb_init_nvm_params_i210(hw);
+ break;
+ default:
+ break;
+ }
+
if (ret_val)
goto out;
@@ -741,6 +719,10 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
u32 ctrl_ext;
u32 mdic;
+ /* Extra read required for some PHYs on i354 */
+ if (hw->mac.type == e1000_i354)
+ igb_get_phy_id(hw);
+
/* For SGMII PHYs, we try the list of possible addresses until
* we find one that works. For non-SGMII PHYs
* (e.g. integrated copper PHYs), an address of 1 should
@@ -1163,6 +1145,31 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
}
/**
+ * igb_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function. If using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information;
+ * otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = igb_get_speed_and_duplex_copper(hw, speed,
+ duplex);
+
+ return ret_val;
+}
+
+/**
* igb_check_for_link_82575 - Check for link
* @hw: pointer to the HW structure
*
@@ -1239,7 +1246,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
u16 *duplex)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 pcs;
+ u32 pcs, status;
/* Set up defaults for the return values of this function */
mac->serdes_has_link = false;
@@ -1260,20 +1267,31 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
mac->serdes_has_link = true;
/* Detect and store PCS speed */
- if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
*speed = SPEED_1000;
- } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
*speed = SPEED_100;
- } else {
+ else
*speed = SPEED_10;
- }
/* Detect and store PCS duplex */
- if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
*duplex = FULL_DUPLEX;
- } else {
+ else
*duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = rd32(E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ hw_dbg("2500 Mbs, ");
+ hw_dbg("Full Duplex\n");
+ }
}
+
}
return 0;
@@ -1320,7 +1338,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
**/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -1365,7 +1383,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
/* Install any alternate MAC address into RAR0 */
ret_val = igb_check_alt_mac_addr(hw);
@@ -1443,11 +1461,18 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
- /* Clear Go Link Disconnect bit */
- if (hw->mac.type >= e1000_82580) {
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
phpm_reg &= ~E1000_82580_PM_GO_LINKD;
wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
}
ret_val = igb_setup_serdes_link_82575(hw);
@@ -1470,7 +1495,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
@@ -2103,10 +2128,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
s32 ret_val = 0;
/* BH SW mailbox bit in SW_FW_SYNC */
u16 swmbsw_mask = E1000_SW_SYNCH_MB;
- u32 ctrl, icr;
+ u32 ctrl;
bool global_device_reset = hw->dev_spec._82575.global_device_reset;
-
hw->dev_spec._82575.global_device_reset = false;
/* due to hw errata, global device reset doesn't always
@@ -2165,7 +2189,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
ret_val = igb_reset_mdicnfg_82580(hw);
if (ret_val)
@@ -2500,28 +2524,28 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
u16 phy_data;
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ (phy->id != M88E1543_E_PHY_ID))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
/* Switch to PHY page 18. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
&phy_data);
if (ret_val)
goto out;
- phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
phy_data);
if (ret_val)
goto out;
/* Return the PHY to page 0. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
if (ret_val)
goto out;
@@ -2572,7 +2596,7 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ (phy->id != M88E1543_E_PHY_ID))
goto out;
ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
@@ -2728,7 +2752,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
.check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575,
- .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+ .get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aa201abb8ad..978eca31ced 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -620,6 +620,7 @@
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
@@ -627,6 +628,11 @@
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET 328
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
@@ -665,20 +671,26 @@
#define NVM_INIT_CTRL_4 0x0013
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
-
-/* NVM version defines */
#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
-#define NVM_MAJOR_MASK 0xF000
-#define NVM_MINOR_MASK 0x0FF0
-#define NVM_BUILD_MASK 0x000F
-#define NVM_COMB_VER_MASK 0x00FF
-#define NVM_MAJOR_SHIFT 12
-#define NVM_MINOR_SHIFT 4
-#define NVM_COMB_VER_SHFT 8
-#define NVM_VER_INVALID 0xFFFF
-#define NVM_ETRACK_SHIFT 16
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT 6
@@ -775,7 +787,7 @@
#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
-#define M88E1545_E_PHY_ID 0x01410EA0
+#define M88E1543_E_PHY_ID 0x01410EA0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -897,9 +909,9 @@
#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
-#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
-#define E1000_M88E1545_EEE_CTRL_1 0x0
-#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 94d7866b9c2..37a9c06a6c6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -67,6 +67,8 @@ struct e1000_hw;
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
#define E1000_DEV_ID_I354_SGMII 0x1F41
@@ -110,6 +112,7 @@ enum e1000_nvm_type {
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
+ e1000_nvm_invm,
e1000_nvm_flash_sw
};
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index ddb3cf51b9b..0c0393316a3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_nvm_i211 - Read NVM wrapper function for I211
+ * igb_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ hw_dbg("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/**
+ * igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
* @words: number of words to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
-s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 words __always_unused, u16 *data)
{
s32 ret_val = E1000_SUCCESS;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
- ret_val = igb_read_invm_i211(hw, offset, &data[0]);
- ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
- ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_ID_LED_SETTINGS:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
@@ -411,48 +455,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_invm_i211 - Reads OTP
- * @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
- * @data: pointer to the data read
- *
- * Reads 16-bit words from the OTP. Return error when the word is not
- * stored in OTP.
- **/
-s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
-{
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u32 invm_dword;
- u16 i;
- u8 record_type, word_address;
-
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = rd32(E1000_INVM_DATA_REG(i));
- /* Get record type */
- record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
- if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
- break;
- if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
- i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
- i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
- word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
- if (word_address == (u8)address) {
- *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- hw_dbg("Read INVM Word 0x%02x = %x",
- address, *data);
- status = E1000_SUCCESS;
- break;
- }
- }
- }
- if (status != E1000_SUCCESS)
- hw_dbg("Requested word 0x%02x not found in OTP\n", address);
- return status;
-}
-
-/**
* igb_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
* @invm_ver: version structure for the version read
@@ -661,6 +663,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
}
/**
+ * igb_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool igb_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ eec = rd32(E1000_EECD);
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
@@ -786,3 +805,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
+
+/**
+ * igb_init_nvm_params_i210 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ nvm->ops.acquire = igb_acquire_nvm_i210;
+ nvm->ops.release = igb_release_nvm_i210;
+ nvm->ops.valid_led_default = igb_valid_led_default_i210;
+
+ /* NVM Function Pointers */
+ if (igb_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
+ nvm->ops.validate = igb_validate_nvm_checksum_i210;
+ nvm->ops.update = igb_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = igb_read_invm_i210;
+ nvm->ops.write = NULL;
+ nvm->ops.validate = NULL;
+ nvm->ops.update = NULL;
+ }
+ return ret_val;
+}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5caa332e755..dde3c4b7ea9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -35,20 +35,19 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
extern s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 data);
+extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index bab556a47fc..298f0ed5067 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -712,6 +712,7 @@ static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ u16 lan_offset;
u16 nvm_data;
/* Read and store word 0x0F of the EEPROM. This word contains bits
@@ -722,7 +723,14 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
* control setting, then the variable hw->fc will
* be initialized based on a value in the EEPROM.
*/
- ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+ if (hw->mac.type == e1000_i350) {
+ lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+ + lan_offset, 1, &nvm_data);
+ } else {
+ ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+ 1, &nvm_data);
+ }
if (ret_val) {
hw_dbg("NVM Read Error\n");
@@ -1171,17 +1179,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
hw_dbg("Half Duplex\n");
}
- /* Check if it is an I354 2.5Gb backplane connection. */
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- *speed = SPEED_2500;
- *duplex = FULL_DUPLEX;
- hw_dbg("2500 Mbs, ");
- hw_dbg("Full Duplex\n");
- }
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 7f9cd7cbd35..a7db7f3db91 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -709,11 +709,16 @@ out:
**/
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 fw_version;
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+ /* basic eeprom version numbers and bits used vary by part and by tool
+ * used to create the nvm images. Check which data format we have.
+ */
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
switch (hw->mac.type) {
case e1000_i211:
igb_read_invm_version(hw, fw_vers);
@@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- case e1000_i210:
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
break;
- default:
- return;
- }
- /* basic eeprom version numbers */
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
-
- /* etrack id */
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
- fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
-
- switch (hw->mac.type) {
case e1000_i210:
- case e1000_i354:
+ if (!(igb_get_flash_presence_i210(hw))) {
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
@@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
- ((comb_verl << NVM_COMB_VER_SHFT)
- | (comb_verh >> NVM_COMB_VER_SHFT));
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
- break;
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert minor value to hex before assigning to output struct
+ * Val to be converted will not be higher than 99, per tool output
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
}
return;
}
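The eep_minor arithmetic above handles a minor field whose hex nibbles encode decimal digits (per the NVM tool output noted in the comment), so a value stored as 0x23 must be reported as 23, not 35. A standalone sketch of the same conversion with an illustrative value, reusing the constants added to e1000_defines.h:

#include <stdio.h>

#define NVM_HEX_CONV 16
#define NVM_HEX_TENS 10

int main(void)
{
	unsigned int eeprom_verl = 0x23;                  /* minor field from NVM */
	unsigned int q = eeprom_verl / NVM_HEX_CONV;      /* high nibble: 2 */
	unsigned int rem = eeprom_verl % NVM_HEX_CONV;    /* low nibble: 3 */
	unsigned int eep_minor = q * NVM_HEX_TENS + rem;  /* 2 * 10 + 3 = 23 */

	printf("0x%02x -> %u\n", eeprom_verl, eep_minor); /* 0x23 -> 23 */
	return 0;
}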
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 6bfc0c43aac..433b7419cb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -44,6 +44,7 @@ struct e1000_fw_version {
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
+ u16 eep_build;
u8 invm_major;
u8 invm_minor;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 60461946f98..e7266759a10 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -731,15 +731,13 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
- }
+ if (phy->reset_disable)
+ return 0;
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Options:
* MDI/MDI-X = 0 (default)
@@ -780,23 +778,36 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
/* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Commit the changes. */
ret_val = igb_phy_sw_reset(hw);
if (ret_val) {
hw_dbg("Error committing the PHY changes\n");
- goto out;
+ return ret_val;
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -1806,7 +1817,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 15ea8dc9dad..6807b098eda 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,8 @@ struct hwmon_buff {
};
#endif
+#define IGB_RETA_SIZE 128
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -444,6 +446,10 @@ struct igb_adapter {
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct i2c_client *i2c_client;
+ u32 rss_indir_tbl_init;
+ u8 rss_indir_tbl[IGB_RETA_SIZE];
+
+ unsigned long link_check_timeout;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -455,6 +461,7 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -480,6 +487,7 @@ extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 85fe7b52f43..86d51429a18 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -172,10 +172,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_Autoneg |
SUPPORTED_Pause);
ecmd->advertising = ADVERTISED_FIBRE;
- if (hw->mac.type == e1000_i354) {
- ecmd->supported |= SUPPORTED_2500baseX_Full;
- ecmd->advertising |= ADVERTISED_2500baseX_Full;
- }
+
if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising |= ADVERTISED_1000baseT_Full;
@@ -209,16 +206,23 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU) {
- if ((hw->mac.type == e1000_i354) &&
- (status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER))
- ecmd->speed = SPEED_2500;
- else if (status & E1000_STATUS_SPEED_1000)
+ if (hw->mac.type == e1000_i354) {
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->supported = SUPPORTED_2500baseX_Full;
+ ecmd->advertising = ADVERTISED_2500baseX_Full;
+ ecmd->speed = SPEED_2500;
+ } else {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ }
+ } else if (status & E1000_STATUS_SPEED_1000) {
ecmd->speed = SPEED_1000;
- else if (status & E1000_STATUS_SPEED_100)
+ } else if (status & E1000_STATUS_SPEED_100) {
ecmd->speed = SPEED_100;
- else
+ } else {
ecmd->speed = SPEED_10;
+ }
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
@@ -1335,12 +1339,23 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
+
*data = 0;
- /* Validate eeprom on all parts but i211 */
- if (adapter->hw.mac.type != e1000_i211) {
+ /* Validate eeprom on all parts but flashless */
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+ *data = 2;
+ }
+ break;
+ default:
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
+ break;
}
return *data;
@@ -1592,6 +1607,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0);
igb_write_phy_reg(hw, PHY_CONTROL, 0x4140);
}
+ } else if (hw->phy.type == e1000_phy_82580) {
+ /* enable MII loopback */
+ igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
}
/* add small delay to avoid loopback test failure */
@@ -2672,7 +2690,9 @@ static int igb_set_eee(struct net_device *netdev,
igb_set_eee_i350(hw);
/* reset link */
- if (!netif_running(netdev))
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
igb_reset(adapter);
}
@@ -2771,6 +2791,90 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGB_RETA_SIZE;
+}
+
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg = E1000_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ shift = 3;
+ break;
+ default:
+ break;
+ }
+
+ while (i < IGB_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ wr32(reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
+
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 num_queues;
+
+ num_queues = adapter->rss_queues;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_queues = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Verify user input. */
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
+ return -EINVAL;
+
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igb_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2804,6 +2908,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+ .get_rxfh_indir = igb_get_rxfh_indir,
+ .set_rxfh_indir = igb_set_rxfh_indir,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
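igb_write_rss_indir_tbl() above packs four one-byte RETA entries into each 32-bit register, lowest-numbered entry in the lowest byte, then applies the per-MAC shift. A standalone sketch of that packing with the register write replaced by printf — the shift of 6 corresponds to the 82575 case in the hunk, and the table contents are illustrative:

#include <stdio.h>
#include <stdint.h>

#define IGB_RETA_SIZE 128

int main(void)
{
	uint8_t reta[IGB_RETA_SIZE];
	uint32_t shift = 6;			/* e1000_82575 */
	int i, j;

	for (i = 0; i < IGB_RETA_SIZE; i++)	/* default spread across 2 queues */
		reta[i] = (i * 2) / IGB_RETA_SIZE;

	for (i = 0; i < IGB_RETA_SIZE; i += 4) {
		uint32_t val = 0;

		for (j = 3; j >= 0; j--) {	/* entry i lands in the low byte */
			val <<= 8;
			val |= reta[i + j];
		}
		printf("RETA[%3d..%3d] = 0x%08x\n", i, i + 3, val << shift);
	}
	return 0;
}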
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c1d72c03cb5..8cf44f2a8cc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -62,7 +62,7 @@
#define MAJ 5
#define MIN 0
-#define BUILD 3
+#define BUILD 5
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -1013,7 +1015,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* ixgbe_get_stats64() might access the rings on this vector,
+ /* igb_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
@@ -1669,6 +1671,8 @@ void igb_down(struct igb_adapter *adapter)
igb_irq_disable(adapter);
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
for (i = 0; i < adapter->num_q_vectors; i++) {
napi_synchronize(&(adapter->q_vector[i]->napi));
napi_disable(&(adapter->q_vector[i]->napi));
@@ -1929,12 +1933,17 @@ void igb_set_fw_version(struct igb_adapter *adapter)
igb_get_fw_version(hw, &fw);
switch (hw->mac.type) {
+ case e1000_i210:
case e1000_i211:
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor, fw.invm_img_type);
- break;
-
+ if (!(igb_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
default:
/* if option is rom valid, display its version too */
if (fw.or_valid) {
@@ -1944,11 +1953,16 @@ void igb_set_fw_version(struct igb_adapter *adapter)
fw.eep_major, fw.eep_minor, fw.etrack_id,
fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
- } else {
+ } else if (fw.etrack_id != 0X0000) {
snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
@@ -2166,15 +2180,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
hw->mac.ops.reset_hw(hw);
- /* make sure the NVM is good , i211 parts have special NVM that
- * doesn't contain a checksum
+ /* make sure the NVM is good, i211/i210 parts can have special NVM
+ * that doesn't contain a checksum
*/
- if (hw->mac.type != e1000_i211) {
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (hw->nvm.ops.validate(hw) < 0) {
+ dev_err(&pdev->dev,
+ "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+ break;
+ default:
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
+ break;
}
/* copy the MAC address out of the NVM */
@@ -2342,7 +2369,14 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Width x1" : "unknown"), netdev->dev_addr);
}
- ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+ if ((hw->mac.type >= e1000_i210 ||
+ igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_read_part_string(hw, part_str,
+ E1000_PBANUM_LENGTH);
+ } else {
+ ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ }
+
if (ret_val)
strcpy(part_str, "Unknown");
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
@@ -2436,6 +2470,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
+ if (!adapter->msix_entries) {
+ err = -EPERM;
+ goto out;
+ }
+
if (!num_vfs)
goto out;
else if (old_vfs && old_vfs == num_vfs)
@@ -3096,7 +3135,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0;
+ u32 j, num_rx_queues;
static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
0xA32DCB77, 0x0CF23080, 0x3BB7426A,
@@ -3109,35 +3148,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
num_rx_queues = adapter->rss_queues;
switch (hw->mac.type) {
- case e1000_82575:
- shift = 6;
- break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
- if (adapter->vfs_allocated_count) {
- shift = 3;
+ if (adapter->vfs_allocated_count)
num_rx_queues = 2;
- }
break;
default:
break;
}
- /* Populate the indirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (j = 0; j < 32; j++) {
- /* first pass generates n and n+2 */
- u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
- u32 reta = (base & 0x07800780) >> (7 - shift);
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * num_rx_queues;
- reta |= (base & 0x07800780) << (1 + shift);
-
- wr32(E1000_RETA(j), reta);
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGB_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
}
+ igb_write_rss_indir_tbl(adapter);
/* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
@@ -3844,7 +3869,6 @@ bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
- s32 ret_val = 0;
/* get_link_status is set on LSC (link status) interrupt or
* rx sequence error interrupt. get_link_status will stay
@@ -3853,22 +3877,28 @@ bool igb_has_link(struct igb_adapter *adapter)
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
- if (hw->mac.get_link_status) {
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- } else {
- link_active = true;
- }
- break;
+ if (!hw->mac.get_link_status)
+ return true;
case e1000_media_type_internal_serdes:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = hw->mac.serdes_has_link;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
break;
default:
case e1000_media_type_unknown:
break;
}
+ if (((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) &&
+ (hw->phy.id == I210_I_PHY_ID)) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
return link_active;
}
@@ -3913,6 +3943,14 @@ static void igb_watchdog_task(struct work_struct *work)
int i;
link = igb_has_link(adapter);
+
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ else
+ link = false;
+ }
+
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4037,9 +4075,14 @@ static void igb_watchdog_task(struct work_struct *work)
igb_ptp_rx_hang(adapter);
/* Reset the timer */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
}
enum latency_range {
@@ -4814,6 +4857,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
@@ -4865,6 +4912,8 @@ void igb_update_stats(struct igb_adapter *adapter,
bytes = 0;
packets = 0;
+
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
@@ -4900,6 +4949,7 @@ void igb_update_stats(struct igb_adapter *adapter,
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
+ rcu_read_unlock();
/* read stats registers */
adapter->stats.crcerrs += rd32(E1000_CRCERRS);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7e8c477b0ab..5a54e3dc535 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
+ u32 lo, hi;
u64 val;
- u32 lo, hi, jk;
/* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
@@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
- u32 sec, nsec, jk;
+ u32 sec, nsec;
/* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
nsec = rd32(E1000_SYSTIML);
sec = rd32(E1000_SYSTIMH);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fce3e92f9d1..9f6b236828e 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -718,8 +718,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a6494e5daff..0ac6b11c6e4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -618,9 +618,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
@@ -754,7 +753,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
- __IXGBE_READ_I2C,
+ __IXGBE_PTP_RUNNING,
};
struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4a5bfb6b3af..a26f3fee4f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ u16 gssr;
u32 i;
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ return IXGBE_ERR_SWFW_SYNC;
+
if (hw->phy.type == ixgbe_phy_nl) {
/*
* phy SDA/SCL registers are at addresses 0xC30A to
@@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
*/
sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
- hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- MDIO_MMD_PMAPMD,
- sfp_addr);
+ hw->phy.ops.write_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ MDIO_MMD_PMAPMD,
+ sfp_addr);
/* Poll status */
for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- MDIO_MMD_PMAPMD,
- &sfp_stat);
+ hw->phy.ops.read_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ MDIO_MMD_PMAPMD,
+ &sfp_stat);
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
break;
@@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
}
/* Read data */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- MDIO_MMD_PMAPMD, &sfp_data);
+ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ MDIO_MMD_PMAPMD, &sfp_data);
*eeprom_data = (u8)(sfp_data >> 8);
} else {
@@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
}
out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
return status;
}
@@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
static struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 0b82d38bc97..007a0083a63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -49,6 +49,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -58,6 +59,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
@@ -137,11 +142,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
goto setup_sfp_out;
}
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
while (data_value != 0xffff) {
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
IXGBE_WRITE_FLUSH(hw);
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
}
/* Release the semaphore */
@@ -187,6 +194,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
setup_sfp_out:
return ret_val;
+
+setup_sfp_err:
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access,
+ * semaphore_delay is in ms; usleep_range needs us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+ return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
@@ -219,6 +237,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val = 0;
+ u32 esdp;
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = true;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+ }
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
@@ -342,8 +379,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
+ IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* QSFP must not enable auto-negotiation */
+ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+ *autoneg = false;
+ else
+ *autoneg = true;
}
out:
@@ -397,6 +439,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_LS:
media_type = ixgbe_media_type_fiber_lco;
break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -406,6 +451,24 @@ out:
}
/**
+ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ * @hw: pointer to hardware structure
+ *
+ * Disables link; should be called during the D3 power down sequence.
+ */
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+ u32 autoc2_reg;
+
+ if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ }
+}
+
+/**
* ixgbe_start_mac_link_82599 - Setup MAC link settings
* @hw: pointer to hardware structure
* @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -527,6 +590,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber. Since this path
+ * has no error value to return, if we detect an error we just log it
+ * and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
+
+/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -573,9 +705,19 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type.\n");
+ break;
+ }
/* Allow module to change analog characteristics (1G->10G) */
msleep(40);
@@ -625,10 +767,24 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ ixgbe_set_fiber_fixed_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type.\n");
+ break;
+ }
/* Allow module to change analog characteristics (10G->1G) */
msleep(40);
@@ -1872,7 +2028,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
goto out;
else
- status = ixgbe_identify_sfp_module_generic(hw);
+ status = ixgbe_identify_module_generic(hw);
}
/* Set PHY type none if no PHY detected */
@@ -1978,10 +2134,12 @@ sfp_check:
switch (hw->phy.type) {
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
case ixgbe_phy_sfp_ftl_active:
case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
break;
case ixgbe_phy_sfp_avago:
@@ -1999,6 +2157,15 @@ sfp_check:
else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
default:
break;
}
@@ -2045,6 +2212,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
+ u16 offset;
u16 fw_version = 0;
/* firmware check is only necessary for SFI devices */
@@ -2054,29 +2222,35 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
}
/* get the offset to the Firmware Module block */
- hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+ offset = IXGBE_FW_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &fw_offset))
+ goto fw_version_err;
if ((fw_offset == 0) || (fw_offset == 0xFFFF))
goto fw_version_out;
/* get the offset to the Pass Through Patch Configuration block */
- hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
- &fw_ptp_cfg_offset);
+ offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
+ goto fw_version_err;
if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
goto fw_version_out;
/* get the firmware version */
- hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
- IXGBE_FW_PATCH_VERSION_4),
- &fw_version);
+ offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
+ if (hw->eeprom.ops.read(hw, offset, &fw_version))
+ goto fw_version_err;
if (fw_version > 0x5)
status = 0;
fw_version_out:
return status;
+
+fw_version_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+ return IXGBE_ERR_EEPROM_VERSION;
}
/**
@@ -2236,6 +2410,112 @@ reset_pipeline_out:
return ret_val;
}
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: I2C device address to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: I2C device address to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
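+/* Hedged summary of the handshake implemented by the two helpers above,
+ * read from the code itself: on IXGBE_DEV_ID_82599_QSFP_SF_QP parts the I2C
+ * bus is shared with firmware, so the driver requests ownership by driving
+ * SDP0 high, polls SDP1 (up to 200 iterations of 5-10 ms) until the bus is
+ * granted, performs the ixgbe_{read,write}_i2c_byte_generic() access, and
+ * then drops SDP0 again to release the bus.
+ */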
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2255,6 +2535,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
+ .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
.set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
@@ -2300,7 +2581,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bcdeb89af5..b5c434b617b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
* function check the device id to see if the associated phy supports
* autoneg flow control.
**/
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
+ bool supported = false;
+ ixgbe_link_speed speed;
+ bool link_up;
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_82599_T3_LOM:
- return 0;
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ true : false;
+ else
+ supported = true;
+ break;
+ case ixgbe_media_type_backplane:
+ supported = true;
+ break;
+ case ixgbe_media_type_copper:
+ /* only some copper devices support flow control autoneg */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ supported = true;
+ break;
+ default:
+ break;
+ }
default:
- return IXGBE_ERR_FC_NOT_SUPPORTED;
+ break;
}
+
+ return supported;
}
/**
@@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
IXGBE_GSSR_MAC_CSR_SM);
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
/* Autoneg flow control on copper adapters */
case ixgbe_media_type_copper:
- if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+ if (ixgbe_device_supports_autoneg_fc(hw))
ret_val = ixgbe_fc_autoneg_copper(hw);
break;
@@ -2479,42 +2506,39 @@ out:
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
- u32 gssr;
+ u32 gssr = 0;
u32 swmask = mask;
u32 fwmask = mask << 5;
- s32 timeout = 200;
+ u32 timeout = 200;
+ u32 i;
- while (timeout) {
+ for (i = 0; i < timeout; i++) {
/*
- * SW EEPROM semaphore bit is used for access to all
- * SW_FW_SYNC/GSSR bits (not just EEPROM)
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_eeprom_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
- if (!(gssr & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask) or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_eeprom_semaphore(hw);
- usleep_range(5000, 10000);
- timeout--;
- }
-
- if (!timeout) {
- hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
- return IXGBE_ERR_SWFW_SYNC;
+ if (!(gssr & (fwmask | swmask))) {
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ ixgbe_release_eeprom_semaphore(hw);
+ return 0;
+ } else {
+ /* Resource is currently in use by FW or SW */
+ ixgbe_release_eeprom_semaphore(hw);
+ usleep_range(5000, 10000);
+ }
}
- gssr |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ /* If time expired clear the bits holding the lock and retry */
+ if (gssr & (fwmask | swmask))
+ ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
- ixgbe_release_eeprom_semaphore(hw);
- return 0;
+ usleep_range(5000, 10000);
+ return IXGBE_ERR_SWFW_SYNC;
}
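The rework above keeps the caller-side contract: a zero return means the requested GSSR bits are now held and must be released, while a non-zero return means the ~200-iteration back-off expired. A minimal caller sketch under that assumption (mirroring the 82598 I2C path earlier in this patch):

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_PHY0_SM))
		return IXGBE_ERR_SWFW_SYNC;
	/* ... PHY or I2C access that must not race firmware ... */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_PHY0_SM);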
/**
@@ -2716,13 +2740,19 @@ out:
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset)
{
+ s32 ret_val;
+
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available.
*/
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+ san_mac_offset);
+ if (ret_val)
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ IXGBE_SAN_MAC_ADDR_PTR);
- return 0;
+ return ret_val;
}
/**
@@ -2739,23 +2769,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
u8 i;
+ s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available. If they're not, no point in calling set_lan_id() here.
*/
- ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
+ goto san_mac_addr_clr;
/* make sure we know which port we need to program */
hw->mac.ops.set_lan_id(hw);
@@ -2763,14 +2786,26 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+ &san_mac_data);
+ if (ret_val) {
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ san_mac_offset);
+ goto san_mac_addr_clr;
+ }
san_mac_addr[i * 2] = (u8)(san_mac_data);
san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
san_mac_offset++;
}
-
-san_mac_addr_out:
return 0;
+
+san_mac_addr_clr:
+ /* No addresses available in this EEPROM. It's not necessarily an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+ return ret_val;
}
/**
@@ -3219,8 +3254,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
*wwpn_prefix = 0xFFFF;
/* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
+ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+ goto wwn_prefix_err;
if ((alt_san_mac_blk_offset == 0) ||
(alt_san_mac_blk_offset == 0xFFFF))
@@ -3228,19 +3264,26 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
/* check capability in alternative san mac address block */
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
+ if (hw->eeprom.ops.read(hw, offset, &caps))
+ goto wwn_prefix_err;
if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
goto wwn_prefix_out;
/* get the corresponding prefix for WWNN/WWPN */
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+ if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+ if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+ goto wwn_prefix_err;
wwn_prefix_out:
return 0;
+
+wwn_prefix_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+ return 0;
}
/**
@@ -3754,7 +3797,11 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
u8 sensor_index;
u8 sensor_location;
- hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+ if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ ets_offset + 1 + i);
+ continue;
+ }
sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
IXGBE_ETS_DATA_INDEX_SHIFT);
sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 22eee38868f..d259dc76604 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -143,8 +143,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
+
#define hw_dbg(hw, format, arg...) \
- netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
+ netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg)
+#define hw_err(hw, format, arg...) \
+ netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 24e2e7aafda..e8649abf97c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -160,6 +160,13 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
+ /* SFP type is needed for get_link_capabilities */
+ if (hw->phy.media_type == ixgbe_media_type_fiber ||
+ hw->phy.media_type == ixgbe_media_type_fiber_qsfp) {
+ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+ hw->phy.ops.identify_sfp(hw);
+ }
+
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -186,6 +193,11 @@ static int ixgbe_get_settings(struct net_device *netdev,
ecmd->advertising |= ADVERTISED_1000baseT_Full;
if (supported_link & IXGBE_LINK_SPEED_100_FULL)
ecmd->advertising |= ADVERTISED_100baseT_Full;
+
+ if (hw->phy.multispeed_fiber && !autoneg) {
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
}
if (autoneg) {
@@ -311,12 +323,17 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->autoneg == AUTONEG_DISABLE)
- return -EINVAL;
-
if (ecmd->advertising & ~ecmd->supported)
return -EINVAL;
+ /* only allow one speed at a time if no autoneg */
+ if (!ecmd->autoneg && hw->phy.multispeed_fiber) {
+ if (ecmd->advertising ==
+ (ADVERTISED_10000baseT_Full |
+ ADVERTISED_1000baseT_Full))
+ return -EINVAL;
+ }
+
old = hw->phy.autoneg_advertised;
advertised = 0;
if (ecmd->advertising & ADVERTISED_10000baseT_Full)
@@ -355,10 +372,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- if (hw->fc.disable_fc_autoneg)
- pause->autoneg = 0;
- else
+ if (ixgbe_device_supports_autoneg_fc(hw) &&
+ !hw->fc.disable_fc_autoneg)
pause->autoneg = 1;
+ else
+ pause->autoneg = 0;
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
@@ -384,7 +402,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
/* some devices do not support autoneg of link flow control */
if ((pause->autoneg == AUTONEG_ENABLE) &&
- (ixgbe_device_supports_autoneg_fc(hw) != 0))
+ !ixgbe_device_supports_autoneg_fc(hw))
return -EINVAL;
fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
@@ -1048,7 +1066,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
+ for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
if (!ring) {
data[i] = 0;
@@ -1140,11 +1158,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_q_%u_napi_yield", i);
+ sprintf(p, "tx_queue_%u_ll_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_q_%u_misses", i);
+ sprintf(p, "tx_queue_%u_ll_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_q_%u_cleaned", i);
+ sprintf(p, "tx_queue_%u_ll_cleaned", i);
p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
}
@@ -1154,11 +1172,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_q_%u_ll_poll_yield", i);
+ sprintf(p, "rx_queue_%u_ll_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_q_%u_misses", i);
+ sprintf(p, "rx_queue_%u_ll_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_q_%u_cleaned", i);
+ sprintf(p, "rx_queue_%u_ll_cleaned", i);
p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
}
@@ -1807,6 +1825,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
unsigned int size = 1024;
netdev_tx_t tx_ret_val;
struct sk_buff *skb;
+ u32 flags_orig = adapter->flags;
+
+ /* DCB can modify the frames on Tx */
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
/* allocate test skb */
skb = alloc_skb(size, GFP_KERNEL);
@@ -1859,6 +1881,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
/* free the original skb */
kfree_skb(skb);
+ adapter->flags = flags_orig;
return ret_val;
}
@@ -1884,11 +1907,12 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
int i;
for (i = 0; i < adapter->num_vfs; i++) {
@@ -1912,21 +1936,18 @@ static void ixgbe_diag_test(struct net_device *netdev,
/* Offline tests */
e_info(hw, "offline testing starting\n");
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
-
- /* bringing adapter down disables SFP+ optics */
- if (hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- ixgbe_reset(adapter);
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
e_info(hw, "register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1963,13 +1984,11 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
+ else if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
} else {
e_info(hw, "online testing starting\n");
- /* if adapter is down, SFP+ optics will be disabled */
- if (!if_running && hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1983,9 +2002,6 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
- /* if adapter was down, ensure SFP+ optics are disabled again */
- if (!if_running && hw->mac.ops.disable_tx_laser)
- hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2909,33 +2925,21 @@ static int ixgbe_get_module_info(struct net_device *dev,
struct ixgbe_hw *hw = &adapter->hw;
u32 status;
u8 sff8472_rev, addr_mode;
- int ret_val = 0;
bool page_swap = false;
- /* avoid concurent i2c reads */
- while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- msleep(100);
-
- /* used by the service task */
- set_bit(__IXGBE_READ_I2C, &adapter->state);
-
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
- if (status != 0) {
- ret_val = -EIO;
- goto err_out;
- }
+ if (status != 0)
+ return -EIO;
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
- if (status != 0) {
- ret_val = -EIO;
- goto err_out;
- }
+ if (status != 0)
+ return -EIO;
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
@@ -2952,9 +2956,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
-err_out:
- clear_bit(__IXGBE_READ_I2C, &adapter->state);
- return ret_val;
+ return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
@@ -2966,51 +2968,27 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
- int ret_val = 0;
- /* ixgbe_get_module_info is called before this function in all
- * cases, so we do not need any checks we already do above,
- * and can trust ee->len to be a known value.
- */
+ if (ee->len == 0)
+ return -EINVAL;
- while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- msleep(100);
- set_bit(__IXGBE_READ_I2C, &adapter->state);
-
- /* Read the first block, SFF-8079 */
- for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
- status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
- if (status != 0) {
- /* Error occured while reading module */
- ret_val = -EIO;
- goto err_out;
- }
- data[i] = databyte;
- }
-
- /* If the second block is requested, check if SFF-8472 is supported. */
- if (ee->len == ETH_MODULE_SFF_8472_LEN) {
- if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
- return -EOPNOTSUPP;
-
- /* Read the second block, SFF-8472 */
- for (i = ETH_MODULE_SFF_8079_LEN;
- i < ETH_MODULE_SFF_8472_LEN; i++) {
- status = hw->phy.ops.read_i2c_sff8472(hw,
- i - ETH_MODULE_SFF_8079_LEN, &databyte);
- if (status != 0) {
- /* Error occured while reading module */
- ret_val = -EIO;
- goto err_out;
- }
- data[i] = databyte;
- }
- }
+ for (i = ee->offset; i < ee->offset + ee->len; i++) {
+ /* I2C reads can take a long time */
+ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return -EBUSY;
-err_out:
- clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ if (i < ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
- return ret_val;
+ if (status != 0)
+ return -EIO;
+
+ data[i - ee->offset] = databyte;
+ }
+
+ return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index be4b1fb3d0d..0ade0cd5ef5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.13.10-k"
+#define DRV_VERSION "3.15.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
return 0;
}
+/**
+ * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
+ * @hw: hw specific details
+ *
+ * This function is used by probe to determine whether a device's PCI-Express
+ * bandwidth details should be gathered from the parent bus instead of from the
+ * device. Used to ensure that various locations all have the correct device ID
+ * checks.
+ */
+static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
+ int expected_gts)
+{
+ int max_gts = 0;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ struct pci_dev *pdev;
+
+ /* determine whether to use the parent device */
+ if (ixgbe_pcie_from_parent(&adapter->hw))
+ pdev = adapter->pdev->bus->parent->self;
+ else
+ pdev = adapter->pdev;
+
+ if (pcie_get_minimum_link(pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 2 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 4 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ /* 128b/130b encoding only reduces throughput by 1% */
+ max_gts = 8 * width;
+ break;
+ default:
+ e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ e_dev_info("PCI Express bandwidth of %dGT/s available\n",
+ max_gts);
+ e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ "Unknown"),
+ width,
+ (speed == PCIE_SPEED_2_5GT ? "20%" :
+ speed == PCIE_SPEED_5_0GT ? "20%" :
+ speed == PCIE_SPEED_8_0GT ? "N/a" :
+ "Unknown"));
+
+ if (max_gts < expected_gts) {
+ e_dev_warn("This is not sufficient for optimal performance of this card.\n");
+ e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
+ expected_gts);
+ e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
+ }
+}
+
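To make the arithmetic in ixgbe_check_minimum_link() concrete, a worked example assuming a dual-port 82599 (so ixgbe_enumerate_functions() returns 2 and expected_gts = 20):

	/* x8 Gen2 slot: max_gts = 4 * 8 = 32 GT/s usable -> no warning     */
	/* x4 Gen2 slot: max_gts = 4 * 4 = 16 GT/s usable -> warning fires  */
	/* (4 and 2 come from 5.0 and 2.5 GT/s less ~20% 8b/10b overhead;   */
	/*  Gen3 keeps ~8 GT/s per lane thanks to 128b/130b encoding)       */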
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -3490,7 +3571,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int i;
- u32 rxctrl;
+ u32 rxctrl, rfctl;
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -3499,6 +3580,13 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter);
+ /* RSC Setup */
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ rfctl &= ~IXGBE_RFCTL_RSC_DIS;
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
+
/* Program registers for the distribution of queues */
ixgbe_setup_mrqc(adapter);
@@ -3724,8 +3812,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
- /* don't hardware filter vlans in promisc mode */
- ixgbe_vlan_filter_disable(adapter);
+ /* Only disable hardware filter vlans in promiscuous mode
+ * if SR-IOV and VMDQ are disabled - otherwise ensure
+ * that hardware VLAN filters remain enabled.
+ */
+ if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED)))
+ ixgbe_vlan_filter_disable(adapter);
+ else
+ ixgbe_vlan_filter_enable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
@@ -4087,6 +4182,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_sfp_active_unknown:
case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_qsfp_passive_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4352,7 +4451,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
}
@@ -4714,8 +4813,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
- hw->fc.disable_fc_autoneg =
- (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
+ hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
/* assign number of SR-IOV VFs */
@@ -5205,6 +5303,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return retval;
#endif
+ if (hw->mac.ops.stop_link_on_d3)
+ hw->mac.ops.stop_link_on_d3(hw);
+
if (wufc) {
ixgbe_set_rx_mode(netdev);
@@ -5681,7 +5782,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
adapter->last_rx_ptp_check = jiffies;
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5828,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5927,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
- /* concurent i2c reads are not supported */
- if (test_bit(__IXGBE_READ_I2C, &adapter->state))
- return;
-
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -5903,8 +6000,16 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
speed = hw->phy.autoneg_advertised;
- if ((!speed) && (hw->mac.ops.get_link_capabilities))
+ if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);
+
+ /* set up the highest link speed when autoneg is off */
+ if (!autoneg) {
+ if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+ speed = IXGBE_LINK_SPEED_10GB_FULL;
+ }
+ }
+
if (hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw, speed, true);
@@ -6038,7 +6143,7 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter);
ixgbe_ptp_rx_hang(adapter);
}
@@ -7247,6 +7352,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
};
/**
+ * ixgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the PCIe bandwidth (GT/s) necessary for optimal performance.
+ **/
+static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *entry;
+ int physfns = 0;
+
+ /* Some cards cannot use the generic method of counting PCIe functions,
+ * and so must be hardcoded to the correct value.
+ */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ physfns = 4;
+ break;
+ default:
+ list_for_each(entry, &adapter->pdev->bus_list) {
+ struct pci_dev *pdev =
+ list_entry(entry, struct pci_dev, bus_list);
+ /* don't count virtual functions */
+ if (!pdev->is_virtfn)
+ physfns++;
+ }
+ }
+
+ return physfns;
+}
+
+/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
* @device_id: the device ID
@@ -7328,7 +7469,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
- int i, err, pci_using_dac;
+ int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
@@ -7483,10 +7624,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->mac.type == ixgbe_mac_82598EB) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to load because an unsupported SFP+ "
- "module type was detected.\n");
- e_dev_err("Reload the driver after installing a supported "
- "module.\n");
+ e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported module.\n");
goto err_sw_init;
} else if (err) {
e_dev_err("HW Init failed: %d\n", err);
@@ -7617,7 +7756,7 @@ skip_sriov:
/* pick up the PCI bus settings for reporting later */
hw->mac.ops.get_bus_info(hw);
- if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
+ if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
/* print bus type/speed/width info */
@@ -7643,12 +7782,20 @@ skip_sriov:
e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, part_str);
- if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
- e_dev_warn("PCI-Express bandwidth available for this card is "
- "not sufficient for optimal performance.\n");
- e_dev_warn("For optimal performance a x8 PCI-Express slot "
- "is required.\n");
+ /* calculate the expected PCIe bandwidth required for optimal
+ * performance. Note that some older parts will never have enough
+ * bandwidth due to being older generation PCIe parts. We clamp these
+ * parts to ensure no warning is displayed if it can't be fixed.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
+ break;
+ default:
+ expected_gts = ixgbe_enumerate_functions(adapter) * 10;
+ break;
}
+ ixgbe_check_minimum_link(adapter, expected_gts);
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e5691ccbce9..e4c676006be 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -204,7 +204,83 @@ out:
}
/**
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register
+ * without the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ u32 i, data, command;
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address command did not complete.\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY read command didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+
+ return 0;
+}
+
+/**
* ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- u32 command;
- u32 i;
- u32 data;
- s32 status = 0;
+ s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+ status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
status = IXGBE_ERR_SWFW_SYNC;
+ }
- if (status == 0) {
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+ return status;
+}
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+/**
+ * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 i, command;
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
+ /* Put the data in the MDI single read and write data register */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address command did not complete.\n");
- status = IXGBE_ERR_PHY;
- }
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the read
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad <<
- IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY read command didn't complete\n");
- status = IXGBE_ERR_PHY;
- } else {
- /*
- * Read operation is complete. Get the data
- * from MSRWD
- */
- data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
- data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
- *phy_data = (u16)(data);
- }
- }
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
- hw->mac.ops.release_swfw_sync(hw, gssr);
+ /* Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
}
- return status;
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
}
/**
* ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * using SWFW lock- this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- u32 command;
- u32 i;
- s32 status = 0;
+ s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- status = IXGBE_ERR_SWFW_SYNC;
-
- if (status == 0) {
- /* Put the data in the MDI single read and write data register*/
- IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
-
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the write
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad <<
- IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
- }
-
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
}
return status;
@@ -775,6 +791,8 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
* Read control word from PHY init contents offset
*/
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
@@ -787,10 +805,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
case IXGBE_DATA_NL:
hw_dbg(hw, "DATA:\n");
data_offset++;
- hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
+ ret_val = hw->eeprom.ops.read(hw, data_offset++,
+ &phy_offset);
+ if (ret_val)
+ goto err_eeprom;
for (i = 0; i < edata; i++) {
- hw->eeprom.ops.read(hw, data_offset, &eword);
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &eword);
+ if (ret_val)
+ goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
@@ -822,12 +845,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
out:
return ret_val;
+
+err_eeprom:
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+ return IXGBE_ERR_PHY;
}
/**
- * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * ixgbe_identify_module_generic - Identifies module type
* @hw: pointer to hardware structure
*
+ * Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1106,6 +1159,197 @@ err_read_i2c_eeprom:
}
/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = false;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = true;
+
+ if (!active_cable) {
+ /* check for active DA cables that pre-date
+ * SFF-8436 v3.6
+ */
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = true;
+ }
+
+ if (active_cable) {
+ hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = ixgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+ hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+ status = 0;
+ } else {
+ if (hw->allow_unsupported_sfp == true) {
+ e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+ status = 0;
+ } else {
+ hw_dbg(hw,
+ "QSFP module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = 0;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
* @list_offset: offset to the SFP ID list
@@ -1147,7 +1391,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
- hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ hw_err(hw, "eeprom read at %d failed\n",
+ IXGBE_PHY_INIT_OFFSET_NL);
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ }
if ((!*list_offset) || (*list_offset == 0xFFFF))
return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
@@ -1159,12 +1407,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* Find the matching SFP ID in the EEPROM
* and program the init sequence
*/
- hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
if (sfp_id == sfp_type) {
(*list_offset)++;
- hw->eeprom.ops.read(hw, *list_offset, data_offset);
+ if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+ goto err_phy;
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
hw_dbg(hw, "SFP+ module not supported\n");
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1174,7 +1424,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
} else {
(*list_offset) += 2;
if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
- return IXGBE_ERR_PHY;
+ goto err_phy;
}
}
@@ -1184,6 +1434,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
}
return 0;
+
+err_phy:
+ hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
+ return IXGBE_ERR_PHY;
}
/**
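Note on the MDI refactor above: both the address cycle and the write command reduce to the same wait loop - issue the command, then poll MSCA every 10 usec until the MDI Command bit clears or the iteration budget runs out. A minimal user-space sketch of that pattern, with hypothetical names throughout (read_reg, MDI_BUSY, POLL_ITERATIONS stand in for the ixgbe register interface):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define MDI_BUSY        0x40000000u  /* stand-in for IXGBE_MSCA_MDI_COMMAND */
#define POLL_ITERATIONS 100          /* stand-in for IXGBE_MDIO_COMMAND_TIMEOUT */

/* Returns true when the busy bit cleared before the timeout expired. */
static bool wait_mdi_idle(uint32_t (*read_reg)(void))
{
	uint32_t val;
	int i;

	for (i = 0; i < POLL_ITERATIONS; i++) {
		usleep(10);              /* check every 10 usec */
		val = read_reg();
		if (!(val & MDI_BUSY))
			return true;     /* command completed */
	}
	return false;                    /* caller reports IXGBE_ERR_PHY */
}

static uint32_t fake_msca = MDI_BUSY;
static uint32_t fake_read(void) { fake_msca &= ~MDI_BUSY; return fake_msca; }

int main(void)
{
	return wait_mdi_idle(fake_read) ? 0 : 1;
}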
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 886a3431cf5..24af12e3719 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -33,17 +33,28 @@
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER 0x0
-#define IXGBE_SFF_IDENTIFIER_SFP 0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
-#define IXGBE_SFF_1GBE_COMP_CODES 0x6
-#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
-#define IXGBE_SFF_SFF_8472_SWAP 0x5C
-#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR 0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -54,7 +65,14 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -102,6 +120,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -121,7 +143,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
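The new QSFP EEPROM offsets above feed the vendor-OUI check in ixgbe_identify_qsfp_module_generic(): three bytes are read and packed into one 24-bit value before comparing against IXGBE_SFF_VENDOR_OUI_INTEL. A sketch of that packing, assuming shift values of 24/16/8 in place of the real IXGBE_SFF_VENDOR_OUI_BYTE*_SHIFT macros:

#include <stdint.h>
#include <stdio.h>

#define OUI_BYTE0_SHIFT 24   /* assumed stand-ins for the driver's shifts */
#define OUI_BYTE1_SHIFT 16
#define OUI_BYTE2_SHIFT 8

static uint32_t pack_oui(uint8_t b0, uint8_t b1, uint8_t b2)
{
	return ((uint32_t)b0 << OUI_BYTE0_SHIFT) |
	       ((uint32_t)b1 << OUI_BYTE1_SHIFT) |
	       ((uint32_t)b2 << OUI_BYTE2_SHIFT);
}

int main(void)
{
	/* Arbitrary example bytes; the driver compares the packed value
	 * against IXGBE_SFF_VENDOR_OUI_INTEL. */
	printf("0x%08x\n", pack_oui(0xAA, 0xBB, 0xCC));
	return 0;
}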
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 331987d6815..5184e2a1a7d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
ixgbe_ptp_reset(adapter);
- /* set the flag that PTP has been enabled */
- adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+ /* enter the IXGBE_PTP_RUNNING state */
+ set_bit(__IXGBE_PTP_RUNNING, &adapter->state);
return;
}
@@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
*/
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
- /* stop the overflow check task */
- adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
- IXGBE_FLAG2_PTP_PPS_ENABLED);
+ /* Leave the IXGBE_PTP_RUNNING state. */
+ if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
+ return;
+ /* stop the PPS signal */
+ adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
ixgbe_ptp_setup_sdp(adapter);
cancel_work_sync(&adapter->ptp_tx_work);
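The PTP change above replaces a flags2 bit with an adapter->state bit so that stop can be made idempotent: the first stop atomically tests-and-clears the bit and runs the teardown, a second stop sees the bit already clear and returns early. A user-space sketch of that idiom, with atomic_fetch_or/atomic_fetch_and standing in for set_bit()/test_and_clear_bit():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PTP_RUNNING (1u << 0)        /* stand-in for __IXGBE_PTP_RUNNING */

static atomic_uint state;

static void ptp_start(void) { atomic_fetch_or(&state, PTP_RUNNING); }

static bool ptp_stop(void)
{
	/* test_and_clear_bit(): clear the bit, report whether it was set */
	return atomic_fetch_and(&state, ~PTP_RUNNING) & PTP_RUNNING;
}

int main(void)
{
	ptp_start();
	printf("first stop runs teardown: %d\n", ptp_stop());   /* 1 */
	printf("second stop runs teardown: %d\n", ptp_stop());  /* 0 */
	return 0;
}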
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1e7d587c4e5..276d7b13533 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
ixgbe_disable_sriov(adapter);
}
-static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *vfdev;
- int dev_id;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- dev_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- dev_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- return false;
- }
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
- while (vfdev) {
- /* if we don't own it we don't care */
- if (vfdev->is_virtfn && vfdev->physfn == pdev) {
- /* if it is assigned we cannot release it */
- if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
- }
-
- return false;
-}
-
#endif /* #ifdef CONFIG_PCI_IOV */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
@@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
* without causing issues, so just leave the hardware
* available but disabled
*/
- if (ixgbe_vfs_are_assigned(adapter)) {
+ if (pci_vfs_assigned(adapter->pdev)) {
e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
return -EPERM;
}
@@ -672,8 +639,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- u32 reg, msgbuf[4];
- u32 reg_offset, vf_shift;
+ u32 reg, reg_offset, vf_shift;
+ u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 vlvf;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= IXGBE_VLVF_ENTRIES)
+ regindex = -1;
+
+ return regindex;
+}
+
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
@@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
int err;
+ s32 reg_ndx;
+ u32 vlvf;
+ u32 bits;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (adapter->vfinfo[vf].pf_vlan || tcs) {
@@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
else if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
+ /* In promiscuous mode, any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && adapter->netdev->flags & IFF_PROMISC)
+ err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+
err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
if (!err && adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && adapter->netdev->flags & IFF_PROMISC) {
+ reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
+ if (reg_ndx < 0)
+ goto out;
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+ bits &= ~(1 << VMDQ_P(0));
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(reg_ndx * 2) + 1);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(reg_ndx * 2) + 1);
+ bits &= ~(1 << (VMDQ_P(0) - 32));
+ bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+ }
+
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
+ !test_bit(vid, adapter->active_vlans) && !bits)
+ ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+ }
+
+out:
+
return err;
}
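The promiscuous-mode cleanup above decides whether the PF is the last user of a VLVF entry by inspecting the two 32-bit pool-enable words behind it: pools 0-31 live in one VLVFB word, pools 32-63 in the other. A simplified sketch of that word/bit selection (plain indices; the real code works on register offsets and masks the PF bit out before checking the remainder):

#include <stdint.h>
#include <stdio.h>

static void vlvfb_locate(int reg_ndx, unsigned int pool,
			 unsigned int *word, uint32_t *mask)
{
	*word = reg_ndx * 2 + (pool >= 32);   /* which VLVFB word */
	*mask = 1u << (pool % 32);            /* bit for this pool */
}

int main(void)
{
	unsigned int word;
	uint32_t mask;

	vlvfb_locate(5, 0, &word, &mask);     /* e.g. PF pool VMDQ_P(0) == 0 */
	printf("pool 0  -> VLVFB[%u], mask 0x%08x\n", word, mask);
	vlvfb_locate(5, 40, &word, &mask);
	printf("pool 40 -> VLVFB[%u], mask 0x%08x\n", word, mask);
	return 0;
}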
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 70c6aa3d3f9..10775cb9b6d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -69,6 +69,7 @@
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_X540T1 0x1560
/* VF Device IDs */
@@ -1520,9 +1521,11 @@ enum {
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -1593,6 +1596,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
@@ -1857,6 +1861,7 @@ enum {
#define IXGBE_RFCTL_ISCSI_DIS 0x00000001
#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E
#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS 0x00000020
#define IXGBE_RFCTL_NFSW_DIS 0x00000040
#define IXGBE_RFCTL_NFSR_DIS 0x00000080
#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300
@@ -2582,6 +2587,10 @@ enum ixgbe_phy_type {
ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
+ ixgbe_phy_qsfp_passive_unknown,
+ ixgbe_phy_qsfp_active_unknown,
+ ixgbe_phy_qsfp_intel,
+ ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
ixgbe_phy_generic
};
@@ -2622,6 +2631,8 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
+ ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
@@ -2838,6 +2849,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
+ void (*stop_link_on_d3)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2885,6 +2897,8 @@ struct ixgbe_phy_operations {
s32 (*reset)(struct ixgbe_hw *);
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
@@ -2953,6 +2967,7 @@ struct ixgbe_phy_info {
bool smart_speed_active;
bool multispeed_fiber;
bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
};
#include "ixgbe_mbx.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f5166ad6bb..59a62bbfb37 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -488,8 +488,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- !(compare_ether_addr(adapter->netdev->dev_addr,
- eth_hdr(skb)->h_source))) {
+ ether_addr_equal(adapter->netdev->dev_addr,
+ eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 270e65f2110..a36fa80968e 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -996,14 +996,14 @@ static int korina_open(struct net_device *dev)
* that handles the Done Finished
* Ovr and Und Events */
ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
- IRQF_DISABLED, "Korina ethernet Rx", dev);
+ 0, "Korina ethernet Rx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
dev->name, lp->rx_irq);
goto err_release;
}
ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
- IRQF_DISABLED, "Korina ethernet Tx", dev);
+ 0, "Korina ethernet Tx", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
dev->name, lp->tx_irq);
@@ -1012,7 +1012,7 @@ static int korina_open(struct net_device *dev)
/* Install handler for overrun error. */
ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
- IRQF_DISABLED, "Ethernet Overflow", dev);
+ 0, "Ethernet Overflow", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
dev->name, lp->ovr_irq);
@@ -1021,7 +1021,7 @@ static int korina_open(struct net_device *dev)
/* Install handler for underflow error. */
ret = request_irq(lp->und_irq, korina_und_interrupt,
- IRQF_DISABLED, "Ethernet Underflow", dev);
+ 0, "Ethernet Underflow", dev);
if (ret < 0) {
printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
dev->name, lp->und_irq);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index bfdb0686039..6a6c1f76d8e 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -282,8 +282,7 @@ ltq_etop_hw_init(struct net_device *dev)
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
- request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
- "etop_tx", priv);
+ request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
} else if (IS_RX(i)) {
ltq_dma_alloc_rx(&ch->dma);
for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -291,8 +290,7 @@ ltq_etop_hw_init(struct net_device *dev)
if (ltq_etop_alloc_skb(ch))
return -ENOMEM;
ch->dma.desc = 0;
- request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
- "etop_rx", priv);
+ request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
}
ch->dma.irq = irq;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c35db735958..7fb5677451f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2641,7 +2641,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
return ret;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
@@ -2833,7 +2833,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct resource *res;
int err;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b017818bcca..e35bac7cfdf 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -79,10 +79,10 @@
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
-#define MVNETA_NO_DESC_SWAP 0x0
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
@@ -138,7 +138,9 @@
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -264,8 +266,7 @@ struct mvneta_port {
* layout of the transmit and reception DMA descriptors, and their
* layout is therefore defined by the hardware design
*/
-struct mvneta_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+
#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
@@ -280,15 +281,6 @@ struct mvneta_tx_desc {
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)
- u16 reserverd1; /* csum_l4 (for future use) */
- u16 data_size; /* Data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* Physical addr of transmitted buffer */
- u32 reserved2; /* hw_cmd - (for future use, PMT) */
- u32 reserved3[4]; /* Reserved - (for future use) */
-};
-
-struct mvneta_rx_desc {
- u32 status; /* Info about received packet */
#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
@@ -299,16 +291,57 @@ struct mvneta_rx_desc {
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+#if defined(__LITTLE_ENDIAN)
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
u16 reserved1; /* pnc_info - (for future use, PnC) */
u16 data_size; /* Size of received packet in bytes */
+
u32 buf_phys_addr; /* Physical address of the buffer */
u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved3; /* prefetch_cmd, for future use */
u16 reserved4; /* csum_l4 - (for future use, PnC) */
+
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+#else
+struct mvneta_tx_desc {
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u16 data_size; /* Size of received packet in bytes */
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u32 status; /* Info about received packet */
+
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+
u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
};
+#endif
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
@@ -908,13 +941,22 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
/* Default burst size */
val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
- val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
- MVNETA_NO_DESC_SWAP);
+#if defined(__BIG_ENDIAN)
+ val |= MVNETA_DESC_SWAP;
+#endif
/* Assign port SDMA configuration */
mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+ /* Disable PHY polling in hardware, since we're using the
+ * kernel phylib to do this.
+ */
+ val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+ val &= ~MVNETA_PHY_POLLING_ENABLE;
+ mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -2307,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN);
if (phydev->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2440,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
return 0;
}
+static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ if (!pp->phy_dev)
+ return -ENOTSUPP;
+
+ ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+ if (!ret)
+ mvneta_adjust_link(dev);
+
+ return ret;
+}
+
/* Ethtool methods */
/* Get settings (phy address, speed) for ethtools */
@@ -2558,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_change_mtu = mvneta_change_mtu,
.ndo_tx_timeout = mvneta_tx_timeout,
.ndo_get_stats64 = mvneta_get_stats64,
+ .ndo_do_ioctl = mvneta_ioctl,
};
const struct ethtool_ops mvneta_eth_tool_ops = {
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index db481477bcc..fff62460185 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -583,10 +583,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
* table is full.
*/
if (pep->htpr == NULL) {
- pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
if (pep->htpr == NULL)
return -ENOMEM;
} else {
@@ -1024,9 +1023,9 @@ static int rxq_init(struct net_device *dev)
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_rx_desc_area)
goto out;
@@ -1085,9 +1084,9 @@ static int txq_init(struct net_device *dev)
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_tx_desc_area)
goto out;
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1124,8 +1123,7 @@ static int pxa168_eth_open(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
- err = request_irq(dev->irq, pxa168_eth_int_handler,
- IRQF_DISABLED, dev->name, dev);
+ err = request_irq(dev->irq, pxa168_eth_int_handler, 0, dev->name, dev);
if (err) {
dev_err(&dev->dev, "can't assign irq\n");
return -EAGAIN;
@@ -1517,7 +1515,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
eth_hw_addr_random(dev);
- pep->pd = pdev->dev.platform_data;
+ pep->pd = dev_get_platdata(&pdev->dev);
pep->rx_ring_size = NUM_RX_DESCS;
if (pep->pd->rx_queue_size)
pep->rx_ring_size = pep->pd->rx_queue_size;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index ef94a591f9e..ecc7f7b696b 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3086,23 +3086,27 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
+ struct skge_element ee;
struct sk_buff *nskb;
nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
if (!nskb)
goto resubmit;
+ ee = *e;
+
+ skb = ee.skb;
+ prefetch(skb->data);
+
if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
dev_kfree_skb(nskb);
goto resubmit;
}
pci_unmap_single(skge->hw->pdev,
- dma_unmap_addr(e, mapaddr),
- dma_unmap_len(e, maplen),
+ dma_unmap_addr(&ee, mapaddr),
+ dma_unmap_len(&ee, maplen),
PCI_DMA_FROMDEVICE);
- skb = e->skb;
- prefetch(skb->data);
}
skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 299d0184f98..ea20182c696 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,7 +800,16 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
-int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return -EPERM;
+}
+
+static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
@@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = MLX4_CMD_UPDATE_QP_wrapper
},
{
+ .opcode = MLX4_CMD_GET_OP_REQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+ },
+ {
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
.has_inbox = false,
.has_outbox = false,
@@ -1526,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
}
-int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
+static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 9d4a1ea030d..b4881b68615 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
int err;
@@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
pfc->mbc,
pfc->delay);
- priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
- priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+ prof->rx_pause = !pfc->pfc_en;
+ prof->tx_pause = !pfc->pfc_en;
+ prof->rx_ppp = pfc->pfc_en;
+ prof->tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
+ prof->tx_pause,
+ prof->tx_ppp,
+ prof->rx_pause,
+ prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a28cd801a23..0c750985f47 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -53,9 +53,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_cq[i].moder_cnt = priv->tx_frames;
priv->tx_cq[i].moder_time = priv->tx_usecs;
- err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
- if (err)
- return err;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+ if (err)
+ return err;
+ }
}
if (priv->adaptive_rx_coal)
@@ -65,9 +67,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
priv->rx_cq[i].moder_cnt = priv->rx_frames;
priv->rx_cq[i].moder_time = priv->rx_usecs;
priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
- err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
- if (err)
- return err;
+ if (priv->port_up) {
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
}
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7c492382da0..0698c82d6ff 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
+static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, int index,
+ u8 owner)
+{
+ __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ void *end = ring->buf + ring->buf_size;
+ __be32 *ptr = (__be32 *)tx_desc;
+ int i;
+
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+ } else {
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ if ((void *)ptr >= end) {
+ ptr = ring->buf;
+ stamp ^= cpu_to_be32(0x80000000);
+ }
+ }
+ }
+}
+
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
void *end = ring->buf + ring->buf_size;
int frags = skb_shinfo(skb)->nr_frags;
int i;
- __be32 *ptr = (__be32 *)tx_desc;
- __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
struct skb_shared_hwtstamps hwts;
if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- }
-
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
++data;
}
}
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- if ((void *) ptr >= end) {
- ptr = ring->buf;
- stamp ^= cpu_to_be32(0x80000000);
- }
- }
-
}
dev_kfree_skb_any(skb);
return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe;
u16 index;
- u16 new_index, ring_index;
+ u16 new_index, ring_index, stamp_index;
u32 txbbs_skipped = 0;
+ u32 txbbs_stamp = 0;
u32 cons_index = mcq->cons_index;
int size = cq->size;
u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
ring_index = ring->cons & size_mask;
+ stamp_index = ring_index;
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -345,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
*/
rmb();
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+
+ en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+ cqe_err->vendor_err_syndrome,
+ cqe_err->syndrome);
+ }
+
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
@@ -359,6 +385,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
priv, ring, ring_index,
!!((ring->cons + txbbs_skipped) &
ring->size), timestamp);
+
+ mlx4_en_stamp_wqe(priv, ring, stamp_index,
+ !!((ring->cons + txbbs_stamp) &
+ ring->size));
+ stamp_index = ring_index;
+ txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index);
@@ -556,17 +588,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
- struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
- struct ethhdr *ethh;
int tx_ind = 0;
int nr_txbb;
int desc_size;
int real_size;
- dma_addr_t dma;
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
@@ -642,6 +672,61 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
+ if (lso_header_size)
+ data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+ DS_SIZE));
+ else
+ data = &tx_desc->data;
+
+ /* valid only for non-inline segments */
+ tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+ !is_inline(skb, NULL)) ? 1 : 0;
+
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (is_inline(skb, &fragptr)) {
+ tx_info->inl = 1;
+ } else {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = skb_frag_dma_map(ddev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ u32 byte_count = skb_headlen(skb) - lso_header_size;
+ dma_addr_t dma;
+
+ dma = dma_map_single(ddev, skb->data +
+ lso_header_size, byte_count,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ }
+ tx_info->inl = 0;
+ }
+
/*
* For timestamping add flag to skb_shinfo and
* set flag for further reference
@@ -666,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+ struct ethhdr *ethh;
+
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
@@ -688,8 +775,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Copy headers;
* note that we already verified that it is linear */
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- data = ((void *) &tx_desc->lso +
- ALIGN(lso_header_size + 4, DS_SIZE));
priv->port_stats.tso_packets++;
i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -701,7 +786,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- data = &tx_desc->data;
tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
ring->packets++;
@@ -710,38 +794,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
-
- /* valid only for none inline segments */
- tx_info->data_offset = (void *) data - (void *) tx_desc;
-
- tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
- data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
- if (!is_inline(skb, &fragptr)) {
- /* Map fragments */
- for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
- frag = &skb_shinfo(skb)->frags[i];
- dma = skb_frag_dma_map(priv->ddev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_frag_size(frag));
- --data;
- }
-
- /* Map linear part */
- if (tx_info->linear) {
- dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
- skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
- }
- tx_info->inl = 0;
- } else {
+ if (tx_info->inl) {
build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
tx_info->inl = 1;
}
@@ -781,6 +834,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
+tx_drop_unmap:
+ en_err(priv, "DMA mapping error\n");
+
+ for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+ data++;
+ dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ }
+
tx_drop:
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
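The new tx_drop_unmap path above adds the usual map-or-unwind discipline to mlx4_en_xmit(): each fragment mapping is checked with dma_mapping_error(), and on failure the fragments mapped so far are released before the packet is dropped. A generic user-space sketch of that unwind shape, with fake_map()/fake_unmap() as hypothetical stand-ins for skb_frag_dma_map() and dma_unmap_page():

#include <stdbool.h>
#include <stdio.h>

#define NR_FRAGS 4

static bool fake_map(int i)   { return i != 2; /* pretend frag 2 fails */ }
static void fake_unmap(int i) { printf("unmapped frag %d\n", i); }

static int map_all_frags(void)
{
	int i;

	for (i = 0; i < NR_FRAGS; i++) {
		if (!fake_map(i))
			goto unmap;
	}
	return 0;

unmap:
	while (--i >= 0)	/* release only the fragments mapped so far */
		fake_unmap(i);
	return -1;
}

int main(void)
{
	return map_all_frags() ? 1 : 0;
}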
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 7e042869ef0..0416c5b3b35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -79,6 +79,7 @@ enum {
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \
(1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
@@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_OP_REQUIRED:
+ atomic_inc(&priv->opreq_count);
+ /* FW commands can't be executed from interrupt context,
+ * so defer the work to a task
+ */
+ queue_work(mlx4_wq, &priv->opreq_task);
+ break;
+
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Received comm channel event "
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 6fc6dabc78d..0d63daa2f42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1696,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
+
+enum {
+ ADD_TO_MCG = 0x26,
+};
+
+
+void mlx4_opreq_action(struct work_struct *work)
+{
+ struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
+ opreq_task);
+ struct mlx4_dev *dev = &priv->dev;
+ int num_tasks = atomic_read(&priv->opreq_count);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 *outbox;
+ u32 modifier;
+ u16 token;
+ u16 type_m;
+ u16 type;
+ int err;
+ u32 num_qps;
+ struct mlx4_qp qp;
+ int i;
+ u8 rem_mcg;
+ u8 prot;
+
+#define GET_OP_REQ_MODIFIER_OFFSET 0x08
+#define GET_OP_REQ_TOKEN_OFFSET 0x14
+#define GET_OP_REQ_TYPE_OFFSET 0x1a
+#define GET_OP_REQ_DATA_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
+ return;
+ }
+ outbox = mailbox->buf;
+
+ while (num_tasks) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to retrieve required operation: %d\n",
+ err);
+ return;
+ }
+ MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
+ MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
+ MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
+ type_m = type >> 12;
+ type &= 0xfff;
+
+ switch (type) {
+ case ADD_TO_MCG:
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
+ err = EPERM;
+ break;
+ }
+ mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
+ GET_OP_REQ_DATA_OFFSET);
+ num_qps = be32_to_cpu(mgm->members_count) &
+ MGM_QPN_MASK;
+ rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
+ prot = ((u8 *)(&mgm->members_count))[0] >> 6;
+
+ for (i = 0; i < num_qps; i++) {
+ qp.qpn = be32_to_cpu(mgm->qp[i]);
+ if (rem_mcg)
+ err = mlx4_multicast_detach(dev, &qp,
+ mgm->gid,
+ prot, 0);
+ else
+ err = mlx4_multicast_attach(dev, &qp,
+ mgm->gid,
+ mgm->gid[5]
+ , 0, prot,
+ NULL);
+ if (err)
+ break;
+ }
+ break;
+ default:
+ mlx4_warn(dev, "Bad type for required operation\n");
+ err = EINVAL;
+ break;
+ }
+ err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+ 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to acknowledge required request: %d\n",
+ err);
+ goto out;
+ }
+ memset(outbox, 0, 0xffc);
+ num_tasks = atomic_dec_return(&priv->opreq_count);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+}
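The GET_OP_REQ handling above follows a common deferral pattern: the interrupt path only bumps an atomic counter and queues work, and the worker keeps fetching requests until a decrement-and-test says the counter reached zero. A user-space sketch of that drain loop, where service_one_request() is a hypothetical stand-in for issuing MLX4_CMD_GET_OP_REQ:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int opreq_count;

static void irq_path(void)                 /* interrupt context: stays cheap */
{
	atomic_fetch_add(&opreq_count, 1); /* remember one more request */
	/* queue_work(...) would be called here */
}

static void service_one_request(int n) { printf("handled request %d\n", n); }

static void worker(void)                   /* process context: may sleep */
{
	int num_tasks = atomic_load(&opreq_count);

	while (num_tasks) {
		service_one_request(num_tasks);
		num_tasks = atomic_fetch_sub(&opreq_count, 1) - 1;
	}
}

int main(void)
{
	irq_path();
	irq_path();
	worker();
	return 0;
}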
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index fdf41665a05..a0a368b7c93 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
int mlx4_NOP(struct mlx4_dev *dev);
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
+void mlx4_opreq_action(struct work_struct *work);
#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 36be3208786..60c9f4f103f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_xrcd_table_free;
}
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ goto err_mr_table_free;
+ }
+ }
+
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
- goto err_mr_table_free;
+ goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
@@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- if (!mlx4_is_slave(dev)) {
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
- }
- }
-
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_mcg_table_free;
+ goto err_qp_table_free;
}
if (!mlx4_is_slave(dev)) {
@@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1821,6 +1817,10 @@ err_cmd_poll:
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
+err_mcg_table_free:
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_mcg_table(dev);
+
err_mr_table_free:
mlx4_cleanup_mr_table(dev);
@@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
}
}
+ atomic_set(&priv->opreq_count, 0);
+ INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
@@ -2315,12 +2318,12 @@ err_port:
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
@@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
RES_TR_FREE_SLAVES_ONLY);
mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f3e804f2a35..55f6245efb6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,19 +39,8 @@
#include "mlx4.h"
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
-
static const u8 zero_gid[16]; /* automatically initialized to 0 */
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_MAX_QP_PER_MGM];
-};
-
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 17d9277e33e..348bb8c7d9a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -554,6 +554,17 @@ struct mlx4_mfunc {
struct mlx4_mfunc_master_ctx master;
};
+#define MGM_QPN_MASK 0x00FFFFFF
+#define MGM_BLCK_LB_BIT 30
+
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
@@ -802,6 +813,8 @@ struct mlx4_priv {
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
__be64 slave_node_guids[MLX4_MFUNC_MAX];
+ atomic_t opreq_count;
+ struct work_struct opreq_task;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index f984a89c27d..dd687632111 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
- int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
int sq_size;
int rq_size;
int total_pages;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 21962828925..157fe8df2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,13 +6,3 @@ config MLX5_CORE
tristate
depends on PCI && X86
default n
-
-config MLX5_DEBUG
- bool "Verbose debugging output" if (MLX5_CORE && EXPERT)
- depends on MLX5_CORE
- default y
- ---help---
- This option causes debugging code to be compiled into the
- mlx5_core driver. The output can be turned on via the
- debug_mask module parameter (which can also be set after
- the driver is loaded through sysfs).
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 5472cbd3402..6ca30739625 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
return 0;
}
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+ int csum)
{
block->token = token;
- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ if (csum) {
+ block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+ sizeof(block->data) - 2);
+ block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ }
}
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
struct mlx5_cmd_mailbox *next = msg->next;
while (next) {
- calc_block_sig(next->buf, token);
+ calc_block_sig(next->buf, token, csum);
next = next->next;
}
}
-static void set_signature(struct mlx5_cmd_work_ent *ent)
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
- calc_chain_sig(ent->in, ent->token);
- calc_chain_sig(ent->out, ent->token);
+ calc_chain_sig(ent->in, ent->token, csum);
+ calc_chain_sig(ent->out, ent->token, csum);
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
lay->type = MLX5_PCI_CMD_XPORT;
lay->token = ent->token;
lay->status_own = CMD_OWNER_HW;
- if (!cmd->checksum_disabled)
- set_signature(ent);
+ set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ktime_get_ts(&ent->ts1);
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
block = next->buf;
- if (xor8_buf(block, sizeof(*block)) != 0xff)
- return -EINVAL;
memcpy(to, block->data, copy);
to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
goto err_map;
}
+ cmd->checksum_disabled = 1;
cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
- case MLX5_CMD_STAT_LIM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 443cc4d7b02..2231d93cc7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
+ snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ name, pci_name(dev->pdev));
eq->eqn = out.eq_number;
err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- name, eq);
+ eq->name, eq);
if (err)
goto err_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b47739b0b5f..bc0f5fb66e2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
struct mlx5_cmd_set_hca_cap_mbox_out set_out;
- struct mlx5_profile *prof = dev->profile;
u64 flags;
- int csum = 1;
int err;
memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
sizeof(set_ctx->hca_cap));
- if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
- csum = !!prof->cmdif_csum;
- flags = be64_to_cpu(set_ctx->hca_cap.flags);
- if (csum)
- flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
- else
- flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
- set_ctx->hca_cap.flags = cpu_to_be64(flags);
- }
-
if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
+ flags = be64_to_cpu(query_out->hca_cap.flags);
+ /* disable checksum */
+ flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+ set_ctx->hca_cap.flags = cpu_to_be64(flags);
memset(&set_out, 0, sizeof(set_out));
set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
if (err)
goto query_ex;
- if (!csum)
- dev->cmd.checksum_disabled = 1;
-
query_ex:
kfree(query_out);
kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 3a2408d4482..7b12acf210f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
__be64 pas[0];
};
+enum {
+ MAX_RECLAIM_TIME_MSECS = 5000,
+};
+
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int err;
int i;
+ if (nclaimed)
+ *nclaimed = 0;
+
memset(&in, 0, sizeof(in));
outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
- unsigned long end = jiffies + msecs_to_jiffies(5000);
+ unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
struct fw_page *fwp;
struct rb_node *p;
+ int nclaimed = 0;
int err;
do {
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
return err;
}
+ if (nclaimed)
+ end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
}
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index e393d998be8..0951f7aca1e 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}
-void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_rx(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
{
u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
@@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
}
}
-void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_tx(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
{
u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
@@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
netif_wake_queue(netdev);
}
-void ks8842_handle_rx_overrun(struct net_device *netdev,
+static void ks8842_handle_rx_overrun(struct net_device *netdev,
struct ks8842_adapter *adapter)
{
netdev_dbg(netdev, "%s: entry\n", __func__);
@@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev,
netdev->stats.rx_fifo_errors++;
}
-void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(unsigned long arg)
{
struct net_device *netdev = (struct net_device *)arg;
struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1146,7 +1148,7 @@ static int ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
- struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
u16 id;
unsigned i;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ac20098b542..075f4e21d33 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
}
-void ks_enable_qmu(struct ks_net *ks)
+static void ks_enable_qmu(struct ks_net *ks)
{
u16 w;
@@ -915,7 +915,7 @@ static int ks_net_open(struct net_device *netdev)
struct ks_net *ks = netdev_priv(netdev);
int err;
-#define KS_INT_FLAGS (IRQF_DISABLED|IRQF_TRIGGER_LOW)
+#define KS_INT_FLAGS IRQF_TRIGGER_LOW
/* lock the card, even if we may not actually do anything
* else at the moment.
*/
@@ -1636,7 +1636,7 @@ static int ks8851_probe(struct platform_device *pdev)
} else {
struct ks8851_mll_platform_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
netdev_err(netdev, "No platform data\n");
err = -ENODEV;
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
new file mode 100644
index 00000000000..1731e050fa2
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -0,0 +1,30 @@
+#
+# MOXART device configuration
+#
+
+config NET_VENDOR_MOXART
+ bool "MOXA ART devices"
+ default y
+ depends on (ARM && ARCH_MOXART)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about MOXA ART devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MOXART
+
+config ARM_MOXART_ETHER
+ tristate "MOXART Ethernet support"
+ depends on ARM && ARCH_MOXART
+ select NET_CORE
+ ---help---
+ If you wish to compile a kernel for hardware with a MOXA ART SoC and
+ want to use the internal Ethernet, then you should answer Y to this.
+
+
+endif # NET_VENDOR_MOXART
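For reference, a minimal kernel .config fragment enabling the driver added by the Kconfig entries above would look as follows (illustrative sketch only; ARM_MOXART_ETHER is a tristate, so it may equally be set to y for a built-in driver):

    CONFIG_NET_VENDOR_MOXART=y
    CONFIG_ARM_MOXART_ETHER=m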
diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile
new file mode 100644
index 00000000000..aa3c73e9e95
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the MOXART network device drivers.
+#
+
+obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
new file mode 100644
index 00000000000..bd1a2d2bc2a
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -0,0 +1,559 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/dma-mapping.h>
+
+#include "moxart_ether.h"
+
+static inline void moxart_emac_write(struct net_device *ndev,
+ unsigned int reg, unsigned long value)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(value, priv->base + reg);
+}
+
+static void moxart_update_mac_address(struct net_device *ndev)
+{
+ moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
+ ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
+ moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
+ ((ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ (ndev->dev_addr[5])));
+}
+
+static int moxart_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ moxart_update_mac_address(ndev);
+
+ return 0;
+}
+
+static void moxart_mac_free_memory(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (priv->tx_desc_base)
+ dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+ priv->tx_desc_base, priv->tx_base);
+
+ if (priv->rx_desc_base)
+ dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+ priv->rx_desc_base, priv->rx_base);
+
+ kfree(priv->tx_buf_base);
+ kfree(priv->rx_buf_base);
+}
+
+static void moxart_mac_reset(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(SW_RST, priv->base + REG_MAC_CTRL);
+ while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
+ mdelay(10);
+
+ writel(0, priv->base + REG_INTERRUPT_MASK);
+
+ priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
+}
+
+static void moxart_mac_enable(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
+ writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
+ writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);
+
+ priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+ priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
+ writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+}
+
+static void moxart_mac_setup_desc_ring(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ void __iomem *desc;
+ int i;
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
+ memset(desc, 0, TX_REG_DESC_SIZE);
+
+ priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
+ }
+ writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
+ memset(desc, 0, RX_REG_DESC_SIZE);
+ writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+ writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+ desc + RX_REG_OFFSET_DESC1);
+
+ priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
+ priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_buf[i],
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ netdev_err(ndev, "DMA mapping error\n");
+
+ writel(priv->rx_mapping[i],
+ desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
+ writel(priv->rx_buf[i],
+ desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
+ }
+ writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+
+ priv->rx_head = 0;
+
+ /* reset the MAC controller TX/RX descriptor base address */
+ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
+ writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
+}
+
+static int moxart_mac_open(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ napi_enable(&priv->napi);
+
+ moxart_mac_reset(ndev);
+ moxart_update_mac_address(ndev);
+ moxart_mac_setup_desc_ring(ndev);
+ moxart_mac_enable(ndev);
+ netif_start_queue(ndev);
+
+ netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
+ __func__, readl(priv->base + REG_INTERRUPT_MASK),
+ readl(priv->base + REG_MAC_CTRL));
+
+ return 0;
+}
+
+static int moxart_mac_stop(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ napi_disable(&priv->napi);
+
+ netif_stop_queue(ndev);
+
+ /* disable all interrupts */
+ writel(0, priv->base + REG_INTERRUPT_MASK);
+
+ /* disable all functions */
+ writel(0, priv->base + REG_MAC_CTRL);
+
+ return 0;
+}
+
+static int moxart_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct moxart_mac_priv_t *priv = container_of(napi,
+ struct moxart_mac_priv_t,
+ napi);
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ void __iomem *desc;
+ unsigned int desc0, len;
+ int rx_head = priv->rx_head;
+ int rx = 0;
+
+ while (1) {
+ desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
+ desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+
+ if (desc0 & RX_DESC0_DMA_OWN)
+ break;
+
+ if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
+ RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
+ net_dbg_ratelimited("packet error\n");
+ priv->stats.rx_dropped++;
+ priv->stats.rx_errors++;
+ continue;
+ }
+
+ len = desc0 & RX_DESC0_FRAME_LEN_MASK;
+
+ if (len > RX_BUF_SIZE)
+ len = RX_BUF_SIZE;
+
+ skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+ if (unlikely(!skb)) {
+ net_dbg_ratelimited("build_skb failed\n");
+ priv->stats.rx_dropped++;
+ priv->stats.rx_errors++;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ rx++;
+
+ ndev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+ if (desc0 & RX_DESC0_MULTICAST)
+ priv->stats.multicast++;
+
+ writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+
+ rx_head = RX_NEXT(rx_head);
+ priv->rx_head = rx_head;
+
+ if (rx >= budget)
+ break;
+ }
+
+ if (rx < budget) {
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
+ }
+
+ priv->reg_imr |= RPKT_FINISH_M;
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+ return rx;
+}
+
+static void moxart_tx_finished(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ unsigned tx_head = priv->tx_head;
+ unsigned tx_tail = priv->tx_tail;
+
+ while (tx_tail != tx_head) {
+ dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ priv->tx_len[tx_tail], DMA_TO_DEVICE);
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+
+ dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+
+ tx_tail = TX_NEXT(tx_tail);
+ }
+ priv->tx_tail = tx_tail;
+}
+
+static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *) dev_id;
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);
+
+ if (ists & XPKT_OK_INT_STS)
+ moxart_tx_finished(ndev);
+
+ if (ists & RPKT_FINISH) {
+ if (napi_schedule_prep(&priv->napi)) {
+ priv->reg_imr &= ~RPKT_FINISH_M;
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ void __iomem *desc;
+ unsigned int len;
+ unsigned int tx_head = priv->tx_head;
+ u32 txdes1;
+ int ret = NETDEV_TX_BUSY;
+
+ desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
+
+ spin_lock_irq(&priv->txlock);
+ if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+ net_dbg_ratelimited("no TX space for packet\n");
+ priv->stats.tx_dropped++;
+ goto out_unlock;
+ }
+
+ len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
+
+ priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out_unlock;
+ }
+
+ priv->tx_len[tx_head] = len;
+ priv->tx_skb[tx_head] = skb;
+
+ writel(priv->tx_mapping[tx_head],
+ desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
+ writel(skb->data,
+ desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
+
+ if (skb->len < ETH_ZLEN) {
+ memset(&skb->data[skb->len],
+ 0, ETH_ZLEN - skb->len);
+ len = ETH_ZLEN;
+ }
+
+ txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
+ txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
+ txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
+ txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+ writel(txdes1, desc + TX_REG_OFFSET_DESC1);
+ writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+
+ /* start to send packet */
+ writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
+
+ priv->tx_head = TX_NEXT(tx_head);
+
+ ndev->trans_start = jiffies;
+ ret = NETDEV_TX_OK;
+out_unlock:
+ spin_unlock_irq(&priv->txlock);
+
+ return ret;
+}
+
+static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ return &priv->stats;
+}
+
+static void moxart_mac_setmulticast(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ int crc_val;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
+ crc_val = (crc_val >> 26) & 0x3f;
+ if (crc_val >= 32) {
+ writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
+ (1UL << (crc_val - 32)),
+ priv->base + REG_MCAST_HASH_TABLE1);
+ } else {
+ writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
+ (1UL << crc_val),
+ priv->base + REG_MCAST_HASH_TABLE0);
+ }
+ }
+}
+
+static void moxart_mac_set_rx_mode(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ spin_lock_irq(&priv->txlock);
+
+ (ndev->flags & IFF_PROMISC) ? (priv->reg_maccr |= RCV_ALL) :
+ (priv->reg_maccr &= ~RCV_ALL);
+
+ (ndev->flags & IFF_ALLMULTI) ? (priv->reg_maccr |= RX_MULTIPKT) :
+ (priv->reg_maccr &= ~RX_MULTIPKT);
+
+ if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
+ priv->reg_maccr |= HT_MULTI_EN;
+ moxart_mac_setmulticast(ndev);
+ } else {
+ priv->reg_maccr &= ~HT_MULTI_EN;
+ }
+
+ writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+
+ spin_unlock_irq(&priv->txlock);
+}
+
+static struct net_device_ops moxart_netdev_ops = {
+ .ndo_open = moxart_mac_open,
+ .ndo_stop = moxart_mac_stop,
+ .ndo_start_xmit = moxart_mac_start_xmit,
+ .ndo_get_stats = moxart_mac_get_stats,
+ .ndo_set_rx_mode = moxart_mac_set_rx_mode,
+ .ndo_set_mac_address = moxart_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int moxart_mac_probe(struct platform_device *pdev)
+{
+ struct device *p_dev = &pdev->dev;
+ struct device_node *node = p_dev->of_node;
+ struct net_device *ndev;
+ struct moxart_mac_priv_t *priv;
+ struct resource *res;
+ unsigned int irq;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
+ if (!ndev)
+ return -ENOMEM;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ netdev_err(ndev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ndev->base_addr = res->start;
+ priv->base = devm_ioremap_resource(p_dev, res);
+ ret = IS_ERR(priv->base);
+ if (ret) {
+ dev_err(p_dev, "devm_ioremap_resource failed\n");
+ goto init_fail;
+ }
+
+ spin_lock_init(&priv->txlock);
+
+ priv->tx_buf_size = TX_BUF_SIZE;
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+ TX_DESC_NUM, &priv->tx_base,
+ GFP_DMA | GFP_KERNEL);
+ if (priv->tx_desc_base == NULL)
+ goto init_fail;
+
+ priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+ RX_DESC_NUM, &priv->rx_base,
+ GFP_DMA | GFP_KERNEL);
+ if (priv->rx_desc_base == NULL)
+ goto init_fail;
+
+ priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
+ GFP_ATOMIC);
+ if (!priv->tx_buf_base)
+ goto init_fail;
+
+ priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
+ GFP_ATOMIC);
+ if (!priv->rx_buf_base)
+ goto init_fail;
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
+ pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ ether_setup(ndev);
+ ndev->netdev_ops = &moxart_netdev_ops;
+ netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ free_netdev(ndev);
+ goto init_fail;
+ }
+
+ netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
+ __func__, ndev->irq, ndev->dev_addr);
+
+ return 0;
+
+init_fail:
+ netdev_err(ndev, "init failed\n");
+ moxart_mac_free_memory(ndev);
+
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_netdev(ndev);
+ free_irq(ndev->irq, ndev);
+ moxart_mac_free_memory(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_mac_match[] = {
+ { .compatible = "moxa,moxart-mac" },
+ { }
+};
+
+static struct platform_driver moxart_mac_driver = {
+ .probe = moxart_mac_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-ethernet",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_mac_match,
+ },
+};
+module_platform_driver(moxart_mac_driver);
+
+MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
new file mode 100644
index 00000000000..2be9280d608
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -0,0 +1,330 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MOXART_ETHERNET_H
+#define _MOXART_ETHERNET_H
+
+#define TX_REG_OFFSET_DESC0 0
+#define TX_REG_OFFSET_DESC1 4
+#define TX_REG_OFFSET_DESC2 8
+#define TX_REG_DESC_SIZE 16
+
+#define RX_REG_OFFSET_DESC0 0
+#define RX_REG_OFFSET_DESC1 4
+#define RX_REG_OFFSET_DESC2 8
+#define RX_REG_DESC_SIZE 16
+
+#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */
+#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */
+#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */
+#define TX_DESC1_BUF_SIZE_MASK 0x7ff
+#define TX_DESC1_LTS 0x8000000 /* last TX packet */
+#define TX_DESC1_FTS 0x10000000 /* first TX packet */
+#define TX_DESC1_FIFO_COMPLETE 0x20000000
+#define TX_DESC1_INTR_COMPLETE 0x40000000
+#define TX_DESC1_END 0x80000000
+#define TX_DESC2_ADDRESS_PHYS 0
+#define TX_DESC2_ADDRESS_VIRT 4
+
+#define RX_DESC0_FRAME_LEN 0
+#define RX_DESC0_FRAME_LEN_MASK 0x7FF
+#define RX_DESC0_MULTICAST 0x10000
+#define RX_DESC0_BROADCAST 0x20000
+#define RX_DESC0_ERR 0x40000
+#define RX_DESC0_CRC_ERR 0x80000
+#define RX_DESC0_FTL 0x100000
+#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */
+#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */
+#define RX_DESC0_LRS 0x10000000 /* last receive segment */
+#define RX_DESC0_FRS 0x20000000 /* first receive segment */
+#define RX_DESC0_DMA_OWN 0x80000000
+#define RX_DESC1_BUF_SIZE_MASK 0x7FF
+#define RX_DESC1_END 0x80000000
+#define RX_DESC2_ADDRESS_PHYS 0
+#define RX_DESC2_ADDRESS_VIRT 4
+
+#define TX_DESC_NUM 64
+#define TX_DESC_NUM_MASK (TX_DESC_NUM-1)
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
+#define TX_BUF_SIZE 1600
+#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
+
+#define RX_DESC_NUM 64
+#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK))
+#define RX_BUF_SIZE 1600
+#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1)
+
+#define REG_INTERRUPT_STATUS 0
+#define REG_INTERRUPT_MASK 4
+#define REG_MAC_MS_ADDRESS 8
+#define REG_MAC_LS_ADDRESS 12
+#define REG_MCAST_HASH_TABLE0 16
+#define REG_MCAST_HASH_TABLE1 20
+#define REG_TX_POLL_DEMAND 24
+#define REG_RX_POLL_DEMAND 28
+#define REG_TXR_BASE_ADDRESS 32
+#define REG_RXR_BASE_ADDRESS 36
+#define REG_INT_TIMER_CTRL 40
+#define REG_APOLL_TIMER_CTRL 44
+#define REG_DMA_BLEN_CTRL 48
+#define REG_RESERVED1 52
+#define REG_MAC_CTRL 136
+#define REG_MAC_STATUS 140
+#define REG_PHY_CTRL 144
+#define REG_PHY_WRITE_DATA 148
+#define REG_FLOW_CTRL 152
+#define REG_BACK_PRESSURE 156
+#define REG_RESERVED2 160
+#define REG_TEST_SEED 196
+#define REG_DMA_FIFO_STATE 200
+#define REG_TEST_MODE 204
+#define REG_RESERVED3 208
+#define REG_TX_COL_COUNTER 212
+#define REG_RPF_AEP_COUNTER 216
+#define REG_XM_PG_COUNTER 220
+#define REG_RUNT_TLC_COUNTER 224
+#define REG_CRC_FTL_COUNTER 228
+#define REG_RLC_RCC_COUNTER 232
+#define REG_BROC_COUNTER 236
+#define REG_MULCA_COUNTER 240
+#define REG_RP_COUNTER 244
+#define REG_XP_COUNTER 248
+
+#define REG_PHY_CTRL_OFFSET 0x0
+#define REG_PHY_STATUS 0x1
+#define REG_PHY_ID1 0x2
+#define REG_PHY_ID2 0x3
+#define REG_PHY_ANA 0x4
+#define REG_PHY_ANLPAR 0x5
+#define REG_PHY_ANE 0x6
+#define REG_PHY_ECTRL1 0x10
+#define REG_PHY_QPDS 0x11
+#define REG_PHY_10BOP 0x12
+#define REG_PHY_ECTRL2 0x13
+#define REG_PHY_FTMAC100_WRITE 0x8000000
+#define REG_PHY_FTMAC100_READ 0x4000000
+
+/* REG_INTERRUPT_STATUS */
+#define RPKT_FINISH BIT(0) /* DMA data received */
+#define NORXBUF BIT(1) /* receive buffer unavailable */
+#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */
+#define NOTXBUF BIT(3) /* transmit buffer unavailable */
+#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */
+#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */
+#define RPKT_SAV BIT(6) /* FIFO receive success */
+#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */
+#define AHB_ERR BIT(8) /* AHB error */
+#define PHYSTS_CHG BIT(9) /* PHY link status change */
+
+/* REG_INTERRUPT_MASK */
+#define RPKT_FINISH_M BIT(0)
+#define NORXBUF_M BIT(1)
+#define XPKT_FINISH_M BIT(2)
+#define NOTXBUF_M BIT(3)
+#define XPKT_OK_M BIT(4)
+#define XPKT_LOST_M BIT(5)
+#define RPKT_SAV_M BIT(6)
+#define RPKT_LOST_M BIT(7)
+#define AHB_ERR_M BIT(8)
+#define PHYSTS_CHG_M BIT(9)
+
+/* REG_MAC_MS_ADDRESS */
+#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */
+
+/* REG_INT_TIMER_CTRL */
+#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */
+#define TXINT_THR_MASK 0x7000
+#define TXINT_CNT_MASK 0xf00
+#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */
+#define RXINT_THR_MASK 0x70
+#define RXINT_CNT_MASK 0xF
+
+/* REG_APOLL_TIMER_CTRL */
+#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */
+#define TXPOLL_CNT_MASK 0xf00
+#define TXPOLL_CNT_SHIFT_BIT 8
+#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */
+#define RXPOLL_CNT_MASK 0xF
+#define RXPOLL_CNT_SHIFT_BIT 0
+
+/* REG_DMA_BLEN_CTRL */
+#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */
+#define RXFIFO_HTHR_MASK 0x1c0
+#define RXFIFO_LTHR_MASK 0x38
+#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */
+#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */
+#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */
+
+/* REG_MAC_CTRL */
+#define RX_BROADPKT BIT(17) /* receive broadcast packets */
+#define RX_MULTIPKT BIT(16) /* receive all multicast packets */
+#define FULLDUP BIT(15) /* full duplex */
+#define CRC_APD BIT(14) /* append CRC to transmitted packet */
+#define RCV_ALL BIT(12) /* ignore incoming packet destination */
+#define RX_FTL BIT(11) /* accept packets larger than 1518 B */
+#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */
+#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */
+#define RCV_EN BIT(8) /* receiver enable */
+#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */
+#define XMT_EN BIT(5) /* transmit enable */
+#define CRC_DIS BIT(4) /* disable CRC check when receiving */
+#define LOOP_EN BIT(3) /* internal loop-back */
+#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */
+#define RDMA_EN BIT(1) /* enable receive DMA chan */
+#define XDMA_EN BIT(0) /* enable transmit DMA chan */
+
+/* REG_MAC_STATUS */
+#define COL_EXCEED BIT(11) /* more than 16 collisions */
+#define LATE_COL BIT(10) /* transmit late collision detected */
+#define XPKT_LOST BIT(9) /* transmit to ethernet lost */
+#define XPKT_OK BIT(8) /* transmit to ethernet success */
+#define RUNT_MAC_STS BIT(7) /* receive runt detected */
+#define FTL_MAC_STS BIT(6) /* receive frame too long detected */
+#define CRC_ERR_MAC_STS BIT(5)
+#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */
+#define RPKT_SAVE BIT(3) /* RX FIFO receive success */
+#define COL BIT(2) /* collision, incoming packet dropped */
+#define MCPU_BROADCAST BIT(1)
+#define MCPU_MULTICAST BIT(0)
+
+/* REG_PHY_CTRL */
+#define MIIWR BIT(27) /* init write sequence (auto cleared) */
+#define MIIRD BIT(26)
+#define REGAD_MASK 0x3e00000
+#define PHYAD_MASK 0x1f0000
+#define MIIRDATA_MASK 0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK 0xffff
+
+/* REG_FLOW_CTRL */
+#define PAUSE_TIME_MASK 0xffff0000
+#define FC_HIGH_MASK 0xf000
+#define FC_LOW_MASK 0xf00
+#define RX_PAUSE BIT(4) /* receive pause frame */
+#define TX_PAUSED BIT(3) /* transmit pause due to receive */
+#define FCTHR_EN BIT(2) /* enable threshold mode. */
+#define TX_PAUSE BIT(1) /* transmit pause frame */
+#define FC_EN BIT(0) /* flow control mode enable */
+
+/* REG_BACK_PRESSURE */
+#define BACKP_LOW_MASK 0xf00
+#define BACKP_JAM_LEN_MASK 0xf0
+#define BACKP_MODE BIT(1) /* address mode */
+#define BACKP_ENABLE BIT(0)
+
+/* REG_TEST_SEED */
+#define TEST_SEED_MASK 0x3fff
+
+/* REG_DMA_FIFO_STATE */
+#define TX_DMA_REQUEST BIT(31)
+#define RX_DMA_REQUEST BIT(30)
+#define TX_DMA_GRANT BIT(29)
+#define RX_DMA_GRANT BIT(28)
+#define TX_FIFO_EMPTY BIT(27)
+#define RX_FIFO_EMPTY BIT(26)
+#define TX_DMA2_SM_MASK 0x7000
+#define TX_DMA1_SM_MASK 0xf00
+#define RX_DMA2_SM_MASK 0x70
+#define RX_DMA1_SM_MASK 0xF
+
+/* REG_TEST_MODE */
+#define SINGLE_PKT BIT(26) /* single packet mode */
+#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */
+#define ITIMER_TEST BIT(24) /* interrupt timer test mode */
+#define TEST_SEED_SELECT BIT(22)
+#define SEED_SELECT BIT(21)
+#define TEST_MODE BIT(20)
+#define TEST_TIME_MASK 0xffc00
+#define TEST_EXCEL_MASK 0x3e0
+
+/* REG_TX_COL_COUNTER */
+#define TX_MCOL_MASK 0xffff0000
+#define TX_MCOL_SHIFT_BIT 16
+#define TX_SCOL_MASK 0xffff
+#define TX_SCOL_SHIFT_BIT 0
+
+/* REG_RPF_AEP_COUNTER */
+#define RPF_MASK 0xffff0000
+#define RPF_SHIFT_BIT 16
+#define AEP_MASK 0xffff
+#define AEP_SHIFT_BIT 0
+
+/* REG_XM_PG_COUNTER */
+#define XM_MASK 0xffff0000
+#define XM_SHIFT_BIT 16
+#define PG_MASK 0xffff
+#define PG_SHIFT_BIT 0
+
+/* REG_RUNT_TLC_COUNTER */
+#define RUNT_CNT_MASK 0xffff0000
+#define RUNT_CNT_SHIFT_BIT 16
+#define TLCC_MASK 0xffff
+#define TLCC_SHIFT_BIT 0
+
+/* REG_CRC_FTL_COUNTER */
+#define CRCER_CNT_MASK 0xffff0000
+#define CRCER_CNT_SHIFT_BIT 16
+#define FTL_CNT_MASK 0xffff
+#define FTL_CNT_SHIFT_BIT 0
+
+/* REG_RLC_RCC_COUNTER */
+#define RLC_MASK 0xffff0000
+#define RLC_SHIFT_BIT 16
+#define RCC_MASK 0xffff
+#define RCC_SHIFT_BIT 0
+
+/* REG_PHY_STATUS */
+#define AN_COMPLETE 0x20
+#define LINK_STATUS 0x4
+
+struct moxart_mac_priv_t {
+ void __iomem *base;
+ struct net_device_stats stats;
+ unsigned int reg_maccr;
+ unsigned int reg_imr;
+ struct napi_struct napi;
+ struct net_device *ndev;
+
+ dma_addr_t rx_base;
+ dma_addr_t rx_mapping[RX_DESC_NUM];
+ void __iomem *rx_desc_base;
+ unsigned char *rx_buf_base;
+ unsigned char *rx_buf[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+
+ dma_addr_t tx_base;
+ dma_addr_t tx_mapping[TX_DESC_NUM];
+ void __iomem *tx_desc_base;
+ unsigned char *tx_buf_base;
+ unsigned char *tx_buf[TX_DESC_NUM];
+ unsigned int tx_head;
+ unsigned int tx_buf_size;
+
+ spinlock_t txlock;
+ unsigned int tx_len[TX_DESC_NUM];
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ unsigned int tx_tail;
+};
+
+#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver TX buffer is too large!
+#endif
+#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver RX buffer is too large!
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 967bae8b85c..149355b52ad 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -74,6 +74,7 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
+#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
@@ -194,6 +195,21 @@ struct myri10ge_slice_state {
int cpu;
__be32 __iomem *dca_tag;
#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define SLICE_STATE_IDLE 0
+#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
+#define SLICE_STATE_POLL 2 /* poll owns this slice */
+#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
+#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
+#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
+#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
+ spinlock_t lock;
+ unsigned long lock_napi_yield;
+ unsigned long lock_poll_yield;
+ unsigned long busy_poll_miss;
+ unsigned long busy_poll_cnt;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
char irq_desc[32];
};
@@ -244,7 +260,7 @@ struct myri10ge_priv {
int fw_ver_minor;
int fw_ver_tiny;
int adopted_rx_filter_bug;
- u8 mac_addr[6]; /* eeprom mac address */
+ u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
@@ -909,6 +925,92 @@ abort:
return status;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+ spin_lock_init(&ss->lock);
+ ss->state = SLICE_STATE_IDLE;
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+ int rc = true;
+ spin_lock(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state |= SLICE_STATE_NAPI_YIELD;
+ rc = false;
+ ss->lock_napi_yield++;
+ } else
+ ss->state = SLICE_STATE_NAPI;
+ spin_unlock(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+ spin_lock(&ss->lock);
+ WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock(&ss->lock);
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ int rc = true;
+ spin_lock_bh(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ ss->state |= SLICE_STATE_POLL_YIELD;
+ rc = false;
+ ss->lock_poll_yield++;
+ } else
+ ss->state |= SLICE_STATE_POLL;
+ spin_unlock_bh(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+ spin_lock_bh(&ss->lock);
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock_bh(&ss->lock);
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ WARN_ON(!(ss->state & SLICE_LOCKED));
+ return (ss->state & SLICE_USER_PEND);
+}
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+#endif
+
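Taken together, these helpers form a small ownership protocol between the NAPI handler and the busy-poll handler for each slice. A condensed sketch of how the two consumers are expected to use them, mirroring the myri10ge_poll() and myri10ge_busy_poll() changes further down in this patch (the budget values shown are the ones used there):

	/* NAPI path: yield this round if a busy poller owns the slice */
	if (!myri10ge_ss_lock_napi(ss))
		return budget;
	work_done = myri10ge_clean_rx_done(ss, budget);
	myri10ge_ss_unlock_napi(ss);

	/* busy-poll path: back off immediately if NAPI owns the slice */
	if (!myri10ge_ss_lock_poll(ss))
		return LL_FLUSH_BUSY;
	work_done = myri10ge_clean_rx_done(ss, 4);
	myri10ge_ss_unlock_poll(ss);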
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
@@ -1300,6 +1402,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
}
}
+#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
+
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
@@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
+ bool polling;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
@@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
- skb = napi_get_frags(&ss->napi);
+ /* When busy polling in user context, allocate skb and copy headers to
+ * skb's linear memory ourselves. When not busy polling, use the napi
+ * gro api.
+ */
+ polling = myri10ge_ss_busy_polling(ss);
+ if (polling)
+ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+ else
+ skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
}
myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
+ skb_mark_napi_id(skb, &ss->napi);
+
+ if (polling) {
+ int hlen;
+
+ /* myri10ge_vlan_rx might have moved the header, so compute
+ * length and address again.
+ */
+ hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
+ va = page_address(skb_frag_page(&rx_frags[0])) +
+ rx_frags[0].page_offset;
+ /* Copy header into the skb linear memory */
+ skb_copy_to_linear_data(skb, va, hlen);
+ rx_frags[0].page_offset += hlen;
+ rx_frags[0].size -= hlen;
+ skb->data_len -= hlen;
+ skb->tail += hlen;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ else
+ napi_gro_frags(&ss->napi);
- napi_gro_frags(&ss->napi);
return 1;
}
@@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
+ /* Try later if the busy_poll handler is running. */
+ if (!myri10ge_ss_lock_napi(ss))
+ return budget;
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
+ myri10ge_ss_unlock_napi(ss);
if (work_done < budget) {
napi_complete(napi);
put_be32(htonl(3), ss->irq_claim);
@@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
return work_done;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int myri10ge_busy_poll(struct napi_struct *napi)
+{
+ struct myri10ge_slice_state *ss =
+ container_of(napi, struct myri10ge_slice_state, napi);
+ struct myri10ge_priv *mgp = ss->mgp;
+ int work_done;
+
+ /* Poll only when the link is up */
+ if (mgp->link_state != MXGEFW_LINK_UP)
+ return LL_FLUSH_FAILED;
+
+ if (!myri10ge_ss_lock_poll(ss))
+ return LL_FLUSH_BUSY;
+
+ /* Process a small number of packets */
+ work_done = myri10ge_clean_rx_done(ss, 4);
+ if (work_done)
+ ss->busy_poll_cnt += work_done;
+ else
+ ss->busy_poll_miss++;
+
+ myri10ge_ss_unlock_poll(ss);
+
+ return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
@@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
+ "rx_busy_poll_cnt",
+#endif
};
#define MYRI10GE_NET_STATS_LEN 21
@@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ data[i++] = ss->lock_napi_yield;
+ data[i++] = ss->lock_poll_yield;
+ data[i++] = ss->busy_poll_miss;
+ data[i++] = ss->busy_poll_cnt;
+#endif
}
}
@@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
+ /* Initialize the slice spinlock and state used for polling */
+ myri10ge_ss_init_lock(ss);
+
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
@@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev)
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
+ local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
+ /* Lock the slice to prevent the busy_poll handler from
+ * accessing it. Later when we bring the NIC up, myri10ge_open
+ * resets the slice including this lock.
+ */
+ while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
+ pr_info("Slice %d locked\n", i);
+ mdelay(1);
+ }
}
+ local_bh_enable();
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
@@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
+ napi_hash_del(&ss->napi);
netif_napi_del(&ss->napi);
}
+ /* Wait till napi structs are no longer used, and then free ss. */
+ synchronize_rcu();
kfree(mgp->ss);
mgp->ss = NULL;
}
@@ -3591,9 +3783,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
- ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
- &ss->rx_done.bus,
- GFP_KERNEL | __GFP_ZERO);
+ ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
@@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
ss->dev = mgp->dev;
netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
myri10ge_napi_weight);
+ napi_hash_add(&ss->napi);
}
return 0;
abort:
@@ -3625,13 +3818,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
bool old_allocated;
- int i, status, ncpus, msix_cap;
+ int i, status, ncpus;
mgp->num_slices = 1;
- msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
ncpus = netif_get_num_default_rss_queues();
- if (myri10ge_max_slices == 1 || msix_cap == 0 ||
+ if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
(myri10ge_max_slices == -1 && ncpus < 2))
return;
@@ -3749,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = myri10ge_busy_poll,
+#endif
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index c20766c2f65..79257f71c5d 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -83,8 +83,7 @@ static int jazzsonic_open(struct net_device* dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index c2e0256fe3d..4da172ac559 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -95,8 +95,7 @@ static int xtsonic_open(struct net_device *dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index dc2c6f561e9..e6f0a4366f9 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
- pdata = (struct netxeth_platform_data *)pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
priv->xc = request_xc(pdata->xcno, &pdev->dev);
if (!priv->xc) {
dev_err(&pdev->dev, "unable to request xc engine\n");
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index e88bdb1aa66..79645f74b3a 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct platform_device *pdev;
- char addr[6];
+ char addr[ETH_ALEN];
pdev = ether->pdev;
@@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev)
addr[5] = 0xa8;
if (is_valid_ether_addr(addr))
- memcpy(dev->dev_addr, &addr, 0x06);
+ memcpy(dev->dev_addr, &addr, ETH_ALEN);
else
dev_err(&pdev->dev, "invalid mac address\n");
}
@@ -1014,7 +1014,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
if (ether->rxirq < 0) {
dev_err(&pdev->dev, "failed to get ether rx irq\n");
error = -ENXIO;
- goto failed_free_txirq;
+ goto failed_free_io;
}
platform_set_drvdata(pdev, dev);
@@ -1023,7 +1023,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
if (IS_ERR(ether->clk)) {
dev_err(&pdev->dev, "failed to get ether clock\n");
error = PTR_ERR(ether->clk);
- goto failed_free_rxirq;
+ goto failed_free_io;
}
ether->rmiiclk = clk_get(&pdev->dev, "RMII");
@@ -1049,10 +1049,6 @@ failed_put_rmiiclk:
clk_put(ether->rmiiclk);
failed_put_clk:
clk_put(ether->clk);
-failed_free_rxirq:
- free_irq(ether->rxirq, pdev);
-failed_free_txirq:
- free_irq(ether->txirq, pdev);
failed_free_io:
iounmap(ether->reg);
failed_free_mem:
@@ -1075,9 +1071,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
iounmap(ether->reg);
release_mem_region(ether->res->start, resource_size(ether->res));
- free_irq(ether->txirq, dev);
- free_irq(ether->rxirq, dev);
-
del_timer_sync(&ether->check_timer);
free_netdev(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7779036690c..6797b107587 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -582,6 +582,19 @@ struct pch_gbe_hw_stats {
};
/**
+ * struct pch_gbe_privdata - PCI Device ID driver data
+ * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software
+ * @phy_disable_hibernate: Bool, disable PHY hibernation
+ * @platform_init: Platform initialization callback, called from
+ * probe, prior to PHY initialization.
+ */
+struct pch_gbe_privdata {
+ bool phy_tx_clk_delay;
+ bool phy_disable_hibernate;
+ int (*platform_init)(struct pci_dev *pdev);
+};
+
+/**
* struct pch_gbe_adapter - board specific private data structure
* @stats_lock: Spinlock structure for status
* @ethtool_lock: Spinlock structure for ethtool
@@ -604,6 +617,7 @@ struct pch_gbe_hw_stats {
* @rx_buffer_len: Receive buffer length
* @tx_queue_len: Transmit queue length
* @have_msi: PCI MSI mode flag
 * @pdata: PCI device ID driver_data
*/
struct pch_gbe_adapter {
@@ -631,6 +645,7 @@ struct pch_gbe_adapter {
int hwts_tx_en;
int hwts_rx_en;
struct pci_dev *ptp_pdev;
+ struct pch_gbe_privdata *pdata;
};
#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 1129db0cdf8..f0ceb89af93 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
* filled by get_settings() on a down link, speed is -1: */
if (speed == UINT_MAX) {
speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
}
ret = mii_ethtool_sset(&adapter->mii, ecmd);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ab1039a95bf..5a0f04c2c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
+#include <linux/gpio.h>
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION;
#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
+#define MINNOW_PHY_RESET_GPIO 13
+
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
}
adapter->hw.phy.addr = adapter->mii.phy_id;
netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
- if (addr == 32)
+ if (addr == PCH_GBE_PHY_REGS_LEN)
return -EAGAIN;
/* Selected the phy and isolate the rest */
for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -1488,9 +1491,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
bufsz = adapter->rx_buffer_len;
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
- rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic,
- GFP_KERNEL | __GFP_ZERO);
+ rx_ring->rx_buff_pool =
+ dma_zalloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
if (!rx_ring->rx_buff_pool)
return -ENOMEM;
@@ -1804,9 +1807,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
+ tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
return -ENOMEM;
@@ -1849,9 +1851,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
return -ENOMEM;
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
vfree(rx_ring->buffer_info);
return -ENOMEM;
@@ -2635,6 +2636,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->hw.back = adapter;
adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
+ adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
+ if (adapter->pdata && adapter->pdata->platform_init)
+ adapter->pdata->platform_init(pdev);
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
@@ -2710,6 +2714,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "PCH Network Connection\n");
+ /* Disable hibernation on certain platforms */
+ if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
+ pch_gbe_phy_disable_hibernate(&adapter->hw);
+
device_set_wakeup_enable(&pdev->dev, 1);
return 0;
@@ -2720,9 +2728,48 @@ err_free_netdev:
return ret;
}
+/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
+ * ensure it is awake for probe and init. Request the line and reset the PHY.
+ */
+static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
+{
+ unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
+ unsigned gpio = MINNOW_PHY_RESET_GPIO;
+ int ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+ "minnow_phy_reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
+ return ret;
+ }
+
+ gpio_set_value(gpio, 0);
+ usleep_range(1250, 1500);
+ gpio_set_value(gpio, 1);
+ usleep_range(1250, 1500);
+
+ return ret;
+}
+
+static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
+ .phy_tx_clk_delay = true,
+ .phy_disable_hibernate = true,
+ .platform_init = pch_gbe_minnow_platform_init,
+};
+
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
{.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_VENDOR_ID_CIRCUITCO,
+ .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00),
+ .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
+ },
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
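For illustration, a minimal user-space sketch of the same reset pulse the MinnowBoard platform hook performs: drive the PHY reset line low, hold ~1.25 ms, then release it. This assumes the legacy sysfs GPIO interface and that line 13 has already been exported; the paths and GPIO number are assumptions for the sketch, not part of the driver.

/* phy_reset_pulse.c - illustrative only; mirrors the low/high pulse with
 * ~1.25 ms holds used by pch_gbe_minnow_platform_init(). */
#include <stdio.h>
#include <unistd.h>

static int gpio_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *value = "/sys/class/gpio/gpio13/value";

	/* configure as output, initially high (PHY out of reset) */
	if (gpio_write("/sys/class/gpio/gpio13/direction", "high"))
		return 1;
	gpio_write(value, "0");	/* assert reset */
	usleep(1250);		/* hold low ~1.25 ms */
	gpio_write(value, "1");	/* release reset */
	usleep(1250);		/* give the PHY time to wake */
	return 0;
}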
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index da079073a6c..8b7ff75fc8e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -74,6 +74,15 @@
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+/* AR8031 PHY Debug Registers */
+#define PHY_AR803X_ID 0x00001374
+#define PHY_AR8031_DBG_OFF 0x1D
+#define PHY_AR8031_DBG_DAT 0x1E
+#define PHY_AR8031_SERDES 0x05
+#define PHY_AR8031_HIBERNATE 0x0B
+#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */
+#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */
+
/* Phy Id Register (word 2) */
#define PHY_REVISION_MASK 0x000F
@@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
}
/**
+ * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
+{
+ /* The RGMII interface requires a ~2ns TX clock delay. This is typically
+ * done in layout with a longer trace or via PHY strapping, but can also
+ * be done via PHY configuration registers.
+ */
+ struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ u16 mii_reg;
+ int ret = 0;
+
+ switch (hw->phy.id) {
+ case PHY_AR803X_ID:
+ netdev_dbg(adapter->netdev,
+ "Configuring AR803X PHY for 2ns TX clock delay\n");
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+ PHY_AR8031_SERDES);
+ if (ret)
+ break;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+ mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+ mii_reg);
+ break;
+ default:
+ netdev_err(adapter->netdev,
+ "Unknown PHY (%x), could not set TX clock delay\n",
+ hw->phy.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Could not configure tx clock delay for PHY\n");
+ return ret;
+}
+
+/**
* pch_gbe_phy_init_setting - PHY initial setting
* @hw: Pointer to the HW structure
*/
@@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
+
+ /* Setup a TX clock delay on certain platforms */
+ if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
+ pch_gbe_phy_tx_clk_delay(hw);
+}
+
+/**
+ * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ u16 mii_reg;
+ int ret = 0;
+
+ switch (hw->phy.id) {
+ case PHY_AR803X_ID:
+ netdev_dbg(adapter->netdev,
+ "Disabling hibernation for AR803X PHY\n");
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+ PHY_AR8031_HIBERNATE);
+ if (ret)
+ break;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+ mii_reg &= ~PHY_AR8031_PS_HIB_EN;
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+ mii_reg);
+ break;
+ default:
+ netdev_err(adapter->netdev,
+ "Unknown PHY (%x), could not disable hibernation\n",
+ hw->phy.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Could not disable PHY hibernation\n");
+ return ret;
}
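The AR8031 accesses above use an indirect register pair: the debug register offset is written to 0x1D, then the selected register is read-modify-written through 0x1E. A self-contained sketch of that pattern follows, with the MDIO accessors stubbed out against an in-memory register file; the stub names and simplified constants are stand-ins, not the driver's pch_gbe_phy_read_reg_miic()/write helpers.

#include <stdint.h>
#include <stdio.h>

#define DBG_OFF			0x1D	/* debug register offset port */
#define DBG_DAT			0x1E	/* debug register data port */
#define DBG_SERDES		0x05
#define SERDES_TX_CLK_DLY	0x0100

static uint16_t regs[32];		/* fake MII register file */
static uint16_t dbg[64];		/* fake debug register file */

static uint16_t mdio_read(uint8_t reg)
{
	return reg == DBG_DAT ? dbg[regs[DBG_OFF]] : regs[reg];
}

static void mdio_write(uint8_t reg, uint16_t val)
{
	if (reg == DBG_DAT)
		dbg[regs[DBG_OFF]] = val;
	else
		regs[reg] = val;
}

/* Set bits in an indirectly addressed debug register. */
static void dbg_set_bits(uint8_t off, uint16_t bits)
{
	uint16_t v;

	mdio_write(DBG_OFF, off);	/* select the debug register */
	v = mdio_read(DBG_DAT);		/* read its current value */
	mdio_write(DBG_DAT, v | bits);	/* write it back with bits set */
}

int main(void)
{
	dbg_set_bits(DBG_SERDES, SERDES_TX_CLK_DLY);
	printf("debug reg 0x%02x = 0x%04x\n", DBG_SERDES, dbg[DBG_SERDES]);
	return 0;
}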
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc7b5e..0cbe69206e0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5f0b5da614..5b65356e756 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
struct device_node *dn = pci_device_to_OF_node(pdev);
int len;
const u8 *maddr;
- u8 addr[6];
+ u8 addr[ETH_ALEN];
if (!dn) {
dev_dbg(&pdev->dev,
@@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
maddr = of_get_property(dn, "local-mac-address", &len);
- if (maddr && len == 6) {
- memcpy(mac->mac_addr, maddr, 6);
+ if (maddr && len == ETH_ALEN) {
+ memcpy(mac->mac_addr, maddr, ETH_ALEN);
return 0;
}
@@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return -ENOENT;
}
- if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
- &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+ if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+ != ETH_ALEN) {
dev_warn(&pdev->dev,
"can't parse mac address, not configuring\n");
return -EINVAL;
}
- memcpy(mac->mac_addr, addr, 6);
+ memcpy(mac->mac_addr, addr, ETH_ALEN);
return 0;
}
@@ -439,10 +440,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
goto out_ring_desc;
- ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
- RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma,
- GFP_KERNEL | __GFP_ZERO);
+ ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
if (!ring->buffers)
goto out_ring_desc;
@@ -1219,7 +1219,7 @@ static int pasemi_mac_open(struct net_device *dev)
snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
dev->name);
- ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0,
mac->tx_irq_name, mac->tx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1230,7 +1230,7 @@ static int pasemi_mac_open(struct net_device *dev)
snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
dev->name);
- ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
+ ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, 0,
mac->rx_irq_name, mac->rx);
if (ret) {
dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
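The pasemi change above relies on sscanf() returning the number of converted fields, which for a well-formed colon-separated MAC string equals ETH_ALEN (6). A small standalone check of that parse, outside the kernel, with an example address chosen purely for illustration:

#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	const char *maddr = "00:11:22:33:44:55";
	unsigned char addr[ETH_ALEN];

	/* sscanf() reports how many %hhx fields converted; anything
	 * other than ETH_ALEN means the string was malformed. */
	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		   &addr[0], &addr[1], &addr[2], &addr[3], &addr[4],
		   &addr[5]) != ETH_ALEN) {
		fprintf(stderr, "can't parse mac address\n");
		return 1;
	}
	printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}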
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index e2f4efa8ad4..f2749d46c12 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -83,7 +83,7 @@ struct pasemi_mac {
#define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0e1797295a4..f59e6be4a66 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -45,6 +45,17 @@ config QLCNIC_SRIOV
This allows for virtual function acceleration in virtualized
environments.
+config QLCNIC_DCB
+ bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+ depends on QLCNIC && DCB
+ default y
+ ---help---
+ This configuration parameter enables DCB support in QLE83XX
+ and QLE82XX Converged Ethernet devices. This allows for DCB
get operations support through the rtnetlink interface. Only the CEE
+ mode of DCB is supported. PG and PFC values are related only
+ to Tx.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 9fbb1cdbfa4..8375cbde996 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 null_addr[6];
+ u8 null_addr[ETH_ALEN];
int i;
- memset(null_addr, 0, 6);
+ memset(null_addr, 0, ETH_ALEN);
if (netdev->flags & IFF_PROMISC) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index ec4cf7fd412..cbd75f97ffb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
- int pos;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
+ if (pdev->msix_cap) {
+ pci_read_config_dword(pdev, pdev->msix_cap, &control);
if (enable)
control |= PCI_MSIX_FLAGS_ENABLE;
else
control = 0;
- pci_write_config_dword(pdev, pos, control);
+ pci_write_config_dword(pdev, pdev->msix_cap, control);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 4b1fb3faa3b..a848d297972 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -11,3 +11,5 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_minidump.o qlcnic_sriov_common.o
qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 221645e9f18..81bf83604c4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -20,7 +20,6 @@
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
-
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/timer.h>
@@ -35,11 +34,12 @@
#include "qlcnic_hdr.h"
#include "qlcnic_hw.h"
#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 2
-#define _QLCNIC_LINUX_SUBVERSION 44
-#define QLCNIC_LINUX_VERSIONID "5.2.44"
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 50
+#define QLCNIC_LINUX_VERSIONID "5.3.50"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,6 +98,9 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -389,7 +392,7 @@ struct qlcnic_dump_template_hdr {
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
- u8 enable; /* enable/disable dump */
+ bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
void *data; /* dump data area */
struct qlcnic_dump_template_hdr *tmpl_hdr;
@@ -427,7 +430,7 @@ struct qlcnic_hardware_context {
u8 diag_test;
u8 num_msix;
u8 nic_mode;
- char diag_cnt;
+ int diag_cnt;
u16 max_uc_count;
u16 port_type;
@@ -460,14 +463,16 @@ struct qlcnic_hardware_context {
struct qlcnic_fdt fdt;
struct qlc_83xx_reset reset;
struct qlc_83xx_idc idc;
- struct qlc_83xx_fw_info fw_info;
+ struct qlc_83xx_fw_info *fw_info;
struct qlcnic_intrpt_config *intr_tbl;
struct qlcnic_sriov *sriov;
u32 *reg_tbl;
u32 *ext_reg_tbl;
u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
u32 mbox_reg[4];
- spinlock_t mbx_lock;
+ struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
};
struct qlcnic_adapter_stats {
@@ -515,6 +520,7 @@ struct qlcnic_host_sds_ring {
u32 num_desc;
void __iomem *crb_sts_consumer;
+ struct qlcnic_host_tx_ring *tx_ring;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
@@ -532,9 +538,17 @@ struct qlcnic_host_tx_ring {
void __iomem *crb_intr_mask;
char name[IFNAMSIZ + 12];
u16 ctx_id;
+
+ u32 state;
u32 producer;
u32 sw_consumer;
u32 num_desc;
+
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
struct qlcnic_adapter *adapter;
@@ -559,7 +573,6 @@ struct qlcnic_recv_context {
u32 state;
u16 context_id;
u16 virt_port;
-
};
/* HW context creation */
@@ -604,6 +617,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
/*
* Context state
@@ -631,7 +645,7 @@ struct qlcnic_hostrq_rds_ring {
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
- __le32 capabilities[4]; /* Flag bit vector */
+ __le32 capabilities[4]; /* Flag bit vector */
__le32 host_int_crb_mode; /* Interrupt crb usage */
__le32 host_rds_crb_mode; /* RDS crb usage */
/* These ring offsets are relative to data[0] below */
@@ -802,6 +816,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -814,6 +829,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_BDG BIT_8
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
@@ -821,6 +837,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -913,6 +930,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -922,11 +941,11 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_DISABLE 0xD
#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
+#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
-#define QLCNIC_NETDEV_WEIGHT 128
#define QLCNIC_ADAPTER_UP_MAGIC 777
#define __QLCNIC_FW_ATTACHED 0
@@ -937,10 +956,13 @@ struct qlcnic_ipaddr {
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
#define __QLCNIC_SRIOV_ENABLE 10
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
#define __QLCNIC_DIAG_MODE 13
+#define __QLCNIC_DCB_STATE 14
+#define __QLCNIC_DCB_IN_AEN 15
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -950,12 +972,6 @@ struct qlcnic_ipaddr {
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
#define QLCNIC_LB_BUCKET_SIZE 32
-
-/* QLCNIC Driver Error Code */
-#define QLCNIC_FW_NOT_RESPOND 51
-#define QLCNIC_TEST_IN_PROGRESS 52
-#define QLCNIC_UNDEFINED_ERROR 53
-#define QLCNIC_LB_CABLE_NOT_CONN 54
#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
@@ -972,6 +988,21 @@ struct qlcnic_filter_hash {
u16 fbucket_size;
};
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+ struct workqueue_struct *work_q;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_mbx_ops *ops;
+ struct work_struct work;
+ struct completion completion;
+ struct list_head cmd_q;
+ unsigned long status;
+ spinlock_t queue_lock; /* Mailbox queue lock */
+ spinlock_t aen_lock; /* Mailbox response/AEN lock */
+ atomic_t rsp_status;
+ u32 num_cmds;
+};
+
struct qlcnic_adapter {
struct qlcnic_hardware_context *ahw;
struct qlcnic_recv_context *recv_ctx;
@@ -1035,6 +1066,7 @@ struct qlcnic_adapter {
struct delayed_work fw_work;
struct delayed_work idc_aen_work;
struct delayed_work mbx_poll_work;
+ struct qlcnic_dcb *dcb;
struct qlcnic_filter_hash fhash;
struct qlcnic_filter_hash rx_fhash;
@@ -1152,6 +1184,7 @@ struct qlcnic_pci_info {
};
struct qlcnic_npar_info {
+ bool eswitch_status;
u16 pvid;
u16 min_bw;
u16 max_bw;
@@ -1371,7 +1404,6 @@ struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics tx;
};
-#define QLCNIC_DUMP_MASK_DEF 0x1f
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1385,9 +1417,20 @@ struct _cdrp_cmd {
};
struct qlcnic_cmd_args {
- struct _cdrp_cmd req;
- struct _cdrp_cmd rsp;
- int op_type;
+ struct completion completion;
+ struct list_head list;
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+ atomic_t rsp_status;
+ int pay_size;
+ u32 rsp_opcode;
+ u32 total_cmds;
+ u32 op_type;
+ u32 type;
+ u32 cmd_op;
+ u32 *hdr; /* Back channel message header */
+ u32 *pay; /* Back channel message payload */
+ u8 func_num;
};
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1435,6 +1478,12 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+void qlcnic_82xx_io_resume(struct pci_dev *);
/* Functions from qlcnic_init.c */
void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1462,7 +1511,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
@@ -1474,6 +1524,7 @@ void __qlcnic_set_multi(struct net_device *, u16);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
@@ -1495,8 +1546,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1523,6 +1575,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
@@ -1585,6 +1638,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err, tx_q;
+
+ tx_q = adapter->max_drv_tx_rings;
+
+ netdev->num_tx_queues = tx_q;
+ netdev->real_num_tx_queues = tx_q;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_q);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
+ tx_q);
+ else
+ dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+
+ return err;
+}
+
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -1600,6 +1673,20 @@ struct qlcnic_nic_template {
int (*resume)(struct qlcnic_adapter *);
};
+struct qlcnic_mbx_ops {
+ int (*enqueue_cmd) (struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *, unsigned long *);
+ void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+
/* Adapter hardware abstraction */
struct qlcnic_hardware_ops {
void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
@@ -1607,8 +1694,8 @@ struct qlcnic_hardware_ops {
int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
- int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
- int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *, u8, int);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1641,6 +1728,11 @@ struct qlcnic_hardware_ops {
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
+ pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+ pci_channel_state_t);
+ pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+ void (*io_resume) (struct pci_dev *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1669,14 +1761,15 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
}
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
- u8 *mac)
+ u8 *mac, u8 function)
{
- return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
+ u8 num_intr, int txq)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -1867,6 +1960,12 @@ static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->set_mac_filter_count(adapter);
}
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
@@ -1898,16 +1997,45 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
}
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x1
+ * to the src register, instead of 0x0, to disable the receive interrupt.
+ */
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
- writel(0, sds_ring->crb_intr_mask);
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
}
+/* When operating in multi Tx mode, the driver needs to write 0x0
+ * to the src register, instead of 0x1, to enable receive interrupts.
+ */
static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
- writel(0x1, sds_ring->crb_intr_mask);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
if (!QLCNIC_IS_MSI_FAMILY(adapter))
writel(0xfbff, adapter->tgt_mask_reg);
@@ -1939,9 +2067,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
__func__, ##_args); \
} while (0)
-#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
@@ -1949,12 +2079,22 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+}
+
static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
return status;
@@ -1968,7 +2108,105 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+
+ return status;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(adapter);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(adapter);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(adapter);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(adapter, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(adapter);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(adapter, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(adapter);
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->register_aen)
+ dcb->ops->register_aen(adapter, flag);
+}
+
+static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *msg)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->handle_aen)
+ dcb->ops->handle_aen(adapter, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(adapter);
}
#endif /* __QLCNIC_H_ */
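The new qlcnic_dcb inline helpers above all follow the same shape: call through the ops table only when the dcb context and the specific callback exist, otherwise fall back to a benign default. A generic sketch of that guard pattern is below; the structure and function names are illustrative, not the qlcnic definitions, and this sketch also guards the ops pointer itself.

#include <stdio.h>

struct dcb_ops {
	int (*get_hw_capability)(void *priv);
};

struct dcb {
	const struct dcb_ops *ops;
	void *priv;
};

/* Invoke an optional op, defaulting to success when it is absent. */
static int dcb_get_hw_capability(struct dcb *dcb)
{
	if (dcb && dcb->ops && dcb->ops->get_hw_capability)
		return dcb->ops->get_hw_capability(dcb->priv);

	return 0;
}

static int fake_cap(void *priv)
{
	(void)priv;
	return 42;
}

int main(void)
{
	static const struct dcb_ops ops = { .get_hw_capability = fake_cap };
	struct dcb with_ops = { .ops = &ops };

	printf("no dcb:   %d\n", dcb_get_hw_capability(NULL));
	printf("with ops: %d\n", dcb_get_hw_capability(&with_ops));
	return 0;
}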
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 9d4bb7f8390..3ca00e05f23 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -11,6 +11,7 @@
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
+#include <linux/aer.h>
#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
@@ -67,6 +68,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -149,7 +152,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_83xx_mbx_op,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
@@ -175,6 +178,10 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.get_board_info = qlcnic_83xx_get_port_info,
.set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .io_error_detected = qlcnic_83xx_io_error_detected,
+ .io_slot_reset = qlcnic_83xx_io_slot_reset,
+ .io_resume = qlcnic_83xx_io_resume,
+
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -261,7 +268,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -362,6 +369,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
for (i = 0; i < cmd->rsp.num; i++)
cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
}
@@ -398,24 +409,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
return IRQ_HANDLED;
}
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ complete(&mbx->completion);
+}
+
static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
{
- u32 resp, event;
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
-
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
out:
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@ -515,7 +535,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
}
/* Enable mailbox interrupt */
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
return err;
}
@@ -628,7 +648,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
ahw->max_uc_count = count;
}
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
{
u32 val;
@@ -682,11 +702,14 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
u32 data[]);
-static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
int i;
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
dev_info(&adapter->pdev->dev,
"Host MBX regs(%d)\n", cmd->req.num);
for (i = 0; i < cmd->req.num; i++) {
@@ -705,120 +728,73 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
pr_info("\n");
}
-/* Mailbox response for mac rcode */
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- u32 fw_data;
- u8 mac_cmd_rcode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
- fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
- mac_cmd_rcode = (u8)fw_data;
- if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
- mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
- mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
- return QLCNIC_RCODE_SUCCESS;
- return 1;
-}
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
-{
- u32 data;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* wait for mailbox completion */
- do {
- data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
- if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
- data = QLCNIC_RCODE_TIMEOUT;
- break;
- }
- mdelay(1);
- } while (!data);
- return data;
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+ return;
}
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- int i;
- u16 opcode;
- u8 mbx_err_code;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
opcode = LSW(cmd->req.arg[0]);
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-
- if (mbx_val) {
- QLCDB(adapter, DRV,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- QLCDB(adapter, DRV,
- "Mailbox not available, 0x%x, collect FW dump\n",
- mbx_val);
- cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return cmd->rsp.arg[0];
- }
-
- /* Fill in mailbox registers */
- mbx_cmd = cmd->req.arg[0];
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- for (i = 1; i < cmd->req.num; i++)
- writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
-
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
- qlcnic_83xx_get_mbx_data(adapter, cmd);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- qlcnic_dump_mbx(adapter, cmd);
- break;
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- goto out;
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd->rsp_opcode;
}
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -828,6 +804,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
u32 temp;
const struct qlcnic_mailbox_metadata *mbx_tbl;
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
mbx_tbl = qlcnic_83xx_mbx_tbl;
size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
for (i = 0; i < size; i++) {
@@ -850,6 +827,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
return 0;
}
}
@@ -888,9 +866,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
int i;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -910,6 +888,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
&adapter->idc_aen_work, 0);
break;
case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
break;
case QLCNIC_MBX_BC_EVENT:
qlcnic_sriov_handle_bc_event(adapter, event[1]);
@@ -922,6 +901,9 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
break;
+ case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+ qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+ break;
default:
dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
@@ -933,20 +915,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 resp, event;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
if (resp & QLCNIC_SET_OWNER) {
event = readl(QLCNIC_MBX_FW(ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
}
-
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@ -969,6 +954,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
return;
INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
}
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@ -1355,8 +1341,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
/* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_83xx_enable_mbx_poll(adapter);
qlcnic_83xx_free_mbx_intr(adapter);
+ }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1377,6 +1365,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_mbx_poll(adapter);
}
}
@@ -1386,6 +1376,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"%s: failed to setup mbx interrupt\n",
@@ -1402,6 +1393,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
@@ -1619,26 +1614,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
- int err;
+ struct qlcnic_cmd_args *cmd = NULL;
u32 temp = 0;
- struct qlcnic_cmd_args cmd;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
if (err)
- return err;
+ goto out;
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd.req.arg[1] = (mode ? 1 : 0) | temp;
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_info(&adapter->pdev->dev,
- "Promiscous mode config failed\n");
+ cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
- qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
return err;
}
@@ -1651,7 +1653,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
"Loopback test not supported in non privileged mode\n");
- return ret;
+ return -ENOTSUPP;
}
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
@@ -1679,19 +1681,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Poll for link up event before running traffic */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
- ret = -EIO;
+ ret = -EBUSY;
goto free_diag_res;
}
if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
netdev_info(netdev,
"Firmware didn't sent link up event to loopback request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ ret = -ETIMEDOUT;
qlcnic_83xx_clear_lb_mode(adapter, mode);
goto free_diag_res;
}
@@ -1700,7 +1700,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Make sure carrier is off and queue is stopped during loopback */
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
ret = qlcnic_do_lb_test(adapter, mode);
@@ -1716,18 +1716,42 @@ fail_diag_alloc:
return ret;
}
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
+
int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
int status = 0, loop = 0;
- u32 config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
status = qlcnic_83xx_get_port_config(adapter);
if (status)
return status;
config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
@@ -1748,21 +1772,24 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
qlcnic_83xx_clear_lb_mode(adapter, mode);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1774,10 +1801,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
struct net_device *netdev = adapter->netdev;
int status = 0, loop = 0;
- u32 config = ahw->port_config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
@@ -1797,21 +1826,23 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1950,25 +1981,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
u16 vlan_id, u8 op)
{
- int err;
- u32 *buf, temp = 0;
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd = NULL;
struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
if (err)
- return err;
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
if (vlan_id)
op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd.req.arg[1] = op | (1 << 8);
+ cmd->req.arg[1] = op | (1 << 8);
qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
- cmd.req.arg[1] |= temp;
+ cmd->req.arg[1] |= temp;
mv.vlan = vlan_id;
mv.mac_addr0 = addr[0];
mv.mac_addr1 = addr[1];
@@ -1976,14 +2013,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
- qlcnic_free_mbx_args(&cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
return err;
}
@@ -2008,12 +2046,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
cmd->req.arg[1] = type;
}
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
+ function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
@@ -2099,10 +2139,12 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx;
u32 mask, resp, event;
+ unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
@@ -2110,11 +2152,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
__qlcnic_83xx_process_aen(adapter);
+ else
+ qlcnic_83xx_notify_mbx_response(mbx);
+
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
writel(0, adapter->ahw->pci_base0 + mask);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
return IRQ_HANDLED;
}
@@ -2287,7 +2331,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
pci_info->tx_max_bw, pci_info->mac);
}
if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
ahw->max_pci_func, ahw->act_pci_func);
} else {
@@ -3104,7 +3148,7 @@ int qlcnic_83xx_set_settings(struct qlcnic_adapter *adapter,
status = qlcnic_83xx_set_port_config(adapter);
if (status) {
dev_info(&adapter->pdev->dev,
- "Faild to Set Link Speed and autoneg.\n");
+ "Failed to Set Link Speed and autoneg.\n");
adapter->ahw->port_config = config;
}
return status;
@@ -3477,3 +3521,360 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ INIT_COMPLETION(mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
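+ /* Regular commands write the request args straight into the host
+ * mailbox registers; back-channel (BC) commands carry an extra
+ * header and payload.
+ */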
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+ /* Back-channel specific operation bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
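+ /* Mark the mailbox not ready, wake any waiter and flush all
+ * pending commands.
+ */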
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
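+ /* Scale the wait timeout by the number of commands pending in
+ * the queue.
+ */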
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+
+ return;
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ atomic_t *rsp_status = &mbx->rsp_status;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ ahw = adapter->ahw;
+
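+ /* Service queued mailbox commands until the queue drains or the
+ * mailbox interface goes down.
+ */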
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+ spin_lock(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+ if (mbx->work_q == NULL) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
+
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_83xx_aer_stop_poll_work(adapter);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err = 0;
+
+ pdev->error_state = pci_channel_io_normal;
+ err = pci_enable_device(pdev);
+ if (err)
+ goto disconnect;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = qlcnic_83xx_aer_reset(adapter);
+ if (err == 0)
+ return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_83xx_aer_start_poll_work(adapter);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 272f56a2e14..533e150503a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -84,11 +84,20 @@
/* Firmware image definitions */
#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
#define QLC_83XX_BOOT_FROM_FLASH 0
#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+#define QLC_FW_FILE_NAME_LEN 20
#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+#define QLC_83XX_MBX_POST_BC_OP 0x1
+#define QLC_83XX_MBX_COMPLETION 0x0
+#define QLC_83XX_MBX_REQUEST 0x1
+
+#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP 5000000
+
/* status descriptor mailbox data
* @phy_addr_{low|high}: physical address of buffer
* @sds_ring_size: buffer size
@@ -265,11 +274,7 @@ struct qlcnic_macvlan_mbx {
struct qlc_83xx_fw_info {
const struct firmware *fw;
- u16 major_fw_version;
- u8 minor_fw_version;
- u8 sub_fw_version;
- u8 fw_build_num;
- u8 load_from_file;
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
};
struct qlc_83xx_reset {
@@ -288,6 +293,7 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
#define QLC_83XX_IDC_TIMESTAMP 0
#define QLC_83XX_IDC_DURATION 1
#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
@@ -397,6 +403,7 @@ enum qlcnic_83xx_states {
#define QLC_83XX_MAX_MC_COUNT 38
#define QLC_83XX_MAX_UC_COUNT 4096
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
@@ -404,6 +411,7 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
@@ -449,6 +457,20 @@ enum qlcnic_83xx_states {
#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+enum qlc_83xx_mbx_cmd_type {
+ QLC_83XX_MBX_CMD_WAIT = 0,
+ QLC_83XX_MBX_CMD_NO_WAIT,
+ QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+ QLC_83XX_MBX_RESPONSE_WAIT = 0,
+ QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
/* Additional registers in 83xx */
enum qlc_83xx_ext_regs {
QLCNIC_GLOBAL_RESET = 0,
@@ -498,8 +520,8 @@ enum qlc_83xx_ext_regs {
/* 83xx functions */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -540,7 +562,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
@@ -551,7 +573,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_handle_aen(int, void *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
@@ -604,6 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u8);
int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
@@ -623,8 +646,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
@@ -634,4 +655,11 @@ int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+void qlcnic_83xx_io_resume(struct pci_dev *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 345d987aede..f09e787af0b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
/* Disable mailbox interrupt */
qlcnic_83xx_disable_mbx_intr(adapter);
@@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
{
int err;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
@@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
if (qlcnic_83xx_configure_opmode(adapter)) {
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -631,6 +635,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (adapter->portnum == 0)
qlcnic_set_drv_version(adapter);
+
+ qlcnic_dcb_get_info(adapter);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -641,7 +647,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
@@ -792,7 +797,6 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
} else {
ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
- return ret;
}
return ret;
@@ -811,9 +815,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
{
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 val;
/* Perform NIC configuration based ready state entry actions */
if (ahw->idc.state_entry(adapter))
@@ -825,7 +830,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
dev_err(&adapter->pdev->dev,
"Error: device temperature %d above limits\n",
adapter->ahw->temp);
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_detach_driver(adapter);
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -838,7 +843,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
if (ret) {
adapter->flags |= QLCNIC_FW_HANG;
if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
}
@@ -846,6 +851,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
}
if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
/* Move to need reset state and prepare for reset */
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
return ret;
@@ -883,12 +890,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
int ret = 0;
if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
- clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
@@ -1080,7 +1088,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
/* Check if reset recovery is disabled */
@@ -1191,6 +1198,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
{
u32 val;
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
if (qlcnic_83xx_lock_driver(adapter)) {
dev_err(&adapter->pdev->dev,
"%s:failed, please retry\n", __func__);
@@ -1257,31 +1267,33 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache;
- u64 addr;
+ int i, ret = -EIO;
u8 data[16];
size_t size;
- int i, ret = -EIO;
+ u64 addr;
dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
- size = (adapter->ahw->fw_info.fw->size & ~0xF);
- p_cache = (u32 *)adapter->ahw->fw_info.fw->data;
+ size = (fw->size & ~0xF);
+ p_cache = (u32 *)fw->data;
addr = (u64)dest;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
(u32 *)p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return -EIO;
}
/* alignment check */
- if (adapter->ahw->fw_info.fw->size & 0xF) {
+ if (fw->size & 0xF) {
addr = dest + size;
- for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++)
- data[i] = adapter->ahw->fw_info.fw->data[size + i];
+ for (i = 0; i < (fw->size & 0xF); i++)
+ data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
@@ -1289,13 +1301,13 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return -EIO;
}
}
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return 0;
}
@@ -1941,10 +1953,11 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
int err = -EIO;
- if (request_firmware(&adapter->ahw->fw_info.fw,
- QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+ &(adapter->pdev->dev))) {
dev_err(&adapter->pdev->dev,
"No file FW image, loading flash FW image.\n");
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -1990,36 +2003,6 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
return 0;
}
-/**
-* qlcnic_83xx_config_default_opmode
-*
-* @adapter: adapter structure
-*
-* Configure default driver operating mode
-*
-* Returns: Error code or Success(0)
-* */
-int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
-{
- u32 op_mode;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
-
- qlcnic_get_func_no(adapter);
- op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
-
- if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
- op_mode = QLC_83XX_DEFAULT_OPMODE;
-
- if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
- ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- } else {
- return -EIO;
- }
-
- return 0;
-}
-
int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
{
int err;
@@ -2039,26 +2022,26 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
- /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
- * set in case device is SRIOV capable. VNIC and SRIOV are mutually
- * exclusive. So in case of sriov capable device load driver in
- * default mode
+ /* eSwitch capability indicates vNIC mode.
+ * vNIC and SR-IOV are mutually exclusive operational modes,
+ * so when SR-IOV capability is detected, the SR-IOV physical
+ * function is initialized in default mode.
+ * SR-IOV virtual function initialization follows a different
+ * code path and opmode.
+ * SR-IOV mode takes precedence over vNIC mode.
+ */
- if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
- return ahw->nic_mode;
- }
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ return QLC_83XX_DEFAULT_OPMODE;
- if (ahw->capabilities & BIT_23)
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
- else
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ return QLC_83XX_VIRTUAL_NIC_MODE;
- return ahw->nic_mode;
+ return QLC_83XX_DEFAULT_OPMODE;
}
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int ret;
ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2066,11 +2049,16 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
return -EIO;
if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
+ ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
- } else if (ret == QLC_83XX_DEFAULT_MODE) {
- if (qlcnic_83xx_config_default_opmode(adapter))
- return -EIO;
+
+ } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ } else {
+ return -EIO;
}
return 0;
@@ -2139,20 +2127,82 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
}
}
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlc_83xx_fw_info *fw_info;
+ int err = 0;
+
+ ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!ahw->fw_info) {
+ err = -ENOMEM;
+ } else {
+ fw_info = ahw->fw_info;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+
int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
- if (qlcnic_sriov_vf_check(adapter))
- return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
- if (qlcnic_83xx_check_hw_status(adapter))
- return -EIO;
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
+
+ if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
+ qlcnic_83xx_read_flash_mfg_id(adapter);
+
+ err = qlcnic_83xx_get_fw_info(adapter);
+ if (err)
+ goto detach_mbx;
- /* Initilaize 83xx mailbox spinlock */
- spin_lock_init(&ahw->mbx_lock);
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto clear_fw_info;
+
+ err = qlcnic_setup_intr(adapter, 0, 0);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_clear_function_resources(adapter);
INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
@@ -2160,22 +2210,90 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
- if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
- qlcnic_83xx_read_flash_mfg_id(adapter);
-
- if (qlcnic_83xx_idc_init(adapter))
- return -EIO;
-
/* Configure default, SR-IOV or Virtual NIC mode of operation */
- if (qlcnic_83xx_configure_opmode(adapter))
- return -EIO;
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
/* Perform operating mode specific initialization */
- if (adapter->nic_ops->init_driver(adapter))
- return -EIO;
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+clear_fw_info:
+ kfree(ahw->fw_info);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+exit:
+ return err;
+}
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int ret = 0;
+ u32 owner;
+
+ /* Mark the previous IDC state as NEED_RESET so
+ * that state_entry() will perform the reattachment
+ * and bring up the device
+ */
+ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ ret = qlcnic_83xx_restart_hw(adapter);
+ if (ret < 0)
+ return ret;
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ }
+
+ ret = idc->state_entry(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 owner;
+
+ idc->prev_state = QLC_83XX_IDC_DEV_READY;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
- return adapter->ahw->idc.err_code;
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 599d1fda52f..0248a4c2f5d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -208,7 +208,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
return -EIO;
}
- if (ahw->capabilities & BIT_23)
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -239,3 +239,41 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
return 0;
}
+
+static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ return err;
+
+ if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ *port_id = nic_info.phys_port;
+ else
+ err = -EIO;
+
+ return err;
+}
+
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
+{
+ int id, err = 0;
+
+ err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
+ if (err)
+ return err;
+
+ if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ if (!qlcnic_enable_eswitch(adapter, id, 1))
+ adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
+ else
+ err = -EIO;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index d09389b3347..86850dd633a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -38,6 +38,9 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
};
static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -171,6 +174,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
break;
}
dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
} else if (rsp == QLCNIC_CDRP_RSP_OK)
cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
@@ -243,40 +247,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
- void *addr;
- struct qlcnic_hostrq_rx_ctx *prq;
- struct qlcnic_cardrsp_rx_ctx *prsp;
- struct qlcnic_hostrq_rds_ring *prq_rds;
- struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
struct qlcnic_cardrsp_rds_ring *prsp_rds;
struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_cmd_args cmd;
-
- dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
- u64 phys_addr;
-
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
u8 i, nrds_rings, nsds_rings;
- u16 temp_u16;
+ struct qlcnic_cmd_args cmd;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
int err;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
nrds_rings = adapter->max_rds_rings;
nsds_rings = adapter->max_sds_rings;
- rq_size =
- SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
- nsds_rings);
- rsp_size =
- SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
- nsds_rings);
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &hostrq_phys_addr, GFP_KERNEL);
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
@@ -295,15 +297,20 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp_u16);
- prq->txrx_sds_binding = nsds_rings - 1;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
prq->capabilities[0] = cpu_to_le32(cap);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->host_rds_crb_mode =
- cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -317,10 +324,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->rds_ring_offset));
for (i = 0; i < nrds_rings; i++) {
-
rds_ring = &recv_ctx->rds_rings[i];
rds_ring->producer = 0;
-
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -331,14 +336,16 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->sds_ring_offset));
for (i = 0; i < nsds_rings; i++) {
-
sds_ring = &recv_ctx->sds_rings[i];
sds_ring->consumer = 0;
memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
-
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
- prq_sds[i].msi_index = cpu_to_le16(i);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
}
phys_addr = hostrq_phys_addr;
@@ -361,9 +368,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
rds_ring = &recv_ctx->rds_rings[i];
-
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -371,24 +377,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
sds_ring = &recv_ctx->sds_rings[i];
-
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
- reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
- sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
recv_ctx->virt_port = prsp->virt_port;
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
qlcnic_free_mbx_args(&cmd);
+
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
return err;
}
@@ -416,16 +428,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring,
int ring)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
struct qlcnic_cardrsp_tx_ctx *prsp;
- void *rq_addr, *rsp_addr;
- size_t rq_size, rsp_size;
- u32 temp;
struct qlcnic_cmd_args cmd;
- int err;
- u64 phys_addr;
- dma_addr_t rq_phys_addr, rsp_phys_addr;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
/* reset host resources */
tx_ring->producer = 0;
@@ -433,32 +448,42 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
+ rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
+ rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
prq = rq_addr;
-
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
- QLCNIC_CAP0_LSO);
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
prq->capabilities[0] = cpu_to_le32(temp);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->msi_index = 0;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->max_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
prq->interrupt_ctl = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
@@ -480,15 +505,25 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->max_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
} else {
- dev_err(&adapter->pdev->dev,
- "Failed to create tx ctx in firmware%d\n", err);
+ netdev_err(netdev, "Failed to create tx ctx in firmware, err %d\n",
+ err);
err = -EIO;
}
-
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
@@ -618,6 +653,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
}
}
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+
err = qlcnic_fw_cmd_create_rx_ctx(dev);
if (err)
goto err_out;
@@ -639,13 +681,19 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
}
set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
return 0;
err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
qlcnic_83xx_config_intrpt(dev, 0);
}
+
return err;
}
@@ -659,6 +707,12 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
if (qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
@@ -723,8 +777,54 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
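+ /* Add or delete an interrupt mapping for each MSI-X vector,
+ * depending on op_type.
+ */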
+ for (i = 0; i < ahw->num_msix; i++) {
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts, err %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
@@ -734,7 +834,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
if (err)
return err;
- cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ cmd.req.arg[1] = function | BIT_8;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
@@ -765,8 +865,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -819,8 +919,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -872,9 +972,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t npar_size = sizeof(struct qlcnic_pci_info_le);
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
- pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t,
- GFP_KERNEL | __GFP_ZERO);
+ pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
@@ -974,8 +1073,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -EIO;
}
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
@@ -1030,8 +1129,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
if (mac_stats == NULL)
return -ENOMEM;
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 00000000000..d62d5ce432e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1179 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM 3
+#define QLC_DCB_LOCAL_IDX 0
+#define QLC_DCB_OPER_IDX 1
+#define QLC_DCB_PEER_IDX 2
+
+#define QLC_DCB_GET_MAP(V) (1 << V)
+
+#define QLC_DCB_AEN_BIT 0x2
+#define QLC_DCB_FW_VER 0x2
+#define QLC_DCB_MAX_TC 0x8
+#define QLC_DCB_MAX_APP 0x8
+#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID 0x3
+#define QLC_DCB_OPER_PARAM_FWID 0x1
+#define QLC_DCB_PEER_PARAM_FWID 0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
+static void __qlcnic_dcb_free(struct qlcnic_adapter *);
+static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
+static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
+static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+
+struct qlcnic_dcb_capability {
+ bool tsa_capability;
+ bool ets_capability;
+ u8 max_num_tc;
+ u8 max_ets_tc;
+ u8 max_pfc_tc;
+ u8 dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+ u32 hdr_prio_pfc_map[2];
+ u32 prio_pg_map[2];
+ u32 pg_bw_map[2];
+ u32 pg_tsa_map[2];
+ u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+ /* 1st local, 2nd operational, 3rd remote */
+ struct qlcnic_dcb_param type[3];
+ u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+ __le32 hdr_prio_pfc_map[2];
+ __le32 prio_pg_map[2];
+ __le32 pg_bw_map[2];
+ __le32 pg_tsa_map[2];
+ __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+ QLC_SELECTOR_DEF = 0x0,
+ QLC_SELECTOR_ETHER,
+ QLC_SELECTOR_TCP,
+ QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+ QLC_PRIO_NONE = 0,
+ QLC_PRIO_GROUP,
+ QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+ QLC_PFC_DISABLED = 0,
+ QLC_PFC_FULL,
+ QLC_PFC_TX,
+ QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+ bool valid;
+ enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+ bool valid;
+ u8 total_bw_percent; /* % of link/port BW */
+ u8 prio_count;
+ u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+ bool valid;
+ struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+ enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
+ u8 link_percent; /* % of link bandwidth */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 up_tc_map;
+ u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+ bool valid;
+ enum qlcnic_dcb_selector selector;
+ u16 protocol;
+ u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+ struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+ struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+ struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+ bool tc_param_valid;
+ bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+ /* 0 - local, 1 - operational, 2 - remote */
+ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+ struct qlcnic_dcb_capability capability;
+ u32 version;
+};
+
+static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
+ .register_aen = qlcnic_83xx_dcb_register_aen,
+ .handle_aen = qlcnic_83xx_dcb_handle_aen,
+};
+
+static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
+ .handle_aen = qlcnic_82xx_dcb_handle_aen,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_NUMAPP(val);
+ else
+ return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_PFC_VALID(val);
+ else
+ return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_TSA_VALID(val);
+ else
+ return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+ else
+ return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+ int j;
+
+ for (j = 0; j < QLC_DCB_MAX_TC; j++)
+ if (up_tc_map & QLC_DCB_GET_MAP(j))
+ break;
+
+ return j;
+}
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+{
+ if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+ else if (qlcnic_83xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb;
+
+ dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+ if (!dcb)
+ return -ENOMEM;
+
+ adapter->dcb = dcb;
+ dcb->adapter = adapter;
+ qlcnic_set_dcb_ops(adapter);
+
+ return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!dcb)
+ return;
+
+ qlcnic_dcb_register_aen(adapter, 0);
+
+ while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&dcb->aen_work);
+
+ if (dcb->wq) {
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+ }
+
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+ kfree(dcb->param);
+ dcb->param = NULL;
+ kfree(dcb);
+ adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+{
+ qlcnic_dcb_get_hw_capability(adapter);
+ qlcnic_dcb_get_cee_cfg(adapter);
+ qlcnic_dcb_register_aen(adapter, 1);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ int err = 0;
+
+ INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+ dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+ if (!dcb->wq) {
+ dev_err(&adapter->pdev->dev,
+ "DCB workqueue allocation failed. DCB will be disabled\n");
+ return -1;
+ }
+
+ dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+ if (!dcb->cfg) {
+ err = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+ if (!dcb->param) {
+ err = -ENOMEM;
+ goto out_free_cfg;
+ }
+
+ qlcnic_dcb_get_info(adapter);
+
+ return 0;
+out_free_cfg:
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+
+out_free_wq:
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+
+ return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
+ char *buf)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_out;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX capability, err %d\n", err);
+ } else {
+ mbx_out = cmd.rsp.arg[1];
+ if (buf)
+ memcpy(buf, &mbx_out, sizeof(u32));
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+{
+ struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+ err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+ if (err)
+ return err;
+
+ mbx_out = *val;
+ if (QLC_DCB_TSA_SUPPORT(mbx_out))
+ cap->tsa_capability = true;
+
+ if (QLC_DCB_ETS_SUPPORT(mbx_out))
+ cap->ets_capability = true;
+
+ cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+ cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+ cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+ if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+ cap->max_ets_tc > cap->max_num_tc ||
+ cap->max_pfc_tc > cap->max_num_tc) {
+ dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_capability *cap;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ if (err)
+ return err;
+
+ cap = &cfg->capability;
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+ char *buf, u8 type)
+{
+ u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t cardrsp_phys_addr;
+ struct qlcnic_dcb_param rsp;
+ struct qlcnic_cmd_args cmd;
+ u64 phys_addr;
+ void *addr;
+ int err, i;
+
+ switch (type) {
+ case QLC_DCB_LOCAL_PARAM_FWID:
+ case QLC_DCB_OPER_PARAM_FWID:
+ case QLC_DCB_PEER_PARAM_FWID:
+ break;
+ default:
+ dev_err(dev, "Invalid parameter type %d\n", type);
+ return -EINVAL;
+ }
+
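+ /* Firmware fills this DMA buffer with the CEE parameter block in
+ * little-endian form; it is converted into 'rsp' below.
+ */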
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
+ GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+
+ prsp_le = addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ goto out_free_rsp;
+
+ phys_addr = cardrsp_phys_addr;
+ cmd.req.arg[1] = size | (type << 16);
+ cmd.req.arg[2] = MSD(phys_addr);
+ cmd.req.arg[3] = LSD(phys_addr);
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+ goto out;
+ }
+
+ memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+ rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+ rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+ rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+ rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+ rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+ rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+ rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+ rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++)
+ rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+ if (buf)
+ memcpy(buf, &rsp, size);
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_mbx_params *mbx;
+ int err;
+
+ mbx = adapter->dcb->param;
+ if (!mbx)
+ return 0;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+ QLC_DCB_LOCAL_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+ QLC_DCB_OPER_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+ QLC_DCB_PEER_PARAM_FWID);
+ if (err)
+ return err;
+
+ mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+ qlcnic_dcb_data_cee_param_map(adapter);
+
+ return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_dcb *dcb;
+
+ dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+ adapter = dcb->adapter;
+
+ qlcnic_dcb_get_cee_cfg(adapter);
+ clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+}
+
+static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *data)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ return;
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ if (err)
+ return err;
+
+ if (mbx_out & BIT_2)
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+ if (mbx_out & BIT_3)
+ cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+ if (cap->dcb_capability)
+ cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+ char *buf, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params mbx_out;
+ int err, i, j, k, max_app, size;
+ struct qlcnic_dcb_param *each;
+ struct qlcnic_cmd_args cmd;
+ u32 val;
+ char *p;
+
+ size = 0;
+ memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+ memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ return err;
+
+ cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX param, err %d\n", err);
+ goto out;
+ }
+
+ mbx_out.prio_tc_map = cmd.rsp.arg[1];
+ p = memcpy(buf, &mbx_out, sizeof(u32));
+ k = 2;
+ p += sizeof(u32);
+
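+ /* The local, operational and peer parameter blocks are 16 mailbox
+ * words each, starting at rsp.arg[2], [18] and [34] respectively.
+ */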
+ for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+ each = &mbx_out.type[j];
+
+ each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+ each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+ each->prio_pg_map[0] = cmd.rsp.arg[k++];
+ each->prio_pg_map[1] = cmd.rsp.arg[k++];
+ each->pg_bw_map[0] = cmd.rsp.arg[k++];
+ each->pg_bw_map[1] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+ val = each->hdr_prio_pfc_map[0];
+
+ max_app = qlcnic_dcb_get_num_app(adapter, val);
+ for (i = 0; i < max_app; i++)
+ each->app[i] = cmd.rsp.arg[i + k];
+
+ size = 16 * sizeof(u32);
+ memcpy(p, &each->hdr_prio_pfc_map[0], size);
+ p += size;
+ if (j == 0)
+ k = 18;
+ else
+ k = 34;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ int err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+ if (err)
+ return err;
+
+ qlcnic_dcb_data_cee_param_map(adapter);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
+ bool flag)
+{
+ u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_DCB_AEN_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
+ (flag ? "register" : "unregister"), err);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *data)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ u32 *val = data;
+
+ if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ return;
+
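+ /* Bit 8 of the AEN payload reports whether DCBX is now enabled */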
+ if (*val & BIT_8)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ else
+ clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 i, tc, pgid;
+
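+ /* Map each user priority to its traffic class and record the PFC
+ * setting and priority-group id reported by the firmware.
+ */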
+ for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+ tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+ tc_cfg = &type->tc_cfg[tc];
+ tc_cfg->valid = true;
+ tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+ if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+ type->pfc_mode_enable) {
+ tc_cfg->prio_cfg[i].valid = true;
+ tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+ }
+
+ if (i < 4)
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+ else
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+ tc_cfg->pgid = pgid;
+
+ tc_cfg->prio_type = QLC_PRIO_LINK;
+ type->pg_cfg[tc_cfg->pgid].prio_count++;
+ }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_pg_cfg *pg_cfg;
+ u8 i, tsa, bw_per;
+
+ for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+ pg_cfg = &type->pg_cfg[i];
+ pg_cfg->valid = true;
+
+ if (i < 4) {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+ } else {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+ }
+
+ pg_cfg->total_bw_percent = bw_per;
+ pg_cfg->tsa_type = tsa;
+ }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_app *app;
+ u8 i, num_app, map, cnt;
+ struct dcb_app new_app;
+
+ num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+ for (i = 0; i < num_app; i++) {
+ app = &type->app[i];
+ app->valid = true;
+
+ /* Only for CEE (-1) */
+ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+ new_app.selector = app->selector;
+ app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+ new_app.protocol = app->protocol;
+ map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+ cnt = qlcnic_dcb_prio_count(map);
+
+ if (cnt >= QLC_DCB_MAX_TC)
+ cnt = 0;
+
+ app->priority = cnt;
+ new_app.priority = cnt;
+
+ if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+ dcb_setapp(adapter->netdev, &new_app);
+ }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+ struct qlcnic_dcb_param *each = &mbx->type[idx];
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+ type->tc_param_valid = false;
+ type->pfc_mode_enable = false;
+ memset(type->tc_cfg, 0,
+ sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+ memset(type->pg_cfg, 0,
+ sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+ if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_pfc_tc)
+ type->pfc_mode_enable = true;
+
+ if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_ets_tc)
+ type->tc_param_valid = true;
+
+ qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+ qlcnic_dcb_fill_cee_pg_params(each, type);
+ qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+ qlcnic_dcb_map_cee_params(adapter, i);
+
+ dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+ memcpy(addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+ u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+ struct qlcnic_dcb_cee *type;
+ u8 i, cnt, pg;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
+ return;
+
+ tc_cfg = &type->tc_cfg[tc];
+ if (!tc_cfg->valid)
+ return;
+
+ *pgid = tc_cfg->pgid;
+ *prio = tc_cfg->prio_type;
+ *up_tc_map = tc_cfg->up_tc_map;
+ pg = *pgid;
+
+ for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+ temp = &type->tc_cfg[i];
+ if (temp->valid && (pg == temp->pgid))
+ cnt++;
+ }
+
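+ /* Split the group bandwidth evenly among TCs sharing this PG */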
+ tc_cfg->bwg_percent = (100 / cnt);
+ *bw_per = tc_cfg->bwg_percent;
+}
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_pg_cfg *pgcfg;
+ struct qlcnic_dcb_cee *type;
+
+ *bw_pct = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+ return;
+
+ pgcfg = &type->pg_cfg[pgid];
+ if (!pgcfg->valid)
+ return;
+
+ *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 val = QLC_DCB_GET_MAP(prio);
+ struct qlcnic_dcb_cee *type;
+ u8 i;
+
+ *setting = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->pfc_mode_enable)
+ return;
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc_cfg = &type->tc_cfg[i];
+ if (!tc_cfg->valid)
+ continue;
+
+ if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+ *setting = tc_cfg->prio_cfg[prio].pfc_type;
+ }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+ u8 *cap)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcb->cfg->capability.dcb_capability;
+ break;
+ default:
+ *cap = false;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return -EINVAL;
+
+ switch (attr) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = cfg->capability.max_ets_tc;
+ return 0;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = cfg->capability.max_pfc_tc;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *type;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 1;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *flag = 0;
+
+ switch (fid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (type->tc_param_valid)
+ *flag |= DCB_FEATCFG_ENABLE;
+ else
+ *flag |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (type->pfc_mode_enable) {
+ if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+ *flag |= DCB_FEATCFG_ENABLE;
+ } else {
+ *flag |= DCB_FEATCFG_ERROR;
+ }
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ *flag |= DCB_FEATCFG_ENABLE;
+ break;
+ default:
+ netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ *bw_pct = 0;
+}
+
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
+ *app_count = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+ if (peer->app[i].valid)
+ (*app_count)++;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ struct qlcnic_dcb_app *app;
+ int i, j;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+ app = &peer->app[i];
+ if (!app->valid)
+ continue;
+
+ table[j].selector = app->selector;
+ table[j].priority = app->priority;
+ table[j++].protocol = app->protocol;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ u8 i, j, k, map;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+ if (!peer->pg_cfg[i].valid)
+ continue;
+
+ pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+ for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+ if (peer->tc_cfg[i].valid &&
+ (peer->tc_cfg[i].pgid == i)) {
+ map = peer->tc_cfg[i].up_tc_map;
+ pg->prio_pg[j++] = map;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_tc_cfg *tc;
+ struct qlcnic_dcb_cee *peer;
+ u8 i, setting, prio;
+
+ pfc->pfc_en = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc = &peer->tc_cfg[i];
+ prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+ setting = 0;
+ qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+ if (setting)
+ pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+ }
+
+ pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+ .getstate = qlcnic_dcb_get_state,
+ .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
+ .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
+ .getpfccfg = qlcnic_dcb_get_pfc_cfg,
+ .getcap = qlcnic_dcb_get_capability,
+ .getnumtcs = qlcnic_dcb_get_num_tcs,
+ .getapp = qlcnic_dcb_get_app,
+ .getpfcstate = qlcnic_dcb_get_pfc_state,
+ .getdcbx = qlcnic_dcb_get_dcbx,
+ .getfeatcfg = qlcnic_dcb_get_feat_cfg,
+
+ .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+ .peer_getappinfo = qlcnic_dcb_peer_app_info,
+ .peer_getapptable = qlcnic_dcb_peer_app_table,
+ .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
+ .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 00000000000..b87ce9fb503
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,41 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+
+#ifdef CONFIG_QLCNIC_DCB
+int __qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+struct qlcnic_dcb_ops {
+ void (*init_dcbnl_ops) (struct qlcnic_adapter *);
+ void (*free) (struct qlcnic_adapter *);
+ int (*attach) (struct qlcnic_adapter *);
+ int (*query_hw_capability) (struct qlcnic_adapter *, char *);
+ int (*get_hw_capability) (struct qlcnic_adapter *);
+ void (*get_info) (struct qlcnic_adapter *);
+ int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
+ int (*get_cee_cfg) (struct qlcnic_adapter *);
+ int (*register_aen) (struct qlcnic_adapter *, bool);
+ void (*handle_aen) (struct qlcnic_adapter *, void *);
+};
+
+struct qlcnic_dcb {
+ struct qlcnic_dcb_mbx_params *param;
+ struct qlcnic_adapter *adapter;
+ struct delayed_work aen_work;
+ struct workqueue_struct *wq;
+ struct qlcnic_dcb_ops *ops;
+ struct qlcnic_dcb_cfg *cfg;
+};
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 7aac23ab31d..ebe4c86e523 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
};
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+};
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
@@ -630,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev,
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
- int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int min;
min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = adapter->ahw->max_tx_ques;
+ channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->ahw->max_tx_ques;
+ channel->tx_count = adapter->max_drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
@@ -646,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ int txq = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count != channel->max_tx)
+ if (channel->other_count || channel->combined_count)
return -EINVAL;
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
- return err;
+ if (channel->rx_count) {
+ err = qlcnic_validate_max_rss(adapter, channel->rx_count);
+ if (err)
+ return err;
+ }
+
+ if (channel->tx_count) {
+ err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
+ if (err)
+ return err;
+ txq = channel->tx_count;
+ }
- err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
- netdev_info(dev, "allocated 0x%x sds rings\n",
- adapter->max_sds_rings);
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
+ netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
+ adapter->max_sds_rings, adapter->max_drv_tx_rings);
return err;
}
@@ -893,6 +910,7 @@ free_diag_res:
clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
return ret;
}
@@ -966,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_drv_tx_rings = adapter->max_drv_tx_rings;
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1006,9 +1025,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
- netdev_info(netdev, "firmware didnt respond to loopback"
- " configure request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ netdev_info(netdev,
+ "Firmware didn't send link up event for loopback request\n");
+ ret = -ETIMEDOUT;
goto free_res;
} else if (adapter->ahw->diag_cnt) {
ret = adapter->ahw->diag_cnt;
@@ -1025,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
clear_it:
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -1077,11 +1097,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
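+ /* Per-TX-ring counters are emitted first as "tx_ring_<n> <stat>" */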
+ num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
+ for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_ring_%d %s", i,
+ qlcnic_tx_ring_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
for (index = 0; index < QLCNIC_STATS_LEN; index++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
+
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
for (i = 0; i < num_stats; i++, index++)
@@ -1173,11 +1203,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size;
+ int index, ret, length, size, ring;
char *p;
+ memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ tx_ring = &adapter->tx_ring[ring];
+ *data++ = tx_ring->xmit_on;
+ *data++ = tx_ring->xmit_off;
+ *data++ = tx_ring->xmit_called;
+ *data++ = tx_ring->xmit_finished;
+ }
+ }
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
@@ -1468,6 +1509,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
adapter->ahw->msg_enable = msglvl;
}
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
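+ /* On 84xx the dump enable bit lives in the shared IDC control
+ * register and is changed under the driver lock; other adapters
+ * track it in the local fw_dump state.
+ */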
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = true;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+ return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = false;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+ return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool state;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
+ } else {
+ state = fw_dump->enable;
+ }
+
+ return state;
+}
+
static int
qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
@@ -1484,7 +1587,7 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
else
dump->len = 0;
- if (!fw_dump->enable)
+ if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
@@ -1532,77 +1635,111 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
return 0;
}
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct net_device *netdev = adapter->netdev;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev,
+ "Cannot change driver mask to 0x%x. FW dump not enabled\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ fw_dump->tmpl_hdr->drv_cap_mask = mask;
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+ return 0;
+}
+
static int
qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
- int i;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool valid_mask = false;
+ int i, ret = 0;
u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
- if (!fw_dump->enable) {
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
netdev_info(netdev, "FW dump not enabled\n");
- return 0;
+ ret = -EOPNOTSUPP;
+ break;
}
+
if (fw_dump->clr) {
netdev_info(netdev,
- "Previous dump not cleared, not forcing dump\n");
- return 0;
+ "Previous dump not cleared, not forcing dump\n");
+ break;
}
+
netdev_info(netdev, "Forcing a FW dump\n");
qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
- if (fw_dump->enable && fw_dump->tmpl_hdr) {
- netdev_info(netdev, "Disabling FW dump\n");
- fw_dump->enable = 0;
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_disable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_ENABLE_FW_DUMP:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
- }
- if (!fw_dump->enable) {
- netdev_info(netdev, "Enabling FW dump\n");
- fw_dump->enable = 1;
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_enable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
- return 0;
+ break;
+
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
netdev_info(netdev, "Device in FAILED state\n");
- return 0;
+ break;
+
default:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
+
for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
if (val->flag == qlcnic_fw_dump_level[i]) {
- fw_dump->tmpl_hdr->drv_cap_mask =
- val->flag;
- netdev_info(netdev, "Driver mask changed to: 0x%x\n",
- fw_dump->tmpl_hdr->drv_cap_mask);
- return 0;
+ valid_mask = true;
+ break;
}
}
- netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
- return -EINVAL;
+
+ if (valid_mask) {
+ ret = qlcnic_set_dump_mask(adapter, val->flag);
+ } else {
+ netdev_info(netdev, "Invalid dump level: 0x%x\n",
+ val->flag);
+ ret = -EINVAL;
+ }
}
- return 0;
+ return ret;
}
const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1657,3 +1794,11 @@ const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
};
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+ .get_settings = qlcnic_get_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .set_dump = qlcnic_set_dump,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4d5f59b2d15..f8adc7b01f1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -387,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
- tx_ring = adapter->tx_ring;
+ tx_ring = &adapter->tx_ring[0];
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
@@ -740,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
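+/* Cache the MAC address of the physical port so it can be reported
+ * through ndo_get_phys_port_id.
+ */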
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4a71b28effc..272c356cf9b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -85,8 +85,11 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_TEMP_HDR 0x30
#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
#define QLCNIC_CMD_CONFIG_VPORT 0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
#define QLCNIC_CMD_GET_MAC_STATS 0x37
#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
#define QLCNIC_CMD_GET_LED_STATUS 0x3C
#define QLCNIC_CMD_CONFIGURE_RSS 0x41
#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
@@ -122,6 +125,7 @@ enum qlcnic_regs {
#define QLCNIC_MBX_COMP_EVENT 0x8100
#define QLCNIC_MBX_REQUEST_EVENT 0x8101
#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
@@ -149,7 +153,6 @@ struct ethtool_stats;
struct pci_device_id;
struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
-struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
@@ -173,10 +176,12 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *tx_ring, int);
@@ -184,7 +189,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 974d62607e1..66c26cf7a2b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_buffer *cmd_buf;
struct qlcnic_skb_frag *buffrag;
int i, j;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
@@ -241,7 +241,13 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
-
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 6946d354f44..11b4bb83b93 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,6 +127,23 @@
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
struct qlcnic_host_rds_ring *, u16, u16);
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
@@ -147,10 +164,7 @@ static inline u8 qlcnic_mac_hash(u64 mac)
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
u16 handle, u8 ring_id)
{
- unsigned short device = adapter->pdev->device;
-
- if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
+ if (qlcnic_83xx_check(adapter))
return handle | (ring_id << 15);
else
return handle;
@@ -357,14 +371,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
- struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 l4proto, opcode = 0, hdr_len = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
@@ -547,7 +561,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *pbuf;
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -556,10 +570,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
- num_txd = tx_ring->num_desc;
-
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
return NETDEV_TX_BUSY;
}
@@ -569,7 +581,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
+ if (qlcnic_check_multi_tx(adapter))
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ else
+ tx_ring = &adapter->tx_ring[0];
+ num_txd = tx_ring->num_desc;
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
+
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
@@ -584,11 +603,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(tx_ring->txq);
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_start_queue(netdev);
+ netif_tx_start_queue(tx_ring->txq);
} else {
adapter->stats.xmit_off++;
+ tx_ring->xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -643,7 +663,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
goto unwind_buff;
if (adapter->drv_mac_learn)
@@ -651,6 +671,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ tx_ring->xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -673,7 +694,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
adapter->ahw->linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
} else if (!adapter->ahw->linkup && linkup) {
netdev_info(netdev, "NIC Link is up\n");
@@ -768,9 +789,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
- return 1;
-
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
@@ -788,6 +806,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
frag->dma = 0ULL;
}
adapter->stats.xmitfinished++;
+ tx_ring->xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
@@ -800,10 +819,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
smp_mb();
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_wake_queue(netdev);
+ netif_tx_wake_queue(tx_ring->txq);
adapter->stats.xmit_on++;
+ tx_ring->xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -823,7 +844,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
return done;
}
@@ -833,16 +853,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
- tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ tx_ring = sds_ring->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_int(sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
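+/* NAPI poll handler for a dedicated TX completion ring; re-enables the
+ * per-ring TX interrupt once all pending completions are processed.
+ */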
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -952,20 +996,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
break;
case 1:
dev_info(dev, "loopback already in progress\n");
- adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ adapter->ahw->diag_cnt = -EINPROGRESS;
break;
case 2:
dev_info(dev, "loopback cable is not connected\n");
- adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ adapter->ahw->diag_cnt = -ENODEV;
break;
default:
dev_info(dev,
"loopback configure request failed, err %x\n",
ret);
- adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ adapter->ahw->diag_cnt = -EIO;
break;
}
break;
+ case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+ qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+ break;
default:
break;
}
@@ -1411,23 +1458,31 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
- max_sds_rings = adapter->max_sds_rings;
-
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- if (ring == adapter->max_sds_rings - 1)
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
- else
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- QLCNIC_NETDEV_WEIGHT*2);
+ NAPI_POLL_WEIGHT);
+ } else {
+ if (ring == (adapter->max_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ }
}
if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1435,6 +1490,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
return 0;
}
@@ -1443,6 +1506,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
@@ -1450,6 +1514,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
qlcnic_free_tx_rings(adapter);
}
@@ -1457,6 +1529,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1467,12 +1540,24 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
}
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
}
void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1484,6 +1569,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_int(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
}
#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
@@ -1685,7 +1781,7 @@ static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
break;
default:
dev_info(&adapter->pdev->dev,
- "Unkonwn opcode: 0x%x\n", opcode);
+ "Unknown opcode: 0x%x\n", opcode);
goto skip;
}
@@ -1864,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings, temp;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1872,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
- max_sds_rings = adapter->max_sds_rings;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_rx_poll,
- QLCNIC_NETDEV_WEIGHT * 2);
- } else {
- temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
+ NAPI_POLL_WEIGHT);
+ else
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_msix_sriov_vf_poll,
- temp);
- }
+ NAPI_POLL_WEIGHT);
} else {
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ NAPI_POLL_WEIGHT);
}
}
@@ -1905,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
- QLCNIC_NETDEV_WEIGHT);
+ NAPI_POLL_WEIGHT);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index bc05d016c85..21d00a0449a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
};
@@ -146,6 +148,11 @@ static const u32 qlcnic_reg_tbl[] = {
static const struct qlcnic_board_info qlcnic_boards[] = {
{ PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+ "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_QLE834X,
PCI_VENDOR_ID_QLOGIC,
0x24e,
@@ -254,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -278,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
- u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
- if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
- return -EIO;
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -422,9 +431,27 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
usleep_range(10000, 11000);
+ if (!adapter->fw_work.work.func)
+ return;
+
cancel_delayed_work_sync(&adapter->fw_work);
}
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -442,6 +469,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_add = qlcnic_fdb_add,
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -514,13 +542,36 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.get_board_info = qlcnic_82xx_get_board_info,
.set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
+ .io_error_detected = qlcnic_82xx_io_error_detected,
+ .io_slot_reset = qlcnic_82xx_io_slot_reset,
+ .io_resume = qlcnic_82xx_io_resume,
};
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int num_tx_q;
+
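+ /* Multi TX needs MSI-X and the firmware multi-TX capability bit;
+ * the ring count is capped by the default ring count and the
+ * number of online CPUs.
+ */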
+ if (ahw->msix_supported &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+ num_online_cpus());
+ if (num_tx_q > 1) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+ &adapter->state);
+ adapter->max_drv_tx_rings = num_tx_q;
+ }
+ } else {
+ adapter->max_drv_tx_rings = 1;
+ }
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
+ int max_tx_rings, max_sds_rings, tx_vector;
int err = -1, i;
- int max_tx_rings, tx_vector;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
max_tx_rings = 0;
@@ -554,7 +605,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
adapter->max_sds_rings = num_msix -
max_tx_rings - 1;
} else {
- adapter->max_sds_rings = num_msix;
+ adapter->ahw->num_msix = num_msix;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1))
+ max_sds_rings = num_msix - max_tx_rings;
+ else
+ max_sds_rings = num_msix;
+
+ adapter->max_sds_rings = max_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
@@ -570,6 +629,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
num_msix += (max_tx_rings + 1);
} else {
num_msix = rounddown_pow_of_two(err);
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += max_tx_rings;
}
if (num_msix) {
@@ -605,6 +666,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->msix_entries[0].vector = pdev->irq;
return err;
}
+
if (qlcnic_use_msi || qlcnic_use_msi_x)
return -EOPNOTSUPP;
@@ -621,28 +683,69 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
if (!num_intr)
num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- if (adapter->ahw->msix_supported)
+ if (ahw->msix_supported) {
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
num_intr));
- else
+ if (qlcnic_check_multi_tx(adapter)) {
+ if (txq)
+ adapter->max_drv_tx_rings = txq;
+ num_msix += adapter->max_drv_tx_rings;
+ }
+ } else {
num_msix = 1;
+ }
err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM || !err)
+ if (err == -ENOMEM)
return err;
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
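+ /* Allocate one interrupt table entry per MSI-X vector and
+ * configure the interrupts when multi TX is in use.
+ */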
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl = vzalloc(ahw->num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts for %d vectors\n",
+ ahw->num_msix);
return err;
+ }
- return -EIO;
+ return 0;
}
void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
@@ -696,6 +799,23 @@ static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
return ret;
}
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+ bool ret = false;
+
+ if (qlcnic_84xx_check(adapter)) {
+ ret = true;
+ } else if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
@@ -739,18 +859,30 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
(pci_info[i].type != QLCNIC_TYPE_NIC))
continue;
+ if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+ if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+ adapter->npars[j].eswitch_status = true;
+ else
+ continue;
+ } else {
+ adapter->npars[j].eswitch_status = true;
+ }
+
adapter->npars[j].pci_func = pfn;
adapter->npars[j].active = (u8)pci_info[i].active;
adapter->npars[j].type = (u8)pci_info[i].type;
adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
- if (qlcnic_83xx_check(adapter))
+ if (qlcnic_82xx_check(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
qlcnic_enable_eswitch(adapter, i, 1);
}
@@ -829,7 +961,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -870,8 +1004,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return 0;
}
-static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
- int index)
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+ int index)
{
struct pci_dev *pdev = adapter->pdev;
unsigned short subsystem_vendor;
@@ -1173,6 +1307,9 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
esw_cfg.pci_func = adapter->npars[i].pci_func;
esw_cfg.mac_override = BIT_0;
@@ -1235,6 +1372,9 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->ahw->act_pci_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
@@ -1413,6 +1553,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
(ring == (num_sds_rings - 1))) {
if (!(adapter->flags &
QLCNIC_MSIX_ENABLED))
@@ -1436,9 +1577,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
return err;
}
}
- if (qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
@@ -1473,8 +1616,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
free_irq(sds_ring->irq, sds_ring);
}
}
- if (qlcnic_83xx_check(adapter) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
@@ -1510,8 +1655,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+
qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
@@ -1558,6 +1705,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1567,7 +1716,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1585,8 +1733,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
- qlcnic_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
}
/* Usage: During suspend and firmware recovery module */
@@ -1666,6 +1815,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ int max_tx_rings = adapter->max_drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
@@ -1682,6 +1832,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->ahw->diag_test = 0;
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1750,6 +1901,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
+ adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1907,12 +2059,18 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
+ err = qlcnic_set_real_num_queues(adapter, netdev);
+ if (err)
+ return err;
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
return err;
}
+ qlcnic_dcb_init_dcbnl_ops(adapter);
+
return 0;
}
@@ -1975,7 +2133,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
tx_ring->cmd_buf_arr = cmd_buf_arr;
}
- if (qlcnic_83xx_check(adapter)) {
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
@@ -1986,6 +2145,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
}
}
+
return 0;
}
@@ -2004,6 +2164,17 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
+static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ return __qlcnic_register_dcb(adapter);
+}
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ kfree(adapter->dcb);
+ adapter->dcb = NULL;
+}
+
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2048,9 +2219,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
default:
@@ -2061,7 +2234,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_free_hw_res;
- netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
if (!netdev) {
err = -ENOMEM;
goto err_out_iounmap;
@@ -2091,29 +2265,49 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
adapter->drv_mac_learn = true;
- adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
+ qlcnic_register_dcb(adapter);
+
if (qlcnic_82xx_check(adapter)) {
qlcnic_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_start_firmware(adapter);
if (err) {
- dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
- goto err_out_free_hw;
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
+ "\t\tIf reboot doesn't help, try flashing the card\n");
+ goto err_out_maintenance_mode;
+ }
+
+ qlcnic_get_multiq_capability(adapter);
+
+ if ((adapter->ahw->act_pci_func > 2) &&
+ qlcnic_check_multi_tx(adapter)) {
+ adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+ dev_info(&adapter->pdev->dev,
+ "vNIC mode enabled, Set max TX rings = %d\n",
+ adapter->max_drv_tx_rings);
}
+ if (!qlcnic_check_multi_tx(adapter)) {
+ clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+ }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
+
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
+
} else if (qlcnic_83xx_check(adapter)) {
+ adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
@@ -2132,6 +2326,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
+ qlcnic_read_phys_port_id(adapter);
+
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
@@ -2145,16 +2341,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to setup interrupt\n");
- goto err_out_disable_msi;
- }
-
- if (qlcnic_83xx_check(adapter)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err)
+ if (qlcnic_82xx_check(adapter)) {
+ err = qlcnic_setup_intr(adapter, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
+ }
}
err = qlcnic_get_act_pci_func(adapter);
@@ -2220,6 +2412,22 @@ err_out_disable_pdev:
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
return err;
+
+err_out_maintenance_mode:
+ netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ err = register_netdev(netdev);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+ qlcnic_clr_all_drv_state(adapter, 0);
+ goto err_out_free_hw;
+ }
+
+ pci_set_drvdata(pdev, adapter);
+ qlcnic_add_sysfs(adapter);
+
+ return 0;
}
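
The err_out_maintenance_mode path above keeps the PCI function visible even though firmware failed to load: the netdev is registered with a dedicated set of "failed" netdev/ethtool ops and the sysfs entries are still created, so recovery and diagnostic tools can reach the card. As an illustrative sketch only (hypothetical names, not the driver's qlcnic_netdev_failed_ops), such fallback ops typically just report the failure:

/* Illustrative sketch, not part of the patch. */
static int failed_open(struct net_device *netdev)
{
	netdev_err(netdev, "device is in failed state, cannot open\n");
	return -EIO;
}

static const struct net_device_ops failed_netdev_ops = {
	.ndo_open = failed_open,
};
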
static void qlcnic_remove(struct pci_dev *pdev)
@@ -2238,13 +2446,18 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
+ qlcnic_dcb_free(adapter);
+
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_register_nic_idc_func(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ kfree(ahw->fw_info);
}
qlcnic_detach(adapter);
@@ -2278,6 +2491,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->qlcnic_wq);
adapter->qlcnic_wq = NULL;
}
+
qlcnic_free_adapter_resources(adapter);
kfree(ahw);
free_netdev(netdev);
@@ -2324,8 +2538,16 @@ static int qlcnic_resume(struct pci_dev *pdev)
static int qlcnic_open(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ u32 state;
int err;
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+ netdev_err(netdev, "%s: Device is in FAILED state\n", __func__);
+
+ return -EIO;
+ }
+
netif_carrier_off(netdev);
err = qlcnic_attach(adapter);
@@ -2336,7 +2558,7 @@ static int qlcnic_open(struct net_device *netdev)
if (err)
goto err_out;
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
return 0;
@@ -2468,6 +2690,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
static void qlcnic_tx_timeout(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
@@ -2481,6 +2705,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+ if (qlcnic_82xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ dev_info(&netdev->dev, "ring=%d\n", ring);
+ dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+ dev_info(&netdev->dev, "producer=%d\n",
+ readl(tx_ring->crb_cmd_producer));
+ dev_info(&netdev->dev, "sw_consumer = %d\n",
+ tx_ring->sw_consumer);
+ dev_info(&netdev->dev, "hw_consumer = %d\n",
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ dev_info(&netdev->dev, "xmit-on=%llu\n",
+ tx_ring->xmit_on);
+ dev_info(&netdev->dev, "xmit-off=%llu\n",
+ tx_ring->xmit_off);
+ }
+ }
adapter->ahw->reset_context = 1;
}
}
@@ -2869,7 +3112,7 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
rtnl_lock();
- if (adapter->ahw->fw_dump.enable &&
+ if (qlcnic_check_fw_dump_state(adapter) &&
(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
QLCDB(adapter, DRV, "Take FW dump\n");
qlcnic_dump_fw(adapter);
@@ -3013,6 +3256,13 @@ void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter, u32 key)
return;
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD) {
+ netdev_err(adapter->netdev, "%s: Device is in FAILED state\n",
+ __func__);
+ qlcnic_api_unlock(adapter);
+
+ return;
+ }
if (state == QLCNIC_DEV_READY) {
QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
@@ -3074,6 +3324,8 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
+ qlcnic_dcb_get_info(adapter);
+
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
goto done;
@@ -3245,7 +3497,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0);
+ err = qlcnic_setup_intr(adapter, 0, 0);
if (err) {
kfree(adapter->msix_entries);
@@ -3253,19 +3505,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
- if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "failed to setup mbx interrupt\n");
- qlcnic_clr_all_drv_state(adapter, 1);
- clear_bit(__QLCNIC_AER, &adapter->state);
- goto done;
- }
- }
-
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (err) {
@@ -3286,8 +3525,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3306,12 +3545,6 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
- cancel_delayed_work_sync(&adapter->idc_aen_work);
- }
-
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
@@ -3323,13 +3556,13 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-static void qlcnic_io_resume(struct pci_dev *pdev)
+void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3339,9 +3572,48 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
- FW_POLL_DELAY);
+ FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_error_detected) {
+ return hw_ops->io_error_detected(pdev, state);
+ } else {
+ dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
}
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_slot_reset) {
+ return hw_ops->io_slot_reset(pdev);
+ } else {
+ dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_resume)
+ hw_ops->io_resume(pdev);
+ else
+ dev_err(&pdev->dev, "AER resume handler not registered.\n");
+}
+
+
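
The three wrappers above route the PCI AER callbacks through the adapter's hardware-ops table, so the 82xx handlers exported here and any chip-specific handlers can sit behind a single pci_error_handlers instance. A minimal sketch of the same dispatch-with-fallback pattern (hypothetical names, not the driver's structures):

/* Illustrative sketch, not part of the patch. */
struct aer_ops {
	pci_ers_result_t (*error_detected)(struct pci_dev *pdev,
					   pci_channel_state_t state);
};

static pci_ers_result_t dispatch_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state,
						const struct aer_ops *ops)
{
	/* Fail safe when no per-chip handler was registered. */
	if (!ops || !ops->error_detected)
		return PCI_ERS_RESULT_DISCONNECT;

	return ops->error_detected(pdev, state);
}
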
static int
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
{
@@ -3370,16 +3642,65 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw = QLCNIC_MAX_TX_RINGS;
+ u32 max_allowed;
+
+ if (!qlcnic_82xx_check(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (txq > QLCNIC_MAX_TX_RINGS) {
+ netdev_err(netdev, "Invalid ring count\n");
+ return -EINVAL;
+ }
+
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((txq > max_allowed) || !is_power_of_2(txq)) {
+ if (!is_power_of_2(txq))
+ netdev_err(netdev,
+ "TX queue should be a power of 2\n");
+ if (txq > num_online_cpus())
+ netdev_err(netdev,
+ "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+ num_online_cpus());
+ netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
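
qlcnic_validate_max_tx_rings accepts a Tx ring count only if it is a power of two and no larger than both the hardware maximum and the rounded-down power-of-two of the online CPU count. The same check, reduced to a standalone helper (a sketch, not the driver's function; it assumes the kernel helpers is_power_of_2(), rounddown_pow_of_two() and num_online_cpus()):

/* Illustrative sketch, not part of the patch. */
static bool tx_ring_count_valid(u32 requested, u32 max_hw)
{
	u32 limit = rounddown_pow_of_two(min_t(u32, max_hw,
					       num_online_cpus()));

	return is_power_of_2(requested) && requested <= limit;
}
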
+
int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
+ __u32 val)
{
struct net_device *netdev = adapter->netdev;
u8 max_hw = adapter->ahw->max_rx_ques;
u32 max_allowed;
- if (val > QLC_MAX_SDS_RINGS) {
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (val > QLCNIC_MAX_SDS_RINGS) {
netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLC_MAX_SDS_RINGS);
+ QLCNIC_MAX_SDS_RINGS);
return -EINVAL;
}
@@ -3409,27 +3730,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
{
int err;
struct net_device *netdev = adapter->netdev;
+ int num_msix;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
+ if (qlcnic_82xx_check(adapter)) {
+ if (txq != 0)
+ adapter->max_drv_tx_rings = txq;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (txq > adapter->max_drv_tx_rings))
+ num_msix = adapter->max_drv_tx_rings;
+ else
+ num_msix = data;
+ }
+
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
+ netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data);
+
+ err = qlcnic_setup_intr(adapter, data, txq);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
@@ -3457,8 +3799,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
- err = len;
- done:
+done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 79e54efe07b..15513608d48 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1082,14 +1082,17 @@ flash_temp:
}
tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ dev_info(&adapter->pdev->dev,
+ "Default minidump capture mask 0x%x\n",
+ tmpl_hdr->cap_mask);
if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
ahw->fw_dump.use_pex_dma = true;
else
ahw->fw_dump.use_pex_dma = false;
- ahw->fw_dump.enable = 1;
+ qlcnic_enable_fw_dump_state(adapter);
return 0;
}
@@ -1112,7 +1115,11 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
ahw = adapter->ahw;
- if (!fw_dump->enable) {
+ /* Return if we don't have a firmware dump template header */
+ if (!tmpl_hdr)
+ return -EIO;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
dev_info(&adapter->pdev->dev, "Dump not enabled\n");
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 5d40045b3ce..392b9bd12b4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
struct qlcnic_cmd_args *);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
@@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_sriov_vf_mbx_op,
+ .mbx_cmd = qlcnic_sriov_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
@@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
u32 *pay, u8 pci_func, u8 size)
{
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- unsigned long flags;
- u16 opcode;
- u8 mbx_err_code;
- int i, j;
-
- opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
-
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
- }
-
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val) {
- QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
- return QLCNIC_RCODE_TIMEOUT;
- }
- /* Fill in mailbox registers */
- val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
- mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
-
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- mbx_cmd = 0x1 | (1 << 4);
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct qlcnic_cmd_args cmd;
+ unsigned long timeout;
+ int err;
- if (qlcnic_sriov_pf_check(adapter))
- mbx_cmd |= (pci_func << 5);
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd.hdr = hdr;
+ cmd.pay = pay;
+ cmd.pay_size = size;
+ cmd.func_num = pci_func;
+ cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+ cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
- for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
- i++, j++) {
- writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+ err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- for (j = 0; j < size; j++, i++)
- writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-
- /* Waiting for the mailbox cmd to complete and while waiting here
- * some AEN might arrive. If more than 5 seconds expire we can
- * assume something is wrong.
- */
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
- dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- break;
- }
- goto out;
+ if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd.rsp_opcode;
}
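
The rewritten qlcnic_sriov_post_bc_msg no longer busy-polls mailbox registers under a spinlock; it fills a qlcnic_cmd_args, hands it to the mailbox work queue via mbx->ops->enqueue_cmd(), and sleeps on a per-command completion with a timeout. A generic sketch of that queue-and-wait pattern (hypothetical types, not the driver's mailbox code):

/* Illustrative sketch, not part of the patch. */
struct my_cmd {
	struct work_struct work;
	struct completion done;
	int status;
};

static void my_cmd_worker(struct work_struct *work)
{
	struct my_cmd *cmd = container_of(work, struct my_cmd, work);

	cmd->status = 0;		/* talk to the hardware here */
	complete(&cmd->done);
}

static int my_cmd_submit(struct workqueue_struct *wq, struct my_cmd *cmd)
{
	INIT_WORK(&cmd->work, my_cmd_worker);
	init_completion(&cmd->done);
	queue_work(wq, &cmd->work);

	if (!wait_for_completion_timeout(&cmd->done, msecs_to_jiffies(5000)))
		return -ETIMEDOUT;

	return cmd->status;
}
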
static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
@@ -458,7 +400,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+ adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
return 0;
}
@@ -490,11 +432,12 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
- int ret;
+ int ret = 0;
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
@@ -522,8 +465,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
- struct qlcnic_info nic_info;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info;
int err;
err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@ -534,7 +477,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter);
+ err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
if (err)
return err;
@@ -564,7 +507,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
dev_warn(&adapter->pdev->dev,
"Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1);
+ err = qlcnic_setup_intr(adapter, 1, 0);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -590,6 +533,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
+
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
goto err_out_send_channel_term;
@@ -597,6 +543,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
pci_set_drvdata(adapter->pdev, adapter);
dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
adapter->netdev->name);
+
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
adapter->ahw->idc.delay);
return 0;
@@ -637,8 +584,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err;
- spin_lock_init(&ahw->mbx_lock);
- set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
ahw->reset_context = 0;
@@ -1085,6 +1030,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
if (test_bit(QLC_BC_VF_FLR, &vf->state))
return;
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
trans = list_first_entry(&vf->rcv_act.wait_list,
struct qlcnic_bc_trans, list);
adapter = vf->adapter;
@@ -1234,6 +1180,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
return;
}
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
cmd_op = hdr->cmd_op;
if (qlcnic_sriov_alloc_bc_trans(&trans))
return;
@@ -1359,7 +1306,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
if (enable)
cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
- err = qlcnic_83xx_mbx_op(adapter, &cmd);
+ err = qlcnic_83xx_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -1391,10 +1338,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
struct device *dev = &adapter->pdev->dev;
struct qlcnic_bc_trans *trans;
int err;
@@ -1411,7 +1359,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
goto cleanup_transaction;
retry:
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
rsp = -EIO;
QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@ -1454,7 +1402,7 @@ err_out:
if (rsp == QLCNIC_RCODE_TIMEOUT) {
ahw->reset_context = 1;
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
}
cleanup_transaction:
@@ -1613,8 +1561,9 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
int err;
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ adapter->need_fw_reset = 0;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
@@ -1628,6 +1577,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
+ qlcnic_dcb_get_info(adapter);
+
return 0;
err_out_term_channel:
@@ -1657,8 +1608,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u8 i, max_ints = ahw->num_msix - 1;
- qlcnic_83xx_disable_mbx_intr(adapter);
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
@@ -1702,6 +1655,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
struct device *dev = &adapter->pdev->dev;
struct qlc_83xx_idc *idc = &ahw->idc;
u8 func = ahw->pci_func;
@@ -1712,7 +1666,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
/* Skip the context reset and check if FW is hung */
if (adapter->reset_ctx_cnt < 3) {
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
dev_info(dev,
"Resetting context, wait here to check if FW is in failed state\n");
return 0;
@@ -1737,7 +1691,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
__func__, adapter->reset_ctx_cnt, func);
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
adapter->need_fw_reset = 0;
@@ -1787,6 +1741,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
@@ -1794,7 +1749,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
}
@@ -1803,6 +1758,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
u8 func = adapter->ahw->pci_func;
@@ -1812,7 +1768,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
}
return 0;
@@ -1990,7 +1946,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
int err;
set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index eb49cd65378..686f460b150 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -397,6 +397,7 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -407,12 +408,15 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
/* After disabling SRIOV re-init the driver in default mode
configure opmode based on op_mode of function
*/
- if (qlcnic_83xx_configure_opmode(adapter))
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ rtnl_unlock();
return -EIO;
+ }
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
+ rtnl_unlock();
return 0;
}
@@ -533,6 +537,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
return -EIO;
}
+ rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
@@ -555,6 +560,7 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
__qlcnic_up(adapter, netdev);
error:
+ rtnl_unlock();
return err;
}
@@ -1183,10 +1189,19 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
+ struct qlcnic_adapter *adapter;
+
+ adapter = vf->adapter;
cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
+ /* For 84xx adapters, in case of PVID, PFD should send the VLAN mode as
+ * QLC_NO_VLAN_MODE to VFD, which is zero in the mailbox response
+ */
+ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+ return 0;
+
switch (mode) {
case QLC_GUEST_VLAN_MODE:
cmd->rsp.arg[1] = mode | 1 << 8;
@@ -1284,6 +1299,10 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
+ QLCNIC_CMD_DCB_QUERY_CAP,
+ QLCNIC_CMD_DCB_QUERY_PARAM,
+ QLCNIC_CMD_INIT_NIC_FUNC,
+ QLCNIC_CMD_STOP_NIC_FUNC,
};
static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
@@ -1639,14 +1658,14 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!is_valid_ether_addr(mac) || vf >= num_vfs)
return -EINVAL;
- if (!compare_ether_addr(adapter->mac_addr, mac)) {
+ if (ether_addr_equal(adapter->mac_addr, mac)) {
netdev_err(netdev, "MAC address is already in use by the PF\n");
return -EINVAL;
}
for (i = 0; i < num_vfs; i++) {
vf_info = &sriov->vf_info[i];
- if (!compare_ether_addr(vf_info->vp->mac, mac)) {
+ if (ether_addr_equal(vf_info->vp->mac, mac)) {
netdev_err(netdev,
"MAC address is already in use by VF %d\n",
i);
@@ -1768,8 +1787,8 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return 0;
}
-static inline __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
- struct qlcnic_vport *vp, int vf)
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
{
__u32 vlan = 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 660c3f5b223..019f4377307 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -465,8 +465,14 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
memset(&pm_cfg, 0,
sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_func = adapter->npars[i].pci_func;
+ if (!adapter->npars[i].active)
+ continue;
+
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
@@ -632,8 +638,14 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
memset(&esw_cfg, 0,
sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_func = adapter->npars[i].pci_func;
+ if (!adapter->npars[i].active)
+ continue;
+
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
@@ -732,6 +744,9 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
if (ret)
return ret;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
@@ -1257,6 +1272,7 @@ void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 state;
if (device_create_bin_file(dev, &bin_attr_port_stats))
dev_info(dev, "failed to create port stats sysfs entry");
@@ -1270,8 +1286,13 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ return;
+
if (device_create_bin_file(dev, &bin_attr_pci_config))
dev_info(dev, "failed to create pci config sysfs entry");
+
if (device_create_file(dev, &dev_attr_beacon))
dev_info(dev, "failed to create beacon sysfs entry");
@@ -1292,6 +1313,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
+ u32 state;
device_remove_bin_file(dev, &bin_attr_port_stats);
@@ -1300,6 +1322,11 @@ void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_FAILED || state == QLCNIC_DEV_BADBAD)
+ return;
+
device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_file(dev, &dev_attr_beacon);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 7e8d6826396..89943377846 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2149,7 +2149,7 @@ struct ql_adapter {
struct timer_list timer;
atomic_t lb_count;
/* Keep local copy of current mac address. */
- char current_mac_addr[6];
+ char current_mac_addr[ETH_ALEN];
};
/*
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index 10093f0c4c0..6bc5db70392 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -740,8 +740,8 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
int i;
if (!mpi_coredump) {
- netif_err(qdev, drv, qdev->ndev, "No memory available\n");
- return -ENOMEM;
+ netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
+ return -EINVAL;
}
/* Try to get the spinlock, but dont worry if
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
index ff2bf8a4e24..7ad146080c3 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c
@@ -1274,7 +1274,7 @@ void ql_mpi_reset_work(struct work_struct *work)
return;
}
- if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) {
netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
qdev->core_is_dumped = 1;
queue_delayed_work(qdev->workqueue,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 85e5c97191d..3397cee8977 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1897,12 +1897,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
-
- if (regs->len > R8169_REGS_SIZE)
- regs->len = R8169_REGS_SIZE;
+ u32 __iomem *data = tp->mmio_addr;
+ u32 *dw = p;
+ int i;
rtl_lock_work(tp);
- memcpy_fromio(p, tp->mmio_addr, regs->len);
+ for (i = 0; i < R8169_REGS_SIZE; i += 4)
+ memcpy_fromio(dw++, data++, 4);
rtl_unlock_work(tp);
}
@@ -4230,6 +4231,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34:
+ case RTL_GIGA_MAC_VER_35:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 19a8a045e07..a30c4395b23 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740 and R8A7779.
+ R8A7740, R8A777x and R8A7790.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a753928bab9..5cd831ebfa8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
[RMCR] = 0x0258,
[TFUCR] = 0x0264,
[RFOCR] = 0x0268,
+ [RMIIMODE] = 0x026c,
[FCFTR] = 0x0270,
[TRIMD] = 0x027c,
};
@@ -377,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
@@ -392,6 +395,30 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
+/* R8A7790 */
+static struct sh_eth_cpu_data r8a7790_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate_r8a777x,
+
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+ EESR_ECI,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rmiimode = 1,
+ .shift_rd0 = 1,
+};
+
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -413,6 +440,8 @@ static struct sh_eth_cpu_data sh7724_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7724,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
@@ -451,6 +480,8 @@ static struct sh_eth_cpu_data sh7757_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7757,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.rmcr_value = 0x00000001,
@@ -519,6 +550,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_giga,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -577,6 +610,8 @@ static struct sh_eth_cpu_data sh7734_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -604,6 +639,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -641,6 +678,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -663,6 +702,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
};
static struct sh_eth_cpu_data sh7619_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1,
@@ -672,6 +713,8 @@ static struct sh_eth_cpu_data sh7619_data = {
};
static struct sh_eth_cpu_data sh771x_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tsu = 1,
};
@@ -1124,6 +1167,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (ret)
goto out;
+ if (mdp->cd->rmiimode)
+ sh_eth_write(ndev, 0x1, RMIIMODE);
+
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
@@ -1297,9 +1343,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
+ dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
+ mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
- netif_rx(skb);
+ netif_receive_skb(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
@@ -1857,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
pm_runtime_get_sync(&mdp->pdev->dev);
+ napi_enable(&mdp->napi);
+
ret = request_irq(ndev->irq, sh_eth_interrupt,
mdp->cd->irq_flags, ndev->name, ndev);
if (ret) {
dev_err(&ndev->dev, "Can not assign IRQ number\n");
- return ret;
+ goto out_napi_off;
}
/* Descriptor set */
@@ -1879,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- napi_enable(&mdp->napi);
-
return ret;
out_free_irq:
free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&mdp->napi);
pm_runtime_put_sync(&mdp->pdev->dev);
return ret;
}
@@ -1976,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- napi_disable(&mdp->napi);
-
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
@@ -1995,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
+ napi_disable(&mdp->napi);
+
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2561,7 +2612,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp = NULL;
- struct sh_eth_plat_data *pd = pdev->dev.platform_data;
+ struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
/* get base addr */
@@ -2594,9 +2645,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
- /* Fill in the fields of the device structure with ethernet values. */
- ether_setup(ndev);
-
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
@@ -2618,10 +2666,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->edmac_endian = pd->edmac_endian;
mdp->no_ether_link = pd->no_ether_link;
mdp->ether_link_active_low = pd->ether_link_active_low;
- mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */
mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
@@ -2749,6 +2797,7 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
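
The sh_eth changes move the register layout selection from board platform data into the per-SoC sh_eth_cpu_data (the new .register_type field), so probe picks the offset table from mdp->cd rather than from pd. A plausible shape for the selection helper, shown only as a sketch: sh_eth_offset_fast_rcar appears earlier in this diff, the other table names are assumed to follow the same naming, and the driver's actual sh_eth_get_register_offset() may differ.

/* Illustrative sketch, not part of the patch. */
static const u16 *get_register_offset(int register_type)
{
	switch (register_type) {
	case SH_ETH_REG_GIGABIT:
		return sh_eth_offset_gigabit;
	case SH_ETH_REG_FAST_RCAR:
		return sh_eth_offset_fast_rcar;
	case SH_ETH_REG_FAST_SH4:
		return sh_eth_offset_fast_sh4;
	case SH_ETH_REG_FAST_SH3_SH2:
		return sh_eth_offset_fast_sh3_sh2;
	default:
		return NULL;
	}
}
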
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 99995bf38c4..a0db02c63b1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -60,6 +60,7 @@ enum {
EDOCR,
TFUCR,
RFOCR,
+ RMIIMODE,
FCFTR,
RPADIR,
TRIMD,
@@ -156,6 +157,13 @@ enum {
SH_ETH_MAX_REGISTER_OFFSET,
};
+enum {
+ SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RCAR,
+ SH_ETH_REG_FAST_SH4,
+ SH_ETH_REG_FAST_SH3_SH2
+};
+
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#define SH4_SKB_RX_ALIGN 32
@@ -453,6 +461,7 @@ struct sh_eth_cpu_data {
void (*set_rate)(struct net_device *ndev);
/* mandatory initialize value */
+ int register_type;
unsigned long eesipr_value;
/* optional initialize value */
@@ -482,6 +491,7 @@ struct sh_eth_cpu_data {
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
+ unsigned rmiimode:1; /* EtherC has RMIIMODE register */
};
struct sh_eth_private {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 856e523ac93..c7657188601 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
static int sgiseeq_probe(struct platform_device *pdev)
{
- struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
+ struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
struct hpc3_regs *hpcregs = pd->hpc;
struct sgiseeq_init_block *sr;
unsigned int irq = pd->irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a95..08892129444 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
config SFC
- tristate "Solarflare SFC4000/SFC9000-family support"
+ tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
depends on PCI
select MDIO
select CRC32
@@ -7,13 +7,14 @@ config SFC
select I2C_ALGOBIT
select PTP_1588_CLOCK
---help---
- This driver supports 10-gigabit Ethernet cards based on
- the Solarflare SFC4000 and SFC9000-family controllers.
+ This driver supports 10/40-gigabit Ethernet cards based on
+ the Solarflare SFC4000, SFC9000-family and SFC9100-family
+ controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare SFC4000/SFC9000-family MTD support"
+ bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
@@ -21,7 +22,7 @@ config SFC_MTD
(e.g. /dev/mtd1). This is required to update the firmware or
the boot configuration under Linux.
config SFC_MCDI_MON
- bool "Solarflare SFC9000-family hwmon support"
+ bool "Solarflare SFC9000/SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
default y
---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 945bf06e69e..3a83c0dca8e 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,8 +1,7 @@
-sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
- falcon_xmac.o mcdi_mac.o \
- selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+ rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
+ mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254..17d83f37fbf 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
#define EFX_DWORD_0_LBN 0
#define EFX_DWORD_0_WIDTH 32
#define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 00000000000..9f18ae984f9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3059 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "workarounds.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Hardware control for EF10 architecture including 'Huntington'. */
+
+#define EFX_EF10_DRVGEN_EV 7
+enum {
+ EFX_EF10_TEST = 1,
+ EFX_EF10_REFILL,
+};
+
+/* The reserved RSS context value */
+#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+
+/* The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define HUNT_FILTER_TBL_ROWS 8192
+
+struct efx_ef10_filter_table {
+/* The RX match field masks supported by this fw & hw, in order of priority */
+ enum efx_filter_match_flags rx_match_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ unsigned int rx_match_count;
+
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* BUSY flag indicates that an update is in progress. STACK_OLD is
+ * used to mark and sweep stack-owned MAC filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1UL
+#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+ wait_queue_head_t waitq;
+/* Shadow of net_device address lists, guarded by mac_lock */
+#define EFX_EF10_FILTER_STACK_UC_MAX 32
+#define EFX_EF10_FILTER_STACK_MC_MAX 256
+ struct {
+ u8 addr[ETH_ALEN];
+ u16 id;
+ } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
+ stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
+ int stack_uc_count; /* negative for PROMISC */
+ int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+};
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
+static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+
+static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
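+ /* Once the MC has booted, the high word of this register reads
+ * 0xb007 (presumably a "boot" magic) and the low word holds the
+ * warm boot count; until then the count is not valid.
+ */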
+ efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+}
+
+static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
+{
+ return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+}
+
+static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf)) {
+ netif_err(efx, drv, efx->net_dev,
+ "unable to read datapath firmware capabilities\n");
+ return -EIO;
+ }
+
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "current firmware does not support TSO\n");
+ return -ENODEV;
+ }
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+ rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ return rc > 0 ? rc : -ERANGE;
+}
+
+static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ memcpy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+ return 0;
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data;
+ int i, rc;
+
+ /* We can have one VI for each 8K region. However we need
+ * multiple TX queues per channel.
+ */
+ efx->max_channels =
+ min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ BUG_ON(efx->max_channels == 0);
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+ 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
+ if (rc)
+ goto fail1;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail2;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
+
+ rc = efx_mcdi_init(efx);
+ if (rc)
+ goto fail2;
+
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail3;
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc < 0)
+ goto fail3;
+
+ efx->rx_packet_len_offset =
+ ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->port_num = rc;
+
+ rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
+
+ /* Check whether firmware supports bug 35388 workaround */
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ else if (rc != -ENOSYS && rc != -ENOENT)
+ goto fail3;
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail3;
+
+ return 0;
+
+fail3:
+ efx_mcdi_fini(efx);
+fail2:
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+fail1:
+ kfree(nic_data);
+ efx->nic_data = NULL;
+ return rc;
+}
+
+static int efx_ef10_free_vis(struct efx_nic *efx)
+{
+ int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ return rc;
+}
+
+static void efx_ef10_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ efx_mcdi_mon_remove(efx);
+
+ /* This needs to be after efx_ptp_remove_channel() with no filters */
+ efx_ef10_rx_free_indir_table(efx);
+
+ rc = efx_ef10_free_vis(efx);
+ WARN_ON(rc != 0);
+
+ efx_mcdi_fini(efx);
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+}
+
+static int efx_ef10_alloc_vis(struct efx_nic *efx,
+ unsigned int min_vis, unsigned int max_vis)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
+
+static int efx_ef10_dimension_resources(struct efx_nic *efx)
+{
+ unsigned int n_vis =
+ max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+ return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+}
+
+static int efx_ef10_init_nic(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (nic_data->must_check_datapath_caps) {
+ rc = efx_ef10_init_datapath_caps(efx);
+ if (rc)
+ return rc;
+ nic_data->must_check_datapath_caps = false;
+ }
+
+ if (nic_data->must_realloc_vis) {
+ /* We cannot let the number of VIs change now */
+ rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
+ nic_data->n_allocated_vis);
+ if (rc)
+ return rc;
+ nic_data->must_realloc_vis = false;
+ }
+
+ efx_ef10_rx_push_indir_table(efx);
+ return 0;
+}
+
+static int efx_ef10_map_reset_flags(u32 *flags)
+{
+ enum {
+ EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
+ ETH_RESET_SHARED_SHIFT),
+ EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY | ETH_RESET_MGMT) <<
+ ETH_RESET_SHARED_SHIFT)
+ };
+
+ /* We assume for now that our PCI function is permitted to
+ * reset everything.
+ */
+
+ if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
+ *flags &= ~EF10_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
+ *flags &= ~EF10_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
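+/* Each descriptor gives the name under which a statistic is reported
+ * (NULL for statistics kept only internally), its width in bits, and its
+ * byte offset within the DMA'd MAC statistics buffer; the MCDI index
+ * counts 64-bit words, hence the multiplication by 8.
+ */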
+#define EF10_DMA_STAT(ext_name, mcdi_name) \
+ [EF10_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
+ [EF10_STAT_ ## int_name] = \
+ { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_OTHER_STAT(ext_name) \
+ [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
+ EF10_DMA_STAT(tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(tx_packets, TX_PKTS),
+ EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(rx_good_bytes),
+ EF10_OTHER_STAT(rx_bad_bytes),
+ EF10_DMA_STAT(rx_packets, RX_PKTS),
+ EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+};
+
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
+ (1ULL << EF10_STAT_tx_packets) | \
+ (1ULL << EF10_STAT_tx_pause) | \
+ (1ULL << EF10_STAT_tx_unicast) | \
+ (1ULL << EF10_STAT_tx_multicast) | \
+ (1ULL << EF10_STAT_tx_broadcast) | \
+ (1ULL << EF10_STAT_rx_bytes) | \
+ (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_rx_good_bytes) | \
+ (1ULL << EF10_STAT_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_rx_packets) | \
+ (1ULL << EF10_STAT_rx_good) | \
+ (1ULL << EF10_STAT_rx_bad) | \
+ (1ULL << EF10_STAT_rx_pause) | \
+ (1ULL << EF10_STAT_rx_control) | \
+ (1ULL << EF10_STAT_rx_unicast) | \
+ (1ULL << EF10_STAT_rx_multicast) | \
+ (1ULL << EF10_STAT_rx_broadcast) | \
+ (1ULL << EF10_STAT_rx_lt64) | \
+ (1ULL << EF10_STAT_rx_64) | \
+ (1ULL << EF10_STAT_rx_65_to_127) | \
+ (1ULL << EF10_STAT_rx_128_to_255) | \
+ (1ULL << EF10_STAT_rx_256_to_511) | \
+ (1ULL << EF10_STAT_rx_512_to_1023) | \
+ (1ULL << EF10_STAT_rx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
+ (1ULL << EF10_STAT_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_overflow) | \
+ (1ULL << EF10_STAT_rx_nodesc_drops))
+
+/* These statistics are only provided by the 10G MAC. For a 10G/40G
+ * switchable port we do not expose these because they might not
+ * include all the packets they should.
+ */
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
+ (1ULL << EF10_STAT_tx_lt64) | \
+ (1ULL << EF10_STAT_tx_64) | \
+ (1ULL << EF10_STAT_tx_65_to_127) | \
+ (1ULL << EF10_STAT_tx_128_to_255) | \
+ (1ULL << EF10_STAT_tx_256_to_511) | \
+ (1ULL << EF10_STAT_tx_512_to_1023) | \
+ (1ULL << EF10_STAT_tx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+
+/* These statistics are only provided by the 40G MAC. For a 10G/40G
+ * switchable port we do expose these because the errors will otherwise
+ * be silent.
+ */
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
+ (1ULL << EF10_STAT_rx_length_error))
+
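+/* The masks above are 64-bit constants, but the bitmap helpers operate on
+ * arrays of unsigned long, so on 32-bit builds each mask must be split
+ * into its low and high halves.
+ */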
+#if BITS_PER_LONG == 64
+#define STAT_MASK_BITMAP(bits) (bits)
+#else
+#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
+#endif
+
+static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
+{
+ static const unsigned long hunt_40g_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_40G_EXTRA_STAT_MASK)
+ };
+ static const unsigned long hunt_10g_only_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_10G_ONLY_STAT_MASK)
+ };
+ u32 port_caps = efx_mcdi_phy_get_caps(efx);
+
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ return hunt_40g_stat_mask;
+ else
+ return hunt_10g_only_stat_mask;
+}
+
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+ efx_ef10_stat_mask(efx), names);
+}
+
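+/* The MC maintains a pair of generation counts in the statistics buffer;
+ * if the values read before and after copying the statistics differ, an
+ * update was in progress and the caller should retry.
+ */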
+static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+
+ dma_stats = efx->stats_buffer.addr;
+ nic_data = efx->nic_data;
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ stats, efx->stats_buffer.addr, false);
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ stats[EF10_STAT_rx_good_bytes] =
+ stats[EF10_STAT_rx_bytes] -
+ stats[EF10_STAT_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
+ stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+
+ return 0;
+}
+
+static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ const unsigned long *mask = efx_ef10_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us)
+ */
+ for (retry = 0; retry < 100; ++retry) {
+ if (efx_ef10_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_rx_gtjumbo] +
+ stats[EF10_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+ }
+
+ return stats_count;
+}
+
+static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int mode, value;
+ efx_dword_t timer_cmd;
+
+ if (channel->irq_moderation) {
+ mode = 3;
+ value = channel->irq_moderation - 1;
+ } else {
+ mode = 0;
+ value = 0;
+ }
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
+ channel->channel);
+ }
+}
+
+static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void efx_ef10_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
+ _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
+ ER_DZ_MC_DB_LWRD);
+ _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
+ ER_DZ_MC_DB_HWRD);
+}
+
+static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void
+efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* The datapath firmware might have been changed */
+ nic_data->must_check_datapath_caps = true;
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * statistic that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+
+ return -EIO;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
+ queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
+
+ if (queues == 0)
+ return IRQ_NONE;
+
+ if (likely(soft_enabled)) {
+ /* Note test interrupts */
+ if (queues & (1U << efx->irq_level))
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return IRQ_HANDLED;
+}
+
+static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+/* This writes to the TX_DESC_WPTR and also pushes data */
+static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned int write_ptr;
+ efx_oword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ efx_qword_t *txd;
+ int rc;
+ int i;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* A previous user of this TX queue might have set us up the
+ * bomb by writing a descriptor to the TX push collector but
+ * not the doorbell. (Each collector belongs to a port, not a
+ * queue or function, so cannot easily be reset.) We must
+ * attempt to push a no-op descriptor in its place.
+ */
+ tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
+ tx_queue->insert_count = 1;
+ txd = efx_tx_desc(tx_queue, 0);
+ EFX_POPULATE_QWORD_4(*txd,
+ ESF_DZ_TX_DESC_IS_OPT, true,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ tx_queue->write_count = 1;
+ wmb();
+ efx_ef10_push_tx_desc(tx_queue, txd);
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int write_ptr;
+ efx_qword_t *txd;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ if (buffer->flags & EFX_TX_BUF_OPTION) {
+ *txd = buffer->option;
+ } else {
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_3(
+ *txd,
+ ESF_DZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
+ ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ }
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
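+ /* If the generic code decides an immediate push is worthwhile
+ * (typically when the queue was previously empty), write the first
+ * new descriptor together with the doorbell; otherwise just update
+ * the write pointer.
+ */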
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_ef10_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_ef10_notify_tx_desc(tx_queue);
+ }
+}
+
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
+ EFX_MAX_CHANNELS);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ return 0;
+}
+
+static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc != 0);
+}
+
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) efx->rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
+ efx->rx_hash_key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
+ rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+ }
+
+ rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_2(*rxd,
+ ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
+ ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int write_count;
+ efx_dword_t reg;
+
+ /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
+ write_count = rx_queue->added_count & ~7;
+ if (rx_queue->notified_count == write_count)
+ return;
+
+ do
+ efx_ef10_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ while (++rx_queue->notified_count != write_count);
+
+ wmb();
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
+ write_count & rx_queue->ptr_mask);
+ efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
+ efx_rx_queue_index(rx_queue));
+}
+
+static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
+
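+/* Ask the MC to post a driver-generated REFILL event on this channel's
+ * event queue, so that the RX queue is refilled from the normal
+ * event-processing context rather than from here.
+ */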
+static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_REFILL);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
+ inbuf, sizeof(inbuf), 0,
+ efx_ef10_rx_defer_refill_complete, 0);
+}
+
+static void
+efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ /* nothing to do */
+}
+
+static int efx_ef10_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int efx_ef10_ev_init(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ struct efx_ef10_nic_data *nic_data;
+ bool supports_rx_merge;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ nic_data = efx->nic_data;
+ supports_rx_merge =
+ !!(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
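+ /* Cut-through delivery and RX event batching appear to be mutually
+ * exclusive, so only request cut-through when the datapath firmware
+ * does not support batching.
+ */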
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* IRQ return is ignored */
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
+ unsigned int rx_queue_label)
+{
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "rx event arrived on queue %d labeled as queue %u\n",
+ efx_rx_queue_index(rx_queue), rx_queue_label);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+static void
+efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
+ unsigned int actual, unsigned int expected)
+{
+ unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, actual, expected);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+/* A partially-received RX packet was aborted; clean up. */
+static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
+{
+ unsigned int rx_desc_ptr;
+
+ WARN_ON(rx_queue->scatter_n == 0);
+
+ netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
+ "scattered RX aborted (dropping %u buffers)\n",
+ rx_queue->scatter_n);
+
+ rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+
+ efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
+ 0, EFX_RX_PKT_DISCARD);
+
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+ ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
+}
+
+static int efx_ef10_handle_rx_event(struct efx_channel *channel,
+ const efx_qword_t *event)
+{
+ unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+ unsigned int n_descs, n_packets, i;
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue;
+ bool rx_cont;
+ u16 flags = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ /* Basic packet information */
+ rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
+ next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
+ rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+
+ WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
+ efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
+
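+ /* The event carries only the low bits of the next descriptor
+ * pointer, so compute how many descriptors this completion covers
+ * modulo the width of that field.
+ */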
+ n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+
+ if (n_descs != rx_queue->scatter_n + 1) {
+ /* detect rx abort */
+ if (unlikely(n_descs == rx_queue->scatter_n)) {
+ WARN_ON(rx_bytes != 0);
+ efx_ef10_handle_rx_abort(rx_queue);
+ return 0;
+ }
+
+ if (unlikely(rx_queue->scatter_n != 0)) {
+ /* Scattered packet completions cannot be
+ * merged, so something has gone wrong.
+ */
+ efx_ef10_handle_rx_bad_lbits(
+ rx_queue, next_ptr_lbits,
+ (rx_queue->removed_count +
+ rx_queue->scatter_n + 1) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+ return 0;
+ }
+
+ /* Merged completion for multiple non-scattered packets */
+ rx_queue->scatter_n = 1;
+ rx_queue->scatter_len = 0;
+ n_packets = n_descs;
+ ++channel->n_rx_merge_events;
+ channel->n_rx_merge_packets += n_packets;
+ flags |= EFX_RX_PKT_PREFIX_LEN;
+ } else {
+ ++rx_queue->scatter_n;
+ rx_queue->scatter_len += rx_bytes;
+ if (rx_cont)
+ return 0;
+ n_packets = 1;
+ }
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
+ flags |= EFX_RX_PKT_DISCARD;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
+ channel->n_rx_ip_hdr_chksum_err += n_packets;
+ } else if (unlikely(EFX_QWORD_FIELD(*event,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
+ channel->n_rx_tcp_udp_chksum_err += n_packets;
+ } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
+ flags |= EFX_RX_PKT_CSUMMED;
+ }
+
+ if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ flags |= EFX_RX_PKT_TCP;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ /* Handle received packet(s) */
+ for (i = 0; i < n_packets; i++) {
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_queue->scatter_len,
+ flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ }
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ return n_packets;
+}
+
+static int
+efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue;
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ int tx_descs = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
+ return 0;
+
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+ tx_queue = efx_channel_get_tx_queue(channel,
+ tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+
+ return tx_descs;
+}
+
+static void
+efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
+
+ switch (subcode) {
+ case ESE_DZ_DRV_TIMER_EV:
+ case ESE_DZ_DRV_WAKE_UP_EV:
+ break;
+ case ESE_DZ_DRV_START_UP_EV:
+ /* event queue init complete. ok. */
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, subcode,
+ EFX_QWORD_VAL(*event));
+
+ }
+}
+
+static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
+
+ switch (subcode) {
+ case EFX_EF10_TEST:
+ channel->event_test_cpu = raw_smp_processor_id();
+ break;
+ case EFX_EF10_REFILL:
+ /* The queue must be empty, so we won't receive any RX
+ * events and efx_process_channel() won't refill the
+ * queue. Refill it here.
+ */
+ efx_fast_push_rx_descriptors(&channel->rx_queue);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %u"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, (unsigned) subcode,
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event, *p_event;
+ unsigned int read_ptr;
+ int ev_code;
+ int tx_descs = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ break;
+
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ switch (ev_code) {
+ case ESE_DZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case ESE_DZ_EV_CODE_RX_EV:
+ spent += efx_ef10_handle_rx_event(channel, &event);
+ if (spent >= quota) {
+ /* XXX can we split a merged event to
+ * avoid going over-quota?
+ */
+ spent = quota;
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_TX_EV:
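+ /* A single TX event may complete many descriptors; treat a
+ * burst larger than the ring as having consumed the whole
+ * quota.
+ */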
+ tx_descs += efx_ef10_handle_tx_event(channel, &event);
+ if (tx_descs > efx->txq_entries) {
+ spent = quota;
+ goto out;
+ } else if (++spent == quota) {
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_DRIVER_EV:
+ efx_ef10_handle_driver_event(channel, &event);
+ if (++spent == quota)
+ goto out;
+ break;
+ case EFX_EF10_DRVGEN_EV:
+ efx_ef10_handle_driver_generated_event(channel, &event);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, ev_code,
+ EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+static void efx_ef10_ev_read_ack(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_dword_t rptr;
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
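+ /* With this workaround the event queue read pointer must be
+ * written through an indirect register in two halves: high bits
+ * first, then low bits.
+ */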
+ BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (channel->eventq_read_ptr &
+ channel->eventq_mask) >>
+ ERF_DD_EVQ_IND_RPTR_WIDTH);
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ channel->eventq_read_ptr &
+ ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
+ channel->eventq_read_ptr &
+ channel->eventq_mask);
+ efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
+ }
+}
+
+static void efx_ef10_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+void efx_ef10_handle_drain_event(struct efx_nic *efx)
+{
+ if (atomic_dec_and_test(&efx->active_queues))
+ wake_up(&efx->flush_wq);
+
+ WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+static int efx_ef10_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int pending;
+
+ /* If the MC has just rebooted, the TX/RX queues will have already been
+ * torn down, but efx->active_queues needs to be set to zero.
+ */
+ if (nic_data->must_realloc_vis) {
+ atomic_set(&efx->active_queues, 0);
+ return 0;
+ }
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_ef10_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_ef10_tx_fini(tx_queue);
+ }
+
+ wait_event_timeout(efx->flush_wq,
+ atomic_read(&efx->active_queues) == 0,
+ msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+ pending = atomic_read(&efx->active_queues);
+ if (pending) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+ pending);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
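+/* The match fields of efx_filter_spec are laid out contiguously from
+ * outer_vid to the end of the structure, so the equality check and hash
+ * below can treat them as a single block of memory.
+ */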
+static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+ /* XXX should we randomise the initval? */
+}
+
+/* Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
+
+static struct efx_filter_spec *
+efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static void
+efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
+static void efx_ef10_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ u32 match_fields = 0;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(spec->gen_field)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &spec->gen_field, sizeof(spec->gen_field)); \
+ }
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
+ spec->rss_context !=
+ EFX_FILTER_RSS_CONTEXT_DEFAULT ?
+ spec->rss_context : nic_data->rx_rss_context);
+}
+
+static int efx_ef10_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ u64 *handle, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ int rc;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ return rc;
+}
+
+static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_flags[match_pri] == match_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_filter_spec *saved_spec;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool replacing = false;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX)
+ return -EINVAL;
+
+ rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ if (rc < 0)
+ return rc;
+ match_pri = rc;
+
+ hash = efx_ef10_filter_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ unsigned int depth = 1;
+ unsigned int i;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec &
+ EFX_EF10_FILTER_FLAG_BUSY)
+ break;
+ if (spec->priority < saved_spec->priority &&
+ !(saved_spec->priority ==
+ EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ goto found;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+
+ /* Once we reach the maximum search depth, use
+ * the first suitable slot or return -EBUSY if
+ * there was none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+ goto found;
+ }
+
+ ++depth;
+ }
+
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+
+found:
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
+
+ /* Mark lower-priority multicast recipients busy prior to removal */
+ if (is_mc_recip) {
+ unsigned int depth, i;
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ if (test_bit(depth, mc_rem_map))
+ table->entry[i].spec |=
+ EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
+ replacing);
+
+ /* Finalise the software table entry */
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+ priv_flags = efx_ef10_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ spin_unlock_bh(&efx->filter_lock);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->filter_lock);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ } else {
+ priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ efx_ef10_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here on EF10 */
+}
+
+/* Remove a filter.
+ * If !stack_requested, remove by ID;
+ * if stack_requested, remove by index.
+ * The filter ID may come from userland and must be range-checked.
+ */
+static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, bool stack_requested)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ /* Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ */
+ for (;;) {
+ spin_lock_bh(&efx->filter_lock);
+ if (!(table->entry[filter_idx].spec &
+ EFX_EF10_FILTER_FLAG_BUSY))
+ break;
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec || spec->priority > priority ||
+ (!stack_requested &&
+ efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ filter_id / HUNT_FILTER_TBL_ROWS)) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
+ /* Reset steering of a stack-owned filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK);
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
+ rc = efx_ef10_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ true);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+}
+
+static int efx_ef10_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ const struct efx_filter_spec *saved_spec;
+ int rc;
+
+ spin_lock_bh(&efx->filter_lock);
+ saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ filter_id / HUNT_FILTER_TBL_ROWS) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ /* TODO */
+}
+
+static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
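+/* Filter IDs returned to callers encode both the match priority and the
+ * table row: ID = match_pri * HUNT_FILTER_TBL_ROWS + filter_idx, so the
+ * ID space is bounded by the number of supported match types times the
+ * table size.
+ */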
+static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+}
+
+static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] = (efx_ef10_filter_rx_match_pri(
+ table, spec->match_flags) *
+ HUNT_FILTER_TBL_ROWS +
+ filter_idx);
+ }
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
+
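+/* For RFS, the MC_CMD_FILTER_OP request is issued asynchronously; the
+ * software table entry is finalised (or discarded on failure) in
+ * efx_ef10_filter_rfs_insert_complete().
+ */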
+static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *saved_spec;
+ unsigned int hash, i, depth = 1;
+ bool replacing = false;
+ int ins_index = -1;
+ u64 cookie;
+ s32 rc;
+
+ /* Must be an RX filter without RSS and not for a multicast
+ * destination address (RFS only works for connected sockets).
+ * These restrictions allow us to pass only a tiny amount of
+ * data through to the completion function.
+ */
+ EFX_WARN_ON_PARANOID(spec->flags !=
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
+ EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
+ EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
+
+ hash = efx_ef10_filter_hash(spec);
+
+ spin_lock_bh(&efx->filter_lock);
+
+ /* Find any existing filter with the same match tuple or else
+ * a free slot to insert at. If an existing filter is busy,
+ * we have to give up.
+ */
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ EFX_WARN_ON_PARANOID(saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK);
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto fail_unlock;
+ }
+ ins_index = i;
+ break;
+ }
+
+ /* Once we reach the maximum search depth, use the
+ * first suitable slot or return -EBUSY if there was
+ * none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ break;
+ }
+
+ ++depth;
+ }
+
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ replacing = true;
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto fail_unlock;
+ }
+ *saved_spec = *spec;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ EFX_EF10_FILTER_FLAG_BUSY);
+
+ spin_unlock_bh(&efx->filter_lock);
+
+	/* Pack up the variables needed on completion: bit 31 holds the
+	 * replacing flag, bits 30:16 the insertion index and bits 15:0
+	 * the destination queue ID.
+	 */
+	cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf,
+ table->entry[ins_index].handle, replacing);
+ efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ MC_CMD_FILTER_OP_OUT_LEN,
+ efx_ef10_filter_rfs_insert_complete, cookie);
+
+ return ins_index;
+
+fail_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void
+efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int ins_index, dmaq_id;
+ struct efx_filter_spec *spec;
+ bool replacing;
+
+ /* Unpack the cookie */
+ replacing = cookie >> 31;
+ ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
+ dmaq_id = cookie & 0xffff;
+
+ spin_lock_bh(&efx->filter_lock);
+ spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (rc == 0) {
+ table->entry[ins_index].handle =
+ MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (replacing)
+ spec->dmaq_id = dmaq_id;
+ } else if (!replacing) {
+ kfree(spec);
+ spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, spec, 0);
+ spin_unlock_bh(&efx->filter_lock);
+
+ wake_up_all(&table->waitq);
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual);
+
+static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+
+ if (!spec ||
+ (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
+ spec->priority != EFX_FILTER_PRI_HINT ||
+ !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+ flow_id, filter_idx))
+ return false;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
+ efx_ef10_filter_rfs_expire_complete, filter_idx))
+ return false;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ return true;
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
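+/* Translate a bitmask of MCDI match-field flags into the driver's generic
+ * match flags; returns -EINVAL if any MCDI flag has no generic equivalent.
+ */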
+static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ }
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ struct efx_ef10_filter_table *table;
+ size_t outlen;
+ int rc;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+ table->rx_match_count = 0;
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_flags[table->rx_match_count++] = rc;
+ }
+ }
+
+ table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ efx->filter_state = table;
+ init_waitqueue_head(&table->waitq);
+ return 0;
+
+fail:
+ kfree(table);
+ return rc;
+}
+
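+/* Re-push all filters in the software table to the MC when
+ * nic_data->must_restore_filters is set; entries that cannot be restored
+ * are removed from the table.
+ */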
+static void efx_ef10_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ bool failed = false;
+ int rc;
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ false);
+ if (rc)
+ failed = true;
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ table->entry[filter_idx].spec &=
+ ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore all filters\n");
+ else
+ nic_data->must_restore_filters = false;
+}
+
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ WARN_ON(rc != 0);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
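+/* Synchronise the stack-owned filters with the net device's address lists:
+ * insert or renew unicast and multicast filters, fall back to the default
+ * (promiscuous) filters if a list is too long or promiscuity is requested,
+ * and finally remove any old filters that were not renewed.
+ */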
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_filter_spec spec;
+ bool remove_failed = false;
+ struct netdev_hw_addr *uc;
+ struct netdev_hw_addr *mc;
+ unsigned int filter_idx;
+ int i, n, rc;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ /* Mark old filters that may need to be removed */
+ spin_lock_bh(&efx->filter_lock);
+ n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ if (net_dev->flags & IFF_PROMISC ||
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
+ table->stack_uc_count = -1;
+ } else {
+ table->stack_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ ETH_ALEN);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ memcpy(table->stack_uc_list[i].addr,
+ uc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
+ table->stack_mc_count = -1;
+ } else {
+ table->stack_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->stack_mc_list[0].addr);
+ i = 1;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ memcpy(table->stack_mc_list[i].addr,
+ mc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (table->stack_uc_count >= 0) {
+ for (i = 0; i < table->stack_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_uc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to unicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_uc_list[i].id);
+ table->stack_uc_count = -1;
+ break;
+ }
+ table->stack_uc_list[i].id = rc;
+ }
+ }
+ if (table->stack_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_uc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_uc_count = 0;
+ } else {
+ table->stack_uc_list[0].id = rc;
+ }
+ }
+
+ /* Insert/renew multicast filters */
+ if (table->stack_mc_count >= 0) {
+ for (i = 0; i < table->stack_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_mc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to multicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_mc_list[i].id);
+ table->stack_mc_count = -1;
+ break;
+ }
+ table->stack_mc_list[i].id = rc;
+ }
+ }
+ if (table->stack_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_mc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_mc_count = 0;
+ } else {
+ table->stack_mc_list[0].id = rc;
+ }
+ }
+
+ /* Remove filters that weren't renewed. Since nothing else
+ * changes the STACK_OLD flag or removes these filters, we
+ * don't need to hold the filter_lock while scanning for
+ * these filters.
+ */
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ if (ACCESS_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_STACK_OLD) {
+ if (efx_ef10_filter_remove_internal(efx,
+ EFX_FILTER_PRI_REQUIRED,
+ i, true) < 0)
+ remove_failed = true;
+ }
+ }
+ WARN_ON(remove_failed);
+}
+
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return efx_mcdi_set_mac(efx);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+struct efx_ef10_nvram_type_info {
+ u16 type, type_mask;
+ u8 port;
+ const char *name;
+};
+
+static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
+ { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
+ { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+};
+
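+/* Identify the NVRAM partition type, query its size and metadata from the
+ * MC, and fill in the corresponding MTD partition; protected partitions and
+ * partitions belonging to other ports are hidden.
+ */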
+static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
+ const struct efx_ef10_nvram_type_info *info;
+ size_t size, erase_size, outlen;
+ bool protected;
+ int rc;
+
+ for (info = efx_ef10_nvram_types; ; info++) {
+ if (info ==
+ efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ return -ENODEV;
+ if ((type & ~info->type_mask) == info->type)
+ break;
+ }
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ part->fw_subtype = MCDI_DWORD(outbuf,
+ NVRAM_METADATA_OUT_SUBTYPE);
+
+ part->common.dev_type_name = "EF10 NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int efx_ef10_mtd_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ struct efx_mcdi_mtd_partition *parts;
+ size_t outlen, n_parts_total, i, n_parts;
+ unsigned int type;
+ int rc;
+
+ ASSERT_RTNL();
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
+ return -EIO;
+
+ n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+ if (n_parts_total >
+ MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
+ return -EIO;
+
+ parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ n_parts = 0;
+ for (i = 0; i < n_parts_total; i++) {
+ type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
+ i);
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
+}
+
+const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ /* TODO: test_chip */
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+};
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 00000000000..b3f4e3755fd
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF10_REGS_H
+#define EFX_EF10_REGS_H
+
+/* EF10 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ * MMIO register Host memory structure
+ * -------------------------------------------------------------
+ * Address R
+ * Bitfield RF SF
+ * Enumerator FE SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * D: Huntington A0
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
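+
+/* For example, ERF_DZ_EVQ_RPTR_LBN/_WIDTH below describe an MMIO register
+ * bitfield ("RF") present from Huntington A0 ("D") onwards ("Z").
+ */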
+
+/**************************************************************************
+ *
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* BIU_HW_REV_ID_REG: */
+#define ER_DZ_BIU_HW_REV_ID 0x00000000
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+/* BIU_MC_SFT_STATUS_REG: */
+#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
+#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+/* BIU_INT_ISR_REG: */
+#define ER_DZ_BIU_INT_ISR 0x00000090
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+/* MC_DB_LWRD_REG: */
+#define ER_DZ_MC_DB_LWRD 0x00000200
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+/* MC_DB_HWRD_REG: */
+#define ER_DZ_MC_DB_HWRD 0x00000204
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+/* EVQ_RPTR_REG: */
+#define ER_DZ_EVQ_RPTR 0x00000400
+#define ER_DZ_EVQ_RPTR_STEP 8192
+#define ER_DZ_EVQ_RPTR_ROWS 2048
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+/* EVQ_TMR_REG: */
+#define ER_DZ_EVQ_TMR 0x00000420
+#define ER_DZ_EVQ_TMR_STEP 8192
+#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+/* RX_DESC_UPD_REG: */
+#define ER_DZ_RX_DESC_UPD 0x00000830
+#define ER_DZ_RX_DESC_UPD_STEP 8192
+#define ER_DZ_RX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/* TX_DESC_UPD_REG: */
+#define ER_DZ_TX_DESC_UPD 0x00000a10
+#define ER_DZ_TX_DESC_UPD_STEP 8192
+#define ER_DZ_TX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+/* EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+/* MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+/* RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_RX_EV_RSVD2_LBN 54
+#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DZ_RX_EV_SOFT1_LBN 32
+#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
+#define ESF_DZ_RX_EV_RSVD1_LBN 31
+#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_DZ_RX_ABORT_LBN 30
+#define ESF_DZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+/* RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+/* RX_USER_DESC */
+#define ESF_DZ_RX_USR_RESERVED_LBN 62
+#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
+#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+
+/* TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+/* TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_TX_EV_RSVD_LBN 48
+#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DZ_TX_CAN_MERGE_LBN 31
+#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_DZ_TX_SOFT1_LBN 24
+#define ESF_DZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+/* TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+/* TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+/* TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+/* TX_USER_DESC */
+#define ESF_DZ_TX_USR_TYPE_LBN 63
+#define ESF_DZ_TX_USR_TYPE_WIDTH 1
+#define ESF_DZ_TX_USR_CONT_LBN 62
+#define ESF_DZ_TX_USR_CONT_WIDTH 1
+#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+/*************************************************************************/
+
+/* TX_DESC_UPD_REG: Transmit descriptor update register.
+ * We may write just one dword of these registers.
+ */
+#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
+
+/* The workaround for bug 35388 requires multiplexing writes through
+ * the TX_DESC_UPD_DWORD address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
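+
+/* For example, updating an event queue read pointer through this window
+ * takes two dword writes: one with EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH and the
+ * upper 8 bits of the pointer, and one with EFE_DD_EVQ_IND_RPTR_FLAGS_LOW
+ * and the lower 8 bits.
+ */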
+
+/* TX_PIOBUF
+ * PIO buffer aperture (paged)
+ */
+#define ER_DZ_TX_PIOBUF 4096
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+
+/* RX packet prefix */
+#define ES_DZ_RX_PREFIX_HASH_OFST 0
+#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
+#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
+#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
+#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
+#define ES_DZ_RX_PREFIX_SIZE 14
+
+#endif /* EFX_EF10_REGS_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1..07c9bc4c61b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
-#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
@@ -81,8 +80,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
@@ -191,8 +189,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +246,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- if (rx_queue->enabled)
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue);
}
return spent;
}
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
- /* The interrupt handler for this channel may set work_pending
- * as soon as we acknowledge the events we've seen. Make sure
- * it's cleared before then. */
- channel->work_pending = false;
- smp_wmb();
-
- efx_nic_eventq_read_ack(channel);
-}
-
/* NAPI poll handler
*
* NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +296,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
- * since efx_channel_processed() will have no effect if
+ * since efx_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
napi_complete(napi);
- efx_channel_processed(channel);
+ efx_nic_eventq_read_ack(channel);
}
return spent;
}
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack ) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test. It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- BUG_ON(channel->channel >= efx->n_channels);
- BUG_ON(!channel->enabled);
- BUG_ON(!efx->loopback_selftest);
-
- /* Disable interrupts and wait for ISRs to complete */
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- /* Wait for any NAPI processing to complete */
- napi_disable(&channel->napi_str);
-
- /* Poll the channel */
- efx_process_channel(channel, channel->eventq_mask + 1);
-
- /* Ack the eventq. This may cause an interrupt to be generated
- * when they are reenabled */
- efx_channel_processed(channel);
-
- napi_enable(&channel->napi_str);
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-}
-
/* Create event queue
* Event queue memory allocations are done only once. If the channel
* is reset, the memory buffer will be reused; this guards against
@@ -391,14 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
}
/* Prepare channel's event queue */
-static void efx_init_eventq(struct efx_channel *channel)
+static int efx_init_eventq(struct efx_channel *channel)
{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d init event queue\n", channel->channel);
+ struct efx_nic *efx = channel->efx;
+ int rc;
- channel->eventq_read_ptr = 0;
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
- efx_nic_init_eventq(channel);
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
}
/* Enable event queue processing and NAPI */
@@ -407,11 +354,7 @@ static void efx_start_eventq(struct efx_channel *channel)
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
- /* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set.
- */
- channel->work_pending = false;
+ /* Make sure the NAPI handler sees the enabled flag set */
channel->enabled = true;
smp_wmb();
@@ -431,10 +374,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
+ if (!channel->eventq_init)
+ return;
+
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
}
static void efx_remove_eventq(struct efx_channel *channel)
@@ -583,8 +530,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
- efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]));
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
}
static int efx_probe_channels(struct efx_nic *efx)
@@ -634,13 +581,13 @@ static void efx_start_datapath(struct efx_nic *efx)
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
NET_IP_ALIGN + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = false;
+ efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -668,9 +615,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
- /* RX filters also have scatter-enabled flags */
+ /* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
- efx_filter_update_rx_scatter(efx);
+ efx->type->filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -684,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
efx_nic_generate_fill_event(rx_queue);
}
@@ -704,30 +654,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- struct pci_dev *dev = efx->pci_dev;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
- /* Only perform flush if dma is enabled */
- if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
- rc = efx_nic_flush_queues(efx);
-
- if (rc && EFX_WORKAROUND_7803(efx)) {
- /* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset. */
- netif_err(efx, drv, efx->net_dev,
- "Resetting to recover from flush failure\n");
- efx_schedule_reset(efx, RESET_TYPE_ALL);
- } else if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
}
efx_for_each_channel(channel, efx) {
@@ -741,7 +676,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_stop_eventq(channel);
efx_start_eventq(channel);
}
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+ efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -779,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
- int rc;
+ int rc, rc2;
rc = efx_check_disabled(efx);
if (rc)
@@ -809,7 +763,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, true);
+ efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
@@ -859,9 +813,16 @@ out:
}
}
- efx_start_interrupts(efx, true);
- efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ }
return rc;
rollback:
@@ -931,10 +892,9 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up)
netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ "link up at %uMbps %s-duplex (MTU %d)\n",
link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
+ efx->net_dev->mtu);
else
netif_info(efx, link, efx->net_dev, "link down\n");
}
@@ -983,10 +943,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- /* Serialise the promiscuous flag with efx_set_rx_mode. */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
@@ -1144,6 +1100,7 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
+ unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1196,20 +1153,18 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1288,8 +1243,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
- unsigned int max_channels =
- min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0;
unsigned int i, j;
int rc;
@@ -1306,7 +1259,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
- n_channels = min(n_channels, max_channels);
+ n_channels = min(n_channels, efx->max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
@@ -1392,31 +1345,42 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
- struct efx_channel *channel;
+ struct efx_channel *channel, *end_channel;
+ int rc;
BUG_ON(efx->state == STATE_DISABLED);
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
+ efx->irq_soft_enabled = true;
+ smp_wmb();
efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq || !may_keep_eventq)
- efx_init_eventq(channel);
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
efx_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
}
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1425,20 +1389,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
efx_mcdi_mode_poll(efx);
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
- if (!channel->type->keep_eventq || !may_keep_eventq)
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+static int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
efx_fini_eventq(channel);
}
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+static void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
}
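As an editorial reading aid only (not part of the patch), the split between the soft and full interrupt helpers introduced above can be summarised as follows, derived solely from the hunks shown here:

/* Call structure introduced above (reading aid, not in the patch):
 *
 * efx_enable_interrupts()
 *   efx->type->irq_enable_master()
 *   efx_init_eventq() for channels with type->keep_eventq
 *   efx_soft_enable_interrupts()
 *     irq_soft_enabled = true
 *     efx_init_eventq() + efx_start_eventq() for the remaining channels
 *     efx_mcdi_mode_event()
 *
 * efx_disable_interrupts() undoes this in reverse:
 *   efx_soft_disable_interrupts()   (irq_soft_enabled = false, synchronize IRQs,
 *                                    stop/fini per-channel event queues,
 *                                    efx_mcdi_flush_async())
 *   efx_fini_eventq() for keep_eventq channels
 *   efx->type->irq_disable_non_ev()
 */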
static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1495,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
rc = efx_probe_interrupts(efx);
if (rc)
- goto fail;
+ goto fail1;
- efx->type->dimension_resources(efx);
+ rc = efx->type->dimension_resources(efx);
+ if (rc)
+ goto fail2;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1515,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
return 0;
-fail:
+fail2:
+ efx_remove_interrupts(efx);
+fail1:
efx->type->remove(efx);
return rc;
}
@@ -1528,6 +1555,44 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
+static int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ spin_lock_init(&efx->filter_lock);
+
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*efx->rps_flow_id),
+ GFP_KERNEL);
+ if (!efx->rps_flow_id) {
+ efx->type->filter_table_remove(efx);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_flow_id);
+#endif
+ efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+ efx->type->filter_table_restore(efx);
+}
+
/**************************************************************************
*
* NIC startup/shutdown
@@ -1917,34 +1982,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
-
- efx->type->update_stats(efx);
-
- stats->rx_packets = mac_stats->rx_packets;
- stats->tx_packets = mac_stats->tx_packets;
- stats->rx_bytes = mac_stats->rx_bytes;
- stats->tx_bytes = mac_stats->tx_bytes;
- stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
- stats->multicast = mac_stats->rx_multicast;
- stats->collisions = mac_stats->tx_collision;
- stats->rx_length_errors = (mac_stats->rx_gtjumbo +
- mac_stats->rx_length_error);
- stats->rx_crc_errors = mac_stats->rx_bad;
- stats->rx_frame_errors = mac_stats->rx_align_error;
- stats->rx_fifo_errors = mac_stats->rx_overflow;
- stats->rx_missed_errors = mac_stats->rx_missed;
- stats->tx_window_errors = mac_stats->tx_late_collision;
-
- stats->rx_errors = (stats->rx_length_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- mac_stats->rx_symbol_error);
- stats->tx_errors = (stats->tx_window_errors +
- mac_stats->tx_bad);
-
+ efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
return stats;
@@ -2018,30 +2058,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct netdev_hw_addr *ha;
- union efx_multicast_hash *mc_hash = &efx->multicast_hash;
- u32 crc;
- int bit;
-
- efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
-
- /* Build multicast hash table */
- if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
- memset(mc_hash, 0xff, sizeof(*mc_hash));
- } else {
- memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(ha, net_dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
- __set_bit_le(bit, mc_hash);
- }
-
- /* Broadcast packets go through the multicast hash filter.
- * ether_crc_le() of the broadcast address is 0xbe2612ff
- * so we always add bit 0xff to the mask.
- */
- __set_bit_le(0xff, mc_hash);
- }
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -2059,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_netdev_ops = {
+static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2086,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
};
+static const struct net_device_ops efx_ef10_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_rx_mode,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2098,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (net_dev->netdev_ops == &efx_netdev_ops &&
+ if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
+ net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2125,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->priv_flags |= IFF_UNICAST_FLT;
+ } else {
+ net_dev->netdev_ops = &efx_farch_netdev_ops;
+ }
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2185,22 +2227,11 @@ fail_locked:
static void efx_unregister_netdev(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
- /* Free up any skbs still remaining. This has to happen before
- * we try to unregister the netdev as running their destructors
- * may be needed to get the device ref. count to 0. */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_release_tx_buffers(tx_queue);
- }
-
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2223,7 +2254,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2260,9 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n");
}
- efx->type->reconfigure_mac(efx);
-
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
efx_restore_filters(efx);
efx_sriov_reset(efx);
@@ -2458,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
};
@@ -2516,6 +2549,9 @@ static int efx_init_struct(struct efx_nic *efx,
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2527,10 +2563,10 @@ static int efx_init_struct(struct efx_nic *efx,
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
}
- EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
-
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
@@ -2579,7 +2615,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -2601,7 +2637,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
dev_close(efx->net_dev);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
rtnl_unlock();
efx_sriov_fini(efx);
@@ -2703,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
return 0;
+ fail6:
+ efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
fail4:
@@ -2824,7 +2864,7 @@ static int efx_pm_freeze(struct device *dev)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
}
rtnl_unlock();
@@ -2834,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
+ int rc;
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
@@ -2860,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
queue_work(reset_workqueue, &efx->reset_work);
return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
}
static int efx_pm_poweroff(struct device *dev)
@@ -2896,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
rc = efx->type->init(efx);
if (rc)
return rc;
- efx_pm_thaw(dev);
- return 0;
+ rc = efx_pm_thaw(dev);
+ return rc;
}
static int efx_pm_suspend(struct device *dev)
@@ -2942,7 +2990,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index bdb30bbb0c9..34d00f5771f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
@@ -69,27 +68,99 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
-extern int efx_probe_filters(struct efx_nic *efx);
-extern void efx_restore_filters(struct efx_nic *efx);
-extern void efx_remove_filters(struct efx_nic *efx);
-extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_filter_insert_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace);
-extern int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
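A hedged usage sketch, not taken from the patch: it builds a filter spec the same way the ethtool hunks later in this diff do, then inserts it through the new method-table wrapper. The function name, IP address and port are illustrative placeholders.

/* Sketch only: steer TCP/IPv4 traffic for one local address/port to RX queue 0.
 * Field and flag names follow the ethtool hunks in this patch; error handling
 * is minimal.
 */
static int example_insert_tcp_filter(struct efx_nic *efx, __be32 ip, __be16 port)
{
	struct efx_filter_spec spec;
	s32 filter_id;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0 /* RX queue */);
	spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
			    EFX_FILTER_MATCH_IP_PROTO |
			    EFX_FILTER_MATCH_LOC_HOST |
			    EFX_FILTER_MATCH_LOC_PORT);
	spec.ether_type = htons(ETH_P_IP);
	spec.ip_proto = IPPROTO_TCP;
	spec.loc_host[0] = ip;
	spec.loc_port = port;

	filter_id = efx_filter_insert_filter(efx, &spec, true);
	return filter_id < 0 ? filter_id : 0;
}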
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+static inline void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_clear_rx(efx, priority);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
@@ -105,11 +176,11 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
+extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -141,7 +212,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_probe(struct efx_nic *efx);
+extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else
@@ -155,7 +231,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
- channel->work_pending = true;
napi_schedule(&channel->napi_str);
}
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index ab8fb5889e5..7fdfee01909 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
- * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
- * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
@@ -163,8 +162,7 @@ enum reset_type {
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
- RESET_TYPE_RX_DESC_FETCH,
- RESET_TYPE_TX_DESC_FETCH,
+ RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1fc21458413..5b471cf5c32 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,14 +19,9 @@
#include "filter.h"
#include "nic.h"
-struct ethtool_string {
- char name[ETH_GSTRING_LEN];
-};
-
-struct efx_ethtool_stat {
+struct efx_sw_stat_desc {
const char *name;
enum {
- EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
EFX_ETHTOOL_STAT_SOURCE_channel,
EFX_ETHTOOL_STAT_SOURCE_tx_queue
@@ -35,7 +30,7 @@ struct efx_ethtool_stat {
u64(*get_stat) (void *field); /* Reader function */
};
-/* Initialiser for a struct #efx_ethtool_stat with type-checking */
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
get_stat_function) { \
.name = #stat_name, \
@@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
return *(unsigned int *)field;
}
-static u64 efx_get_u64_stat(void *field)
-{
- return *(u64 *) field;
-}
-
static u64 efx_get_atomic_stat(void *field)
{
return atomic_read((atomic_t *) field);
}
-#define EFX_ETHTOOL_U64_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- u64, efx_get_u64_stat)
-
-#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
- EFX_ETHTOOL_STAT(name, nic, n_##name, \
- unsigned int, efx_get_uint_stat)
-
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
EFX_ETHTOOL_STAT(field, nic, field, \
atomic_t, efx_get_atomic_stat)
@@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
-static const struct efx_ethtool_stat efx_ethtool_stats[] = {
- EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(tx_control),
- EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
- EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
- EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(rx_control),
- EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
- EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
- EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
- EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
- EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -155,10 +77,11 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};
-/* Number of ethtool statistics */
-#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
@@ -205,8 +128,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* GMAC does not support 1000Mbps HD */
- ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -291,12 +212,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
*
* Fill in an individual self-test entry.
*/
-static void efx_fill_test(unsigned int test_index,
- struct ethtool_string *strings, u64 *data,
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
- struct ethtool_string unit_str, test_str;
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
/* Fill data value, if applicable */
if (data)
@@ -305,15 +225,14 @@ static void efx_fill_test(unsigned int test_index,
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
- snprintf(unit_str.name, sizeof(unit_str.name),
+ snprintf(unit_str, sizeof(unit_str),
unit_format, unit_id);
else
- strcpy(unit_str.name, unit_format);
- snprintf(test_str.name, sizeof(test_str.name),
- test_format, test_id);
- snprintf(strings[test_index].name,
- sizeof(strings[test_index].name),
- "%-6s %-24s", unit_str.name, test_str.name);
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
}
}
@@ -336,7 +255,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
enum efx_loopback_mode mode,
unsigned int test_index,
- struct ethtool_string *strings, u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel =
efx_get_channel(efx, efx->tx_channel_offset);
@@ -373,8 +292,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
- struct ethtool_string *strings,
- u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
@@ -433,12 +351,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
int string_set)
{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
switch (string_set) {
case ETH_SS_STATS:
- return EFX_ETHTOOL_NUM_STATS;
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT;
case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
- NULL, NULL, NULL);
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
return -EINVAL;
}
@@ -448,20 +368,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_string *ethtool_strings =
- (struct ethtool_string *)strings;
int i;
switch (string_set) {
case ETH_SS_STATS:
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strlcpy(ethtool_strings[i].name,
- efx_ethtool_stats[i].name,
- sizeof(ethtool_strings[i].name));
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
break;
case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL,
- ethtool_strings, NULL);
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
default:
/* No other string sets */
@@ -474,27 +392,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
- const struct efx_ethtool_stat *stat;
+ const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
int i;
- EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
-
spin_lock_bh(&efx->stats_lock);
- /* Update MAC and NIC statistics */
- efx->type->update_stats(efx);
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
- /* Fill detailed statistics buffer */
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
- stat = &efx_ethtool_stats[i];
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
- data[i] = stat->get_stat((void *)mac_stats +
- stat->offset);
- break;
case EFX_ETHTOOL_STAT_SOURCE_nic:
data[i] = stat->get_stat((void *)efx + stat->offset);
break;
@@ -709,7 +620,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
- bool reset;
int rc = 0;
mutex_lock(&efx->mac_lock);
@@ -732,24 +642,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- /* TX flow control may automatically turn itself off if the
- * link partner (intermittently) stops responding to pause
- * frames. There isn't any indication that this has happened,
- * so the best we do is leave it up to the user to spot this
- * and fix it be cycling transmit flow control on this end. */
- reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
- if (EFX_WORKAROUND_11482(efx) && reset) {
- if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
- /* Recover by resetting the EM block */
- falcon_stop_nic_stats(efx);
- falcon_drain_tx_fifo(efx);
- falcon_reconfigure_xmac(efx);
- falcon_start_nic_stats(efx);
- } else {
- /* Schedule a reset to recover */
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
- }
- }
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
old_adv = efx->link_advertising;
old_fc = efx->wanted_fc;
@@ -814,11 +710,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
-/* MAC address mask including only MC flag */
-static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
@@ -828,8 +725,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
- u16 vid;
- u8 proto;
int rc;
rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -837,44 +732,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
if (rc)
return rc;
- if (spec.dmaq_id == 0xfff)
+ if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
rule->ring_cookie = RX_CLS_FLOW_DISC;
else
rule->ring_cookie = spec.dmaq_id;
- if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
- rule->flow_type = ETHER_FLOW;
- memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
- if (spec.type == EFX_FILTER_MC_DEF)
- memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
- return 0;
- }
-
- rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
- if (rc == 0) {
+ if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_OUTER_VID))) {
rule->flow_type = ETHER_FLOW;
- memset(mac_mask->h_dest, ~0, ETH_ALEN);
- if (vid != EFX_FILTER_VID_UNSPEC) {
- rule->flow_type |= FLOW_EXT;
- rule->h_ext.vlan_tci = htons(vid);
- rule->m_ext.vlan_tci = htons(0xfff);
+ if (spec.match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+ memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+ memset(mac_mask->h_dest, ~0, ETH_ALEN);
+ else
+ memcpy(mac_mask->h_dest, mac_addr_ig_mask,
+ ETH_ALEN);
}
- return 0;
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+ memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
+ memset(mac_mask->h_source, ~0, ETH_ALEN);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
}
- rc = efx_filter_get_ipv4_local(&spec, &proto,
- &ip_entry->ip4dst, &ip_entry->pdst);
- if (rc != 0) {
- rc = efx_filter_get_ipv4_full(
- &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
- &ip_entry->ip4src, &ip_entry->psrc);
- EFX_WARN_ON_PARANOID(rc);
- ip_mask->ip4src = IP4_ADDR_FULL_MASK;
- ip_mask->psrc = PORT_FULL_MASK;
+ if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
}
- rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
- ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
- ip_mask->pdst = PORT_FULL_MASK;
+
return rc;
}
@@ -982,82 +905,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
- 0xfff : rule->ring_cookie);
+ EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
- switch (rule->flow_type) {
+ switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- case UDP_V4_FLOW: {
- u8 proto = (rule->flow_type == TCP_V4_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
-
- /* Must match all of destination, */
- if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
- ip_mask->pdst == PORT_FULL_MASK))
- return -EINVAL;
- /* all or none of source, */
- if ((ip_mask->ip4src || ip_mask->psrc) &&
- !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
- ip_mask->psrc == PORT_FULL_MASK))
- return -EINVAL;
- /* and nothing else */
- if (ip_mask->tos || rule->m_ext.vlan_tci)
+ case UDP_V4_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
return -EINVAL;
-
- if (ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst,
- ip_entry->ip4src,
- ip_entry->psrc);
- else
- rc = efx_filter_set_ipv4_local(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst);
- if (rc)
- return rc;
break;
- }
-
- case ETHER_FLOW | FLOW_EXT:
- case ETHER_FLOW: {
- u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
- ntohs(rule->m_ext.vlan_tci) : 0);
-
- /* Must not match on source address or Ethertype */
- if (!is_zero_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto)
- return -EINVAL;
- /* Is it a default UC or MC filter? */
- if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
- vlan_tag_mask == 0) {
- if (is_multicast_ether_addr(mac_entry->h_dest))
- rc = efx_filter_set_mc_def(&spec);
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
- rc = efx_filter_set_uc_def(&spec);
+ return -EINVAL;
+ memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
}
- /* Otherwise, it must match all of destination and all
- * or none of VID.
- */
- else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
- (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
- rc = efx_filter_set_eth_local(
- &spec,
- vlan_tag_mask ?
- ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
- mac_entry->h_dest);
- } else {
- rc = -EINVAL;
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
}
- if (rc)
- return rc;
break;
- }
default:
return -EINVAL;
}
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 71998e7995d..ff5d322b9b4 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,17 +19,284 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
-#include "spi.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
+#include "mdio_10g.h"
/* Hardware control for SFC4000 (aka Falcon). */
+/**************************************************************************
+ *
+ * NIC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
+ (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
+
+#define FALCON_DMA_STAT(ext_name, hw_name) \
+ [FALCON_STAT_ ## ext_name] = \
+ { #ext_name, \
+ /* 48-bit stats are zero-padded to 64 on DMA */ \
+ hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
+ hw_name ## _ ## offset }
+#define FALCON_OTHER_STAT(ext_name) \
+ [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+ FALCON_DMA_STAT(tx_bytes, XgTxOctets),
+ FALCON_DMA_STAT(tx_packets, XgTxPkts),
+ FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
+ FALCON_DMA_STAT(tx_control, XgTxControlPkts),
+ FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
+ FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
+ FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
+ FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
+ FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
+ FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
+ FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
+ FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
+ FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
+ FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
+ FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
+ FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
+ FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
+ FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
+ FALCON_DMA_STAT(rx_bytes, XgRxOctets),
+ FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
+ FALCON_OTHER_STAT(rx_bad_bytes),
+ FALCON_DMA_STAT(rx_packets, XgRxPkts),
+ FALCON_DMA_STAT(rx_good, XgRxPktsOK),
+ FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
+ FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
+ FALCON_DMA_STAT(rx_control, XgRxControlPkts),
+ FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
+ FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
+ FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
+ FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
+ FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
+ FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
+ FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
+ FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
+ FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
+ FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
+ FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
+ FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
+ FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
+ FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
+ FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
+ FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
+ FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
+ FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
+ FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+};
+static const unsigned long falcon_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
+};
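For illustration only, one entry of the table above expands as follows under the macro and register definitions earlier in this hunk; 48-bit hardware counters are promoted to 64 bits because they are zero-padded on DMA:

/* FALCON_DMA_STAT(tx_bytes, XgTxOctets) expands (after the ?: folds) to: */
[FALCON_STAT_tx_bytes] = { "tx_bytes", 64, XgTxOctets_offset /* 0x88 */ },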
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
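A hedged sketch of how these status bits are used, modelled on the falcon_spi_wait_write() body that this patch removes from the public interface; the timeout handling of the original is omitted for brevity, and the function name is illustrative:

/* Poll the SPI status register until the device clears its busy flag.
 * falcon_spi_cmd() is the local helper made static later in this patch.
 */
static int example_spi_wait_ready(struct efx_nic *efx,
				  const struct falcon_spi_device *spi)
{
	u8 status;
	int rc;

	do {
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
	} while (status & SPI_STATUS_NRDY);	/* device still busy */

	return 0;
}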
+
+/**************************************************************************
+ *
+ * Non-volatile memory layout
+ *
+ **************************************************************************
+ */
+
+/* SFC4000 flash is partitioned into:
+ * 0-0x400 chip and board config (see struct falcon_nvconfig)
+ * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
+ * 0x8000-end boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ * 0-0x400 chip and board config
+ * configurable VPD
+ * 0x800-0x1800 boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
+#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+ __le16 nports;
+ u8 port0_phy_addr;
+ u8 port0_phy_type;
+ u8 port1_phy_addr;
+ u8 port1_phy_type;
+ __le16 asic_sub_revision;
+ __le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
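A worked expansion, purely as a reading aid for the macro above; the numeric result follows directly from the LBN/WIDTH definitions:

/* SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_ADDR_LEN)
 *   == ((type) >> 6) & 0x3        (LBN 6, WIDTH 2)
 * i.e. it extracts the two-bit address-length field from a spi_device_type
 * word such as board_v3.spi_device_type[0].
 */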
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ u8 mac_address[2][8]; /* 0x310 */
+ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ efx_oword_t hw_init_reg; /* 0x350 */
+ efx_oword_t nic_stat_reg; /* 0x360 */
+ efx_oword_t glb_ctl_reg; /* 0x370 */
+ efx_oword_t srm_cfg_reg; /* 0x380 */
+ efx_oword_t spare_reg; /* 0x390 */
+ __le16 board_magic_num; /* 0x3A0 */
+ __le16 board_struct_ver;
+ __le16 board_checksum;
+ struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
+} __packed;
+
+/*************************************************************************/
+
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -146,7 +413,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
*
* NB most hardware supports MSI interrupts
*/
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -156,7 +423,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
}
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
@@ -177,10 +444,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Check to see if we have a serious error condition */
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ return efx_farch_fatal_interrupt(efx);
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
@@ -241,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
}
-int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
- unsigned int command, int address,
- const void *in, void *out, size_t len)
+static int
+falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, size_t len)
{
bool addressed = (address >= 0);
bool reading = (out != NULL);
@@ -297,48 +568,16 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
return 0;
}
-static size_t
-falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
-{
- return min(FALCON_SPI_MAX_LEN,
- (spi->block_size - (start & (spi->block_size - 1))));
-}
-
static inline u8
-efx_spi_munge_command(const struct efx_spi_device *spi,
- const u8 command, const unsigned int address)
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
+ const u8 command, const unsigned int address)
{
return command | (((address >> 8) & spi->munge_address) << 3);
}
-/* Wait up to 10 ms for buffered write completion */
-int
-falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
- u8 status;
- int rc;
-
- for (;;) {
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (time_after_eq(jiffies, timeout)) {
- netif_err(efx, hw, efx->net_dev,
- "SPI write timeout on device %d"
- " last status=0x%02x\n",
- spi->device_id, status);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- }
-}
-
-int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
- loff_t start, size_t len, size_t *retlen, u8 *buffer)
+static int
+falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;
unsigned int command;
@@ -347,7 +586,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
while (pos < len) {
block_len = min(len - pos, FALCON_SPI_MAX_LEN);
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
buffer + pos, block_len);
if (rc)
@@ -367,8 +606,52 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
-int
-falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+#ifdef CONFIG_SFC_MTD
+
+struct falcon_mtd_partition {
+ struct efx_mtd_partition common;
+ const struct falcon_spi_device *spi;
+ size_t offset;
+};
+
+#define to_falcon_mtd_partition(mtd) \
+ container_of(mtd, struct falcon_mtd_partition, common.mtd)
+
+static size_t
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
+
+/* Wait up to 10 ms for buffered write completion */
+static int
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+ u8 status;
+ int rc;
+
+ for (;;) {
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static int
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -383,7 +666,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
block_len = min(len - pos,
falcon_spi_write_limit(spi, start + pos));
- command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
buffer + pos, NULL, block_len);
if (rc)
@@ -393,7 +676,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
if (rc)
break;
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
NULL, verify_buffer, block_len);
if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -416,6 +699,520 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
+static int
+falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ u8 status;
+ int rc, i;
+
+ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+ for (i = 0; i < 40; i++) {
+ __set_current_state(uninterruptible ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ pr_err("%s: timed out waiting for %s\n",
+ part->common.name, part->common.dev_type_name);
+ return -ETIMEDOUT;
+}
+
+static int
+falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+ SPI_STATUS_BP0);
+ u8 status;
+ int rc;
+
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+
+ if (!(status & unlock_mask))
+ return 0; /* already unlocked */
+
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+
+ status &= ~unlock_mask;
+ rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+ NULL, sizeof(status));
+ if (rc)
+ return rc;
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+#define FALCON_SPI_VERIFY_BUF_LEN 16
+
+static int
+falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ unsigned pos, block_len;
+ u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
+ u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
+ int rc;
+
+ if (len != spi->erase_size)
+ return -EINVAL;
+
+ if (spi->erase_command == 0)
+ return -EOPNOTSUPP;
+
+ rc = falcon_spi_unlock(efx, spi);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+ NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_slow_wait(part, false);
+
+ /* Verify the entire region has been wiped */
+ memset(empty, 0xff, sizeof(empty));
+ for (pos = 0; pos < len; pos += block_len) {
+ block_len = min(len - pos, sizeof(buffer));
+ rc = falcon_spi_read(efx, spi, start + pos, block_len,
+ NULL, buffer);
+ if (rc)
+ return rc;
+ if (memcmp(empty, buffer, block_len))
+ return -EIO;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return rc;
+}
+
+static void falcon_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s",
+ efx->name, part->type_name);
+}
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_read(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_erase(part, part->offset + start, len);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_write(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_slow_wait(part, true);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_mtd_partition *parts;
+ struct falcon_spi_device *spi;
+ size_t n_parts;
+ int rc = -ENODEV;
+
+ ASSERT_RTNL();
+
+ /* Allocate space for maximum number of partitions */
+ parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+ n_parts = 0;
+
+ spi = &nic_data->spi_flash;
+ if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.dev_type_name = "flash";
+ parts[n_parts].common.type_name = "sfc_flash_bootrom";
+ parts[n_parts].common.mtd.type = MTD_NORFLASH;
+ parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
+ parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ spi = &nic_data->spi_eeprom;
+ if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.dev_type_name = "EEPROM";
+ parts[n_parts].common.type_name = "sfc_bootconfig";
+ parts[n_parts].common.mtd.type = MTD_RAM;
+ parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
+ parts[n_parts].common.mtd.size =
+ min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
+ FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * XMAC operations
+ *
+ **************************************************************************
+ */
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+ efx_oword_t sdctl, txdrv;
+
+ /* Move the XAUI into low power, unless there is no PHY, in
+ * which case the XAUI will have to drive a cable. */
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return;
+
+ efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EFX_POPULATE_OWORD_8(txdrv,
+ FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+ efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int count;
+
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ /* Start reset sequence */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+ /* Wait up to 10 ms for completion, then reinitialise */
+ for (count = 0; count < 1000; count++) {
+ efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ falcon_setup_xaui(efx);
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up)
+ return;
+
+ /* We can only use this interrupt to signal the negative edge of
+ * xaui_align [we have to poll the positive edge]. */
+ if (nic_data->xmac_poll_required)
+ return;
+
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
+
+ /* Read link status */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+ link_ok = true;
+
+ /* Clear link status ready for next read */
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+ unsigned int max_frame_len;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+ /* Configure MAC - cut-thru mode is hard wired on */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AB_XM_RX_JUMBO_MODE, 1,
+ FRF_AB_XM_TX_STAT_EN, 1,
+ FRF_AB_XM_RX_STAT_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ /* Configure TX */
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AB_XM_TXEN, 1,
+ FRF_AB_XM_TX_PRMBL, 1,
+ FRF_AB_XM_AUTO_PAD, 1,
+ FRF_AB_XM_TXCRC, 1,
+ FRF_AB_XM_FCNTL, tx_fc,
+ FRF_AB_XM_IPG, 0x3);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+ /* Configure RX */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_XM_RXEN, 1,
+ FRF_AB_XM_AUTO_DEPAD, 0,
+ FRF_AB_XM_ACPT_ALL_MCAST, 1,
+ FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
+ FRF_AB_XM_PASS_CRC_ERR, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+ /* Set frame length */
+ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+ FRF_AB_XM_TX_JUMBO_MODE, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+ FRF_AB_XM_DIS_FCNTL, !rx_fc);
+ efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+ /* Set MAC address */
+ memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+
+ /* XGXS block is flaky and will need to be reset if moving
+	 * into or out of XGMII, XGXS or XAUI loopbacks. */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ if ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback))
+ falcon_reset_xaui(efx);
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+ bool mac_up = falcon_xmac_link_ok(efx);
+
+ if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ /* XAUI link is expected to be down */
+ return mac_up;
+
+ falcon_stop_nic_stats(efx);
+
+ while (!mac_up && tries) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+ falcon_reset_xaui(efx);
+ udelay(200);
+
+ mac_up = falcon_xmac_link_ok(efx);
+ --tries;
+ }
+
+ falcon_start_nic_stats(efx);
+
+ return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+ return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ efx_farch_filter_sync_rx_mode(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
+ falcon_reconfigure_xmac_core(efx);
+
+ falcon_reconfigure_mac_wrapper(efx);
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ falcon_ack_status_intr(efx);
+
+ return 0;
+}
+
+static void falcon_poll_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up || !nic_data->xmac_poll_required)
+ return;
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ falcon_ack_status_intr(efx);
+}
+
/**************************************************************************
*
* MAC wrapper
@@ -497,7 +1294,7 @@ static void falcon_reset_macs(struct efx_nic *efx)
falcon_setup_xaui(efx);
}
-void falcon_drain_tx_fifo(struct efx_nic *efx)
+static void falcon_drain_tx_fifo(struct efx_nic *efx)
{
efx_oword_t reg;
@@ -529,7 +1326,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
falcon_drain_tx_fifo(efx);
}
-void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
@@ -550,7 +1347,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
- FRF_AB_MAC_UC_PROM, efx->promiscuous,
+ FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
FRF_AB_MAC_LINK_STATUS, 1, /* always set */
FRF_AB_MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get
@@ -583,10 +1380,7 @@ static void falcon_stats_request(struct efx_nic *efx)
WARN_ON(nic_data->stats_pending);
WARN_ON(nic_data->stats_disable_count);
- if (nic_data->stats_dma_done == NULL)
- return; /* no mac selected */
-
- *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+ FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
nic_data->stats_pending = true;
wmb(); /* ensure done flag is clear */
@@ -608,9 +1402,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
return;
nic_data->stats_pending = false;
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, nic_data->stats,
+ efx->stats_buffer.addr, true);
} else {
netif_err(efx, hw, efx->net_dev,
"timed out waiting for statistics\n");
@@ -678,6 +1474,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
return 0;
}
+/* TX flow control may automatically turn itself off if the link
+ * partner (intermittently) stops responding to pause frames. There
+ * isn't any indication that this has happened, so the best we do is
+ * leave it up to the user to spot this and fix it by cycling transmit
+ * flow control on this end.
+ */
+
+static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+}
+
+static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ falcon_reconfigure_xmac(efx);
+ falcon_start_nic_stats(efx);
+}
+
/**************************************************************************
*
* PHY access via GMII
@@ -861,7 +1679,7 @@ static int falcon_probe_port(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- FALCON_MAC_STATS_SIZE);
+ FALCON_MAC_STATS_SIZE, GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
@@ -869,7 +1687,6 @@ static int falcon_probe_port(struct efx_nic *efx)
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
return 0;
}
@@ -926,15 +1743,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
- struct efx_spi_device *spi;
+ struct falcon_spi_device *spi;
void *region;
int rc, magic_num, struct_ver;
__le16 *word, *limit;
u32 csum;
- if (efx_spi_present(&nic_data->spi_flash))
+ if (falcon_spi_present(&nic_data->spi_flash))
spi = &nic_data->spi_flash;
- else if (efx_spi_present(&nic_data->spi_eeprom))
+ else if (falcon_spi_present(&nic_data->spi_eeprom))
spi = &nic_data->spi_eeprom;
else
return -EINVAL;
@@ -949,7 +1766,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
- efx_spi_present(&nic_data->spi_flash) ?
+ falcon_spi_present(&nic_data->spi_flash) ?
"flash" : "EEPROM");
rc = -EIO;
goto out;
@@ -998,7 +1815,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
return falcon_read_nvram(efx, NULL);
}
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
@@ -1058,8 +1875,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
efx_reset_down(efx, reset_method);
tests->registers =
- efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests))
+ efx_farch_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
? -1 : 1;
rc = falcon_reset_hw(efx, reset_method);
@@ -1078,8 +1895,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
switch (reason) {
case RESET_TYPE_RX_RECOVERY:
- case RESET_TYPE_RX_DESC_FETCH:
- case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_DMA_ERROR:
case RESET_TYPE_TX_SKIP:
/* These can occasionally occur due to hardware bugs.
* We try to reset without disrupting the link.
@@ -1294,7 +2110,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
}
static void falcon_spi_device_init(struct efx_nic *efx,
- struct efx_spi_device *spi_device,
+ struct falcon_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
if (device_type != 0) {
@@ -1360,10 +2176,11 @@ out:
return rc;
}
-static void falcon_dimension_resources(struct efx_nic *efx)
+static int falcon_dimension_resources(struct efx_nic *efx)
{
efx->rx_dc_base = 0x20000;
efx->tx_dc_base = 0x26000;
+ return 0;
}
/* Probe all SPI devices on the NIC */
@@ -1410,6 +2227,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
large_eeprom_type);
}
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+ return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+ /* Map everything up to and including the RSS indirection table.
+ * The PCI core takes care of mapping the MSI-X tables.
+ */
+ return FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
static int falcon_probe_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data;
@@ -1424,7 +2255,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon FPGA not supported\n");
goto fail1;
@@ -1478,7 +2309,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -1499,6 +2331,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+ EFX_MAX_CHANNELS);
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */
@@ -1657,7 +2491,7 @@ static int falcon_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -1688,24 +2522,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
-static void falcon_update_nic_stats(struct efx_nic *efx)
+static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, names);
+}
+
+static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
struct falcon_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
efx_oword_t cnt;
- if (nic_data->stats_disable_count)
- return;
+ if (!nic_data->stats_disable_count) {
+ efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
+ EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+ if (nic_data->stats_pending &&
+ FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx_nic_update_stats(
+ falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask,
+ stats, efx->stats_buffer.addr, true);
+ }
- efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
- efx->n_rx_nodesc_drop_cnt +=
- EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+ /* Update derived statistic */
+ efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+ stats[FALCON_STAT_rx_bytes] -
+ stats[FALCON_STAT_rx_good_bytes] -
+ stats[FALCON_STAT_rx_control] * 64);
+ }
- if (nic_data->stats_pending &&
- *nic_data->stats_dma_done == FALCON_STATS_DONE) {
- nic_data->stats_pending = false;
- rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
+ core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
+ core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[FALCON_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[FALCON_STAT_rx_gtjumbo] +
+ stats[FALCON_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[FALCON_STAT_rx_symbol_error]);
}
+
+ return FALCON_STAT_COUNT;
}
void falcon_start_nic_stats(struct efx_nic *efx)
@@ -1734,7 +2609,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
/* Wait enough time for the most recent transfer to
* complete. */
for (i = 0; i < 4 && nic_data->stats_pending; i++) {
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx))
break;
msleep(1);
}
@@ -1778,11 +2653,12 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
.dimension_resources = falcon_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = falcon_irq_ack_a1,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.map_reset_flags = falcon_map_reset_flags,
@@ -1790,23 +2666,71 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = falcon_legacy_interrupt_a1,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+
+ /* We don't expose the filter table on Falcon A1 as it is not
+ * mapped into function 0, but these implementations still
+ * work with a degenerate case of all tables set to size 0.
+ */
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_A1,
- .mem_map_size = 0x20000,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -1816,12 +2740,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
.rx_buffer_padding = 0x24,
.can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
- .phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
+ .mcdi_max_ver = -1,
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
@@ -1834,14 +2759,17 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
@@ -1849,28 +2777,67 @@ const struct efx_nic_type falcon_b0_nic_type = {
.resume_wol = efx_port_dummy_op_void,
.test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_B0,
- /* Map everything up to and including the RSS indirection
- * table. Don't map MSI-X table, MSI-X PBA since Linux
- * requires that they not be mapped. */
- .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP *
- FR_BZ_RX_INDIRECTION_TBL_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+ .mcdi_max_ver = -1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dca..1736f4b806a 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
deleted file mode 100644
index 8333865d4c9..00000000000
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/delay.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "nic.h"
-#include "regs.h"
-#include "io.h"
-#include "mdio_10g.h"
-#include "workarounds.h"
-
-/**************************************************************************
- *
- * MAC operations
- *
- *************************************************************************/
-
-/* Configure the XAUI driver that is an output from Falcon */
-void falcon_setup_xaui(struct efx_nic *efx)
-{
- efx_oword_t sdctl, txdrv;
-
- /* Move the XAUI into low power, unless there is no PHY, in
- * which case the XAUI will have to drive a cable. */
- if (efx->phy_type == PHY_TYPE_NONE)
- return;
-
- efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
-
- EFX_POPULATE_OWORD_8(txdrv,
- FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
- efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
-}
-
-int falcon_reset_xaui(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
- int count;
-
- /* Don't fetch MAC statistics over an XMAC reset */
- WARN_ON(nic_data->stats_disable_count == 0);
-
- /* Start reset sequence */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
-
- /* Wait up to 10 ms for completion, then reinitialise */
- for (count = 0; count < 1000; count++) {
- efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
- EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
- falcon_setup_xaui(efx);
- return 0;
- }
- udelay(10);
- }
- netif_err(efx, hw, efx->net_dev,
- "timed out waiting for XAUI/XGXS reset\n");
- return -ETIMEDOUT;
-}
-
-static void falcon_ack_status_intr(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
-
- if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
- return;
-
- /* We expect xgmii faults if the wireside link is down */
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
- return;
-
- /* We can only use this interrupt to signal the negative edge of
- * xaui_align [we have to poll the positive edge]. */
- if (nic_data->xmac_poll_required)
- return;
-
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-}
-
-static bool falcon_xgxs_link_ok(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool align_done, link_ok = false;
- int sync_status;
-
- /* Read link status */
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
-
- align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
- sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
- if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
- link_ok = true;
-
- /* Clear link status ready for next read */
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- return link_ok;
-}
-
-static bool falcon_xmac_link_ok(struct efx_nic *efx)
-{
- /*
- * Check MAC's XGXS link status except when using XGMII loopback
- * which bypasses the XGXS block.
- * If possible, check PHY's XGXS link status except when using
- * MAC loopback.
- */
- return (efx->loopback_mode == LOOPBACK_XGMII ||
- falcon_xgxs_link_ok(efx)) &&
- (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
- LOOPBACK_INTERNAL(efx) ||
- efx_mdio_phyxgxs_lane_sync(efx));
-}
-
-static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
-{
- unsigned int max_frame_len;
- efx_oword_t reg;
- bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
- bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
-
- /* Configure MAC - cut-thru mode is hard wired on */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AB_XM_RX_JUMBO_MODE, 1,
- FRF_AB_XM_TX_STAT_EN, 1,
- FRF_AB_XM_RX_STAT_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
-
- /* Configure TX */
- EFX_POPULATE_OWORD_6(reg,
- FRF_AB_XM_TXEN, 1,
- FRF_AB_XM_TX_PRMBL, 1,
- FRF_AB_XM_AUTO_PAD, 1,
- FRF_AB_XM_TXCRC, 1,
- FRF_AB_XM_FCNTL, tx_fc,
- FRF_AB_XM_IPG, 0x3);
- efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
-
- /* Configure RX */
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_XM_RXEN, 1,
- FRF_AB_XM_AUTO_DEPAD, 0,
- FRF_AB_XM_ACPT_ALL_MCAST, 1,
- FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
- FRF_AB_XM_PASS_CRC_ERR, 1);
- efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
-
- /* Set frame length */
- max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
- efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
- FRF_AB_XM_TX_JUMBO_MODE, 1);
- efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
- FRF_AB_XM_DIS_FCNTL, !rx_fc);
- efx_writeo(efx, &reg, FR_AB_XM_FC);
-
- /* Set MAC address */
- memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
- memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
-}
-
-static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
- bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
- bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
-
- /* XGXS block is flaky and will need to be reset if moving
- * into our out of XGMII, XGXS or XAUI loopbacks. */
- if (EFX_WORKAROUND_5147(efx)) {
- bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
- bool reset_xgxs;
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
- old_xgmii_loopback =
- EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
-
- /* The PHY driver may have turned XAUI off */
- reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
- (xaui_loopback != old_xaui_loopback) ||
- (xgmii_loopback != old_xgmii_loopback));
-
- if (reset_xgxs)
- falcon_reset_xaui(efx);
- }
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
- (xgxs_loopback || xaui_loopback) ?
- FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
-}
-
-
-/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
-{
- bool mac_up = falcon_xmac_link_ok(efx);
-
- if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
- efx_phy_mode_disabled(efx->phy_mode))
- /* XAUI link is expected to be down */
- return mac_up;
-
- falcon_stop_nic_stats(efx);
-
- while (!mac_up && tries) {
- netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
- falcon_reset_xaui(efx);
- udelay(200);
-
- mac_up = falcon_xmac_link_ok(efx);
- --tries;
- }
-
- falcon_start_nic_stats(efx);
-
- return mac_up;
-}
-
-bool falcon_xmac_check_fault(struct efx_nic *efx)
-{
- return !falcon_xmac_link_ok_retry(efx, 5);
-}
-
-int falcon_reconfigure_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- falcon_reconfigure_xgxs_core(efx);
- falcon_reconfigure_xmac_core(efx);
-
- falcon_reconfigure_mac_wrapper(efx);
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_ack_status_intr(efx);
-
- return 0;
-}
-
-void falcon_update_stats_xmac(struct efx_nic *efx)
-{
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
-
- /* Update MAC stats from DMAed values */
- FALCON_STAT(efx, XgRxOctets, rx_bytes);
- FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
- FALCON_STAT(efx, XgRxPkts, rx_packets);
- FALCON_STAT(efx, XgRxPktsOK, rx_good);
- FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
- FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
- FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
- FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
- FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
- FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
- FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
- FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
- FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
- FALCON_STAT(efx, XgRxAlignError, rx_align_error);
- FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
- FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
- FALCON_STAT(efx, XgRxControlPkts, rx_control);
- FALCON_STAT(efx, XgRxPausePkts, rx_pause);
- FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
- FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
- FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
- FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
- FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
- FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
- FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
- FALCON_STAT(efx, XgRxLengthError, rx_length_error);
- FALCON_STAT(efx, XgTxPkts, tx_packets);
- FALCON_STAT(efx, XgTxOctets, tx_bytes);
- FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
- FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
- FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
- FALCON_STAT(efx, XgTxControlPkts, tx_control);
- FALCON_STAT(efx, XgTxPausePkts, tx_pause);
- FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
- FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
- FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
- FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
- FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
- FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
- FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
- FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
- FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
- FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
- FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
- FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
-
- /* Update derived statistics */
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- efx_update_diff_stat(&mac_stats->rx_bad_bytes,
- mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
-}
-
-void falcon_poll_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
- !nic_data->xmac_poll_required)
- return;
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_ack_status_intr(efx);
-}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 00000000000..c0907d884d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2942 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Falcon-architecture (SFC4000 and SFC9000-family) support */
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ unsigned int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->buf.addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_sriov_enabled(efx) &&
+ efx->vf_buftbl_base < efx->next_buffer_table);
+#endif
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
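+/* For illustration (assuming the 4KB EFX_BUF_SIZE noted above): a
+ * 4096-entry descriptor ring of 8-byte efx_qword_t entries needs
+ * 32768 bytes, which ALIGN() leaves unchanged, so the allocation
+ * claims buffer->entries == 8 buffer table entries starting at
+ * efx->next_buffer_table.
+ */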
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ efx_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+{
+
+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_farch_notify_tx_desc(tx_queue);
+ }
+}
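+/* For illustration: write_count and insert_count are free-running
+ * counters; masking with ptr_mask (ring size - 1, a power of two)
+ * converts them to ring indices, so with a 512-entry ring a
+ * write_count of 515 maps to slot 515 & 511 == 3.
+ */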
+
+/* Allocate hardware resources for a TX queue */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ __clear_bit_le(tx_queue->queue, &reg);
+ else
+ __set_bit_le(tx_queue->queue, &reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_farch_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
+
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->scatter_n = 0;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* efx_farch_flush_queues() must be woken up when all flushes are completed,
+ * or more RX flushes can be kicked off.
+ */
+static bool efx_farch_flush_wake(struct efx_nic *efx)
+{
+ /* Ensure that all updates are visible to efx_farch_flush_queues() */
+ smp_mb();
+
+ return (atomic_read(&efx->active_queues) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
+
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+ bool i = true;
+ efx_oword_t txd_ptr_tbl;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+ i = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment active_queues as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ efx_farch_magic_event(channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+ return i;
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+static int efx_farch_do_flush(struct efx_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_farch_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->active_queues) > 0) {
+ /* If SRIOV is enabled, then offload receive queue flushing to
+ * the firmware (though we will still have to poll for
+ * completion). If that fails, fall back to the old scheme.
+ */
+ if (efx_sriov_enabled(efx)) {
+ rc = efx_mcdi_flush_rxqs(efx);
+ if (!rc)
+ goto wait;
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EFX_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ efx_farch_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ wait:
+ timeout = wait_event_timeout(efx->flush_wq,
+ efx_farch_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->active_queues) &&
+ !efx_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->active_queues),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->active_queues, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ return rc;
+}
+
+int efx_farch_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc = 0;
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ /* Only perform flush if DMA is enabled */
+ if (efx->pci_dev->is_busmaster) {
+ efx->type->prepare_flush(efx);
+ rc = efx_farch_do_flush(efx);
+ efx->type->finish_flush(efx);
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_farch_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_farch_tx_fini(tx_queue);
+ }
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_farch_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ efx_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
+{
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_farch_generate_event(channel->efx, channel->channel, &event);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ netif_tx_lock(efx->net_dev);
+ efx_farch_notify_tx_desc(tx_queue);
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
+
+/* Detect errors included in the rx_ev_pkt_ok bit. */
+static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EFX_RX_PKT_DISCARD : 0;
+}
+
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
+efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
+}
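+/* For illustration: the dropped count is computed modulo the ring
+ * size, so with ptr_mask == 511, expected == 100 and index == 110
+ * the driver reports 10 dropped events, and the wrapped case
+ * expected == 510, index == 4 still yields (4 - 510) & 511 == 6.
+ */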
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static void
+efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
+ u16 flags;
+ struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
+
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK then we can rely on the
+ * hardware checksum and classification.
+ */
+ flags = 0;
+ switch (rx_ev_hdr_type) {
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags |= EFX_RX_PKT_TCP;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags |= EFX_RX_PKT_CSUMMED;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ break;
+ }
+ } else {
+ flags = efx_farch_handle_rx_not_ok(rx_queue, event);
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ flags |= EFX_RX_PKT_DISCARD;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+}
+
+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_tx_queue *tx_queue;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ efx_farch_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+ }
+}
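+/* For illustration (assuming EFX_TXQ_TYPES == 4): the flush event
+ * carries a flat hardware queue id which the code above splits into
+ * a channel and a queue type, so qid 9 would map to TX channel
+ * 9 / 4 == 2 and type 9 % 4 == 1.
+ */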
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = efx_get_channel(efx, qid);
+ if (!efx_channel_has_rx_queue(channel))
+ return;
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+efx_farch_handle_drain_event(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->active_queues) == 0);
+ atomic_dec(&efx->active_queues);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void efx_farch_handle_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue =
+ efx_channel_has_rx_queue(channel) ?
+ efx_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
+
+ magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EFX_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here */
+ efx_fast_push_rx_descriptors(rx_queue);
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ efx_farch_handle_drain_event(channel);
+ } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+ efx_farch_handle_drain_event(channel);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+ }
+}
+
+static void
+efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_tx_flush_done(efx, event);
+ efx_sriov_tx_flush_done(efx, event);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_rx_flush_done(efx, event);
+ efx_sriov_rx_flush_done(efx, event);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_farch_ev_process(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_farch_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_farch_handle_tx_event(channel,
+ &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_farch_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_farch_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_USER_EV:
+ efx_sriov_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_farch_ev_probe(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+int efx_farch_ev_init(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ return 0;
+}
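+/* For illustration: an all-ones entry is the "no event" marker, so
+ * the 0xff fill above leaves every slot empty, and efx_farch_ev_process()
+ * marks each consumed event back to all ones with EFX_SET_QWORD()
+ * before advancing the read pointer.
+ */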
+
+void efx_farch_ev_fini(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_farch_ev_remove(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+void efx_farch_ev_test_generate(struct efx_channel *channel)
+{
+ efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+}
+
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_FILL(rx_queue));
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_farch_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_farch_irq_enable_master(struct efx_nic *efx)
+{
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ efx_farch_interrupts(efx, true, false);
+}
+
+void efx_farch_irq_disable_master(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_farch_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_farch_irq_test_generate(struct efx_nic *efx)
+{
+ efx_farch_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+ /* If this is a memory parity error dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_farch_irq_disable_master(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+ /* Legacy interrupts are disabled too late by the EEH kernel
+ * code. Disable them earlier.
+ * If an EEH error occurred, the read will have returned all ones.
+ */
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ !efx->eeh_disabled_legacy_irq) {
+ disable_irq_nosync(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = true;
+ }
+
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ if (queues != 0) {
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+ result = IRQ_HANDLED;
+
+ } else {
+ efx_qword_t *event;
+
+ /* Legacy ISR read can return zero once (SF bug 15783) */
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_farch_ev_read_ack(channel);
+ }
+ }
+ }
+
+ if (result == IRQ_HANDLED)
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Handle non-event-queue sources */
+ if (context->index == efx->irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+
+ return IRQ_HANDLED;
+}
+
+
+/* Setup RSS indirection table.
+ * This maps from the hash value of the packet to RXQ
+ */
+void efx_farch_rx_push_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ }
+}
+
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0 buftbl entries for channels
+ * efx->vf_buftbl_base buftbl entries for SR-IOV
+ * efx->rx_dc_base RX descriptor caches
+ * efx->tx_dc_base TX descriptor caches
+ */
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx_sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+
+ efx->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
+ }
+#endif
+
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
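+/* For illustration (hypothetical numbers): with sram_lim_qw == 65536
+ * and vi_count == 32 the layout above gives tx_dc_base == 65536 -
+ * 32 * 16 == 65024 and rx_dc_base == 65024 - 32 * 64 == 62976,
+ * leaving everything below rx_dc_base for buffer table entries
+ * (channels first, then any SR-IOV VIs).
+ */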
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_farch_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * falcon_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ efx_farch_rx_push_indir_table(efx);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/**************************************************************************
+ *
+ * Filter tables
+ *
+ **************************************************************************
+ */
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to the pipelined implementation we need to program H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
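+/* For illustration: a wildcard search limit of 8 hops is therefore
+ * programmed into the hardware as 8 + 3 == 11, and a full-match
+ * limit of 8 as 8 + 1 == 9 (see efx_farch_filter_push_rx_config()
+ * below).
+ */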
+
+/* Hard maximum search limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_farch_filter_search() when the
+ * table is full.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_farch_filter_type {
+ EFX_FARCH_FILTER_TCP_FULL = 0,
+ EFX_FARCH_FILTER_TCP_WILD,
+ EFX_FARCH_FILTER_UDP_FULL,
+ EFX_FARCH_FILTER_UDP_WILD,
+ EFX_FARCH_FILTER_MAC_FULL = 4,
+ EFX_FARCH_FILTER_MAC_WILD,
+ EFX_FARCH_FILTER_UC_DEF = 8,
+ EFX_FARCH_FILTER_MC_DEF,
+ EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
+};
+
+enum efx_farch_filter_table_id {
+ EFX_FARCH_FILTER_TABLE_RX_IP = 0,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF,
+ EFX_FARCH_FILTER_TABLE_TX_MAC,
+ EFX_FARCH_FILTER_TABLE_COUNT,
+};
+
+enum efx_farch_filter_index {
+ EFX_FARCH_FILTER_INDEX_UC_DEF,
+ EFX_FARCH_FILTER_INDEX_MC_DEF,
+ EFX_FARCH_FILTER_SIZE_RX_DEF,
+};
+
+struct efx_farch_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+struct efx_farch_filter_table {
+ enum efx_farch_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_farch_filter_spec *spec;
+ unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
+};
+
+struct efx_farch_filter_state {
+ struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
+};
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx);
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_farch_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
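+/* For illustration: per the "First/Last 16 rounds" comments, each
+ * three-line block above advances the LFSR by 16 steps at once,
+ * absorbing first the high and then the low 16 bits of the key, so
+ * the full 32-round hash costs a handful of shifts and XORs rather
+ * than a bit-at-a-time loop.
+ */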
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_farch_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
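+/* For illustration: key * 2 - 1 is always odd, hence coprime to the
+ * power-of-two table size, so repeated stepping visits every slot
+ * before repeating and the open-addressed search cannot cycle over
+ * a subset of entries.
+ */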
+
+static enum efx_farch_filter_table_id
+efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
+ EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
+ return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
+}
+
+static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t tx_cfg;
+
+ efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
+static int
+efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
+ const struct efx_filter_spec *gen_spec)
+{
+ bool is_full = false;
+
+ if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
+ return -EINVAL;
+
+ spec->priority = gen_spec->priority;
+ spec->flags = gen_spec->flags;
+ spec->dmaq_id = gen_spec->dmaq_id;
+
+ switch (gen_spec->match_flags) {
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
+ is_full = true;
+ /* fall through */
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
+ __be32 rhost, host1, host2;
+ __be16 rport, port1, port2;
+
+ EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
+
+ if (gen_spec->ether_type != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+ if (gen_spec->loc_port == 0 ||
+ (is_full && gen_spec->rem_port == 0))
+ return -EADDRNOTAVAIL;
+ switch (gen_spec->ip_proto) {
+ case IPPROTO_TCP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
+ EFX_FARCH_FILTER_TCP_WILD);
+ break;
+ case IPPROTO_UDP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
+ EFX_FARCH_FILTER_UDP_WILD);
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ rhost = is_full ? gen_spec->rem_host[0] : 0;
+ rport = is_full ? gen_spec->rem_port : 0;
+ host1 = rhost;
+ host2 = gen_spec->loc_host[0];
+ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
+ port1 = gen_spec->loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->loc_port;
+ }
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = true;
+ /* fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
+ EFX_FARCH_FILTER_MAC_WILD);
+ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
+ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
+ gen_spec->loc_mac[3] << 16 |
+ gen_spec->loc_mac[4] << 8 |
+ gen_spec->loc_mac[5]);
+ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
+ gen_spec->loc_mac[1]);
+ break;
+
+ case EFX_FILTER_MATCH_LOC_MAC_IG:
+ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
+ EFX_FARCH_FILTER_MC_DEF :
+ EFX_FARCH_FILTER_UC_DEF);
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ break;
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return 0;
+}
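
For the RX IP table, the three 32-bit data words above carry the match tuple: data[0] holds the low half of host1 plus port1, data[1] holds port2 plus the high half of host1, and data[2] holds host2 (for a TCP/UDP full match, host1/port1 are the remote endpoint and host2/port2 the local one). Below is a minimal standalone sketch of that packing in host byte order; the function name and addresses are illustrative only and are not part of this patch (the driver itself converts with ntohl()/ntohs() as above).

#include <stdint.h>
#include <stdio.h>

/* Illustrative host-order version of the data[] packing above; the
 * names and example addresses here are not part of the driver.
 */
static void pack_ipv4_full(uint32_t rem_host, uint16_t rem_port,
			   uint32_t loc_host, uint16_t loc_port,
			   uint32_t data[3])
{
	data[0] = rem_host << 16 | rem_port;			/* host1 low half, port1 */
	data[1] = (uint32_t)loc_port << 16 | rem_host >> 16;	/* port2, host1 high half */
	data[2] = loc_host;					/* host2 */
}

int main(void)
{
	uint32_t data[3];

	/* TCP full match: remote 198.51.100.2:12345 -> local 192.0.2.1:80 */
	pack_ipv4_full(0xC6336402, 12345, 0xC0000201, 80, data);
	printf("%08x %08x %08x\n", data[0], data[1], data[2]);
	return 0;
}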
+
+static void
+efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
+ const struct efx_farch_filter_spec *spec)
+{
+ bool is_full = false;
+
+ /* *gen_spec should be completely initialised, to be consistent
+ * with efx_filter_init_{rx,tx}() and in case we want to copy
+ * it back to userland.
+ */
+ memset(gen_spec, 0, sizeof(*gen_spec));
+
+ gen_spec->priority = spec->priority;
+ gen_spec->flags = spec->flags;
+ gen_spec->dmaq_id = spec->dmaq_id;
+
+ switch (spec->type) {
+ case EFX_FARCH_FILTER_TCP_FULL:
+ case EFX_FARCH_FILTER_UDP_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_TCP_WILD:
+ case EFX_FARCH_FILTER_UDP_WILD: {
+ __be32 host1, host2;
+ __be16 port1, port2;
+
+ gen_spec->match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ if (is_full)
+ gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_REM_PORT);
+ gen_spec->ether_type = htons(ETH_P_IP);
+ gen_spec->ip_proto =
+ (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
+ spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
+ IPPROTO_TCP : IPPROTO_UDP;
+
+ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ port1 = htons(spec->data[0]);
+ host2 = htonl(spec->data[2]);
+ port2 = htons(spec->data[1] >> 16);
+ if (spec->flags & EFX_FILTER_FLAG_TX) {
+ gen_spec->loc_host[0] = host1;
+ gen_spec->rem_host[0] = host2;
+ } else {
+ gen_spec->loc_host[0] = host2;
+ gen_spec->rem_host[0] = host1;
+ }
+ if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
+ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
+ gen_spec->loc_port = port1;
+ gen_spec->rem_port = port2;
+ } else {
+ gen_spec->loc_port = port2;
+ gen_spec->rem_port = port1;
+ }
+
+ break;
+ }
+
+ case EFX_FARCH_FILTER_MAC_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_MAC_WILD:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
+ if (is_full)
+ gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ gen_spec->loc_mac[0] = spec->data[2] >> 8;
+ gen_spec->loc_mac[1] = spec->data[2];
+ gen_spec->loc_mac[2] = spec->data[1] >> 24;
+ gen_spec->loc_mac[3] = spec->data[1] >> 16;
+ gen_spec->loc_mac[4] = spec->data[1] >> 8;
+ gen_spec->loc_mac[5] = spec->data[1];
+ gen_spec->outer_vid = htons(spec->data[0]);
+ break;
+
+ case EFX_FARCH_FILTER_UC_DEF:
+ case EFX_FARCH_FILTER_MC_DEF:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
+ gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void
+efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
+{
+ /* If there's only one channel then disable RSS for non VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
+ spec->dmaq_id = 0;
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_farch_filter_build(efx_oword_t *filter,
+ struct efx_farch_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_farch_filter_spec_table_id(spec)) {
+ case EFX_FARCH_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
+ spec->type == EFX_FARCH_FILTER_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
+ const struct efx_farch_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ if (left->flags & EFX_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
+ return true;
+}
+
+/*
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
+ *
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
+ */
+
+#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
+ [EFX_FARCH_FILTER_TCP_FULL] = 0,
+ [EFX_FARCH_FILTER_UDP_FULL] = 0,
+ [EFX_FARCH_FILTER_TCP_WILD] = 1,
+ [EFX_FARCH_FILTER_UDP_WILD] = 1,
+ [EFX_FARCH_FILTER_MAC_FULL] = 2,
+ [EFX_FARCH_FILTER_MAC_WILD] = 3,
+ [EFX_FARCH_FILTER_UC_DEF] = 4,
+ [EFX_FARCH_FILTER_MC_DEF] = 4,
+};
+
+static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
+ EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_RX_IP,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
+};
+
+#define EFX_FARCH_FILTER_INDEX_WIDTH 13
+#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32
+efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
+ unsigned int index)
+{
+ unsigned int range;
+
+ range = efx_farch_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EFX_FILTER_FLAG_RX))
+ range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
+
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_farch_filter_table_id
+efx_farch_filter_id_table_id(u32 id)
+{
+ unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
+
+ if (range < ARRAY_SIZE(efx_farch_filter_range_table))
+ return efx_farch_filter_range_table[range];
+ else
+ return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
+}
+
+static inline unsigned int efx_farch_filter_id_index(u32 id)
+{
+ return id & EFX_FARCH_FILTER_INDEX_MASK;
+}
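
An external filter ID is simply the match-priority range placed above the 13-bit table index, so deconstruction is a shift and a mask and can safely reject out-of-range values handed in from userland. A standalone round-trip sketch follows; the macro names and example values are illustrative, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

#define INDEX_WIDTH 13			/* mirrors EFX_FARCH_FILTER_INDEX_WIDTH */
#define INDEX_MASK  ((1u << INDEX_WIDTH) - 1)

int main(void)
{
	unsigned int range = 1;		/* match pri 1: TCP/UDP wildcard, RX */
	unsigned int index = 0x1234;	/* row within the RX IP table */
	uint32_t id = range << INDEX_WIDTH | index;

	/* Deconstruction, as in efx_farch_filter_id_table_id()/_id_index() */
	printf("id=%#x -> range=%u index=%#x\n",
	       id, id >> INDEX_WIDTH, id & INDEX_MASK);
	return 0;
}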
+
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
+ enum efx_farch_filter_table_id table_id;
+
+ do {
+ table_id = efx_farch_filter_range_table[range];
+ if (state->table[table_id].size != 0)
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH |
+ state->table[table_id].size;
+ } while (range--);
+
+ return 0;
+}
+
+s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec,
+ bool replace_equal)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec spec;
+ efx_oword_t filter;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
+ int rc;
+
+ rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
+ if (rc)
+ return rc;
+
+ table = &state->table[efx_farch_filter_spec_table_id(&spec)];
+ if (table->size == 0)
+ return -EINVAL;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_limit=%d", __func__, spec.type,
+ table->search_limit[spec.type]);
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
+ EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
+ rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
+ ins_index = rep_index;
+
+ spin_lock_bh(&efx->filter_lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_farch_filter_build(&filter, &spec);
+ unsigned int hash = efx_farch_filter_hash(key);
+ unsigned int incr = efx_farch_filter_increment(key);
+ unsigned int max_rep_depth = table->search_limit[spec.type];
+ unsigned int max_ins_depth =
+ spec.priority <= EFX_FILTER_PRI_HINT ?
+ EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
+ EFX_FARCH_FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_farch_filter_equal(&spec,
+ &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_farch_filter_spec *saved_spec =
+ &table->spec[rep_index];
+
+ if (spec.priority == saved_spec->priority && !replace_equal) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec.priority < saved_spec->priority &&
+ !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out;
+ }
+ if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ rc = 0;
+ goto out;
+ }
+ /* Retain the RX_STACK flag */
+ spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
+ ++table->used;
+ }
+ table->spec[ins_index] = spec;
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ if (table->search_limit[spec.type] < depth) {
+ table->search_limit[spec.type] = depth;
+ if (spec.flags & EFX_FILTER_FLAG_TX)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_farch_filter_table_clear_entry(efx, table,
+ rep_index);
+ }
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec.type, ins_index, spec.dmaq_id);
+ rc = efx_farch_filter_make_id(&spec, ins_index);
+
+out:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
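
The probing loop above is open addressing: the first slot comes from a 16-bit LFSR hash (polynomial x^16 + x^3 + 1) of the n-tuple key returned by efx_farch_filter_build(), and each further probe advances by an odd, key-derived increment so every slot is eventually reachable. The efx_farch_filter_hash()/efx_farch_filter_increment() helpers are presumably defined earlier in farch.c and match the ones removed from filter.c further down; the standalone sketch below mirrors them, with the table size an assumed example value rather than a constant taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Standalone mirrors of the LFSR hash and odd probe increment used by
 * the insertion loop above (illustrative only).
 */
static uint16_t filter_hash(uint32_t key)
{
	uint16_t tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

static uint16_t filter_increment(uint32_t key)
{
	return key * 2 - 1;		/* always odd */
}

int main(void)
{
	/* n-tuple key = data[0] ^ data[1] ^ data[2] ^ data3 (data3 = 0 for TCP) */
	uint32_t key = 0x64023039 ^ 0x0050c633 ^ 0xc0000201;
	unsigned int size = 8192;	/* assumed power-of-two table size */
	unsigned int i = filter_hash(key) & (size - 1);
	unsigned int incr = filter_increment(key);
	unsigned int depth;

	/* First few slots the insertion loop would examine */
	for (depth = 1; depth <= 4; depth++) {
		printf("depth %u -> slot %u\n", depth, i);
		i = (i + incr) & (size - 1);
	}
	return 0;
}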
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx)
+{
+ static efx_oword_t filter;
+
+ EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
+ BUG_ON(table->offset == 0); /* can't clear MAC default filters */
+
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ /* If this filter required a greater search depth than
+ * any other, the search limit for its type can now be
+ * decreased. However, it is hard to determine that
+ * unless the table has become completely empty - in
+ * which case, all its search limits can be set to 0.
+ */
+ if (unlikely(table->used == 0)) {
+ memset(table->search_limit, 0, sizeof(table->search_limit));
+ if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+}
+
+static int efx_farch_filter_remove(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
+
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ spec->priority > priority)
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ efx_farch_filter_table_clear_entry(efx, table, filter_idx);
+ }
+
+ return 0;
+}
+
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_farch_filter_spec *spec;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+ rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority) {
+ efx_farch_filter_to_gen_spec(spec_buf, spec);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear(struct efx_nic *efx,
+ enum efx_farch_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table = &state->table[table_id];
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+ efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
+ priority);
+}
+
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+ efx_farch_filter_push_tx_limits(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state;
+ struct efx_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ /* RX default filters must always exist */
+ struct efx_farch_filter_spec *spec;
+ unsigned i;
+
+ for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
+ spec = &table->spec[i];
+ spec->type = EFX_FARCH_FILTER_UC_DEF + i;
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ __set_bit(i, table->used_bitmap);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ return 0;
+
+fail:
+ efx_farch_filter_table_remove(efx);
+ return -ENOMEM;
+}
+
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_farch_filter_push_rx_config() */
+ continue;
+
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec)
+{
+ return efx_farch_filter_insert(efx, gen_spec, true);
+}
+
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table =
+ &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
+ flow_id, index)) {
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ netif_addr_lock_bh(net_dev);
+
+ efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ __set_bit_le(bit, mc_hash);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ __set_bit_le(0xff, mc_hash);
+ }
+
+ netif_addr_unlock_bh(net_dev);
+}
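
Each multicast address sets a single bit of the hash table, selected by the low bits of ether_crc_le() of the address. The sketch below reproduces the broadcast case called out in the comment above; the bitwise CRC mirrors the kernel's ether_crc_le(), and the 256-entry hash size is an assumption for illustration, not a value taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define MCAST_HASH_ENTRIES 256		/* assumed size of the multicast hash */

/* Little-endian Ethernet CRC, mirroring the kernel's ether_crc_le() */
static uint32_t crc_le(int length, const uint8_t *data)
{
	uint32_t crc = ~0u;

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 8; --bit >= 0; octet >>= 1) {
			if ((crc ^ octet) & 1) {
				crc >>= 1;
				crc ^= 0xedb88320;	/* reflected CRC-32 poly */
			} else {
				crc >>= 1;
			}
		}
	}
	return crc;
}

int main(void)
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint32_t crc = crc_le(6, bcast);

	/* Expect crc 0xbe2612ff and hash bit 0xff, per the comment above */
	printf("crc=%#010x bit=%#x\n", crc, crc & (MCAST_HASH_ENTRIES - 1));
	return 0;
}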
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index ade4c4dc56c..7019a712e79 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,15 +1,15 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_REGS_H
-#define EFX_REGS_H
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
/*
* Falcon hardware architecture definitions have a name prefix following
@@ -2925,264 +2925,8 @@
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
-#define XgTxPkts512to1023Octets_offset 0xB4
-#define XgTxPkts512to1023Octets_WIDTH 32
-#define XgTxPkts1024to15xxOctets_offset 0xB8
-#define XgTxPkts1024to15xxOctets_WIDTH 32
-#define XgTxPkts1519toMaxOctets_offset 0xBC
-#define XgTxPkts1519toMaxOctets_WIDTH 32
-#define XgTxUndersizePkts_offset 0xC0
-#define XgTxUndersizePkts_WIDTH 32
-#define XgTxOversizePkts_offset 0xC4
-#define XgTxOversizePkts_WIDTH 32
-#define XgTxNonTcpUdpPkt_offset 0xC8
-#define XgTxNonTcpUdpPkt_WIDTH 16
-#define XgTxMacSrcErrPkt_offset 0xCC
-#define XgTxMacSrcErrPkt_WIDTH 16
-#define XgTxIpSrcErrPkt_offset 0xD0
-#define XgTxIpSrcErrPkt_WIDTH 16
-#define XgDmaDone_offset 0xD4
-#define XgDmaDone_WIDTH 32
-
-#define FALCON_STATS_NOT_DONE 0x00000000
-#define FALCON_STATS_DONE 0xffffffff
-
-/**************************************************************************
- *
- * Falcon non-volatile configuration
- *
- **************************************************************************
- */
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
-/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
-struct falcon_nvconfig_board_v2 {
- __le16 nports;
- u8 port0_phy_addr;
- u8 port0_phy_type;
- u8 port1_phy_addr;
- u8 port1_phy_type;
- __le16 asic_sub_revision;
- __le16 board_revision;
-} __packed;
-
-/* Board configuration v3 extra information */
-struct falcon_nvconfig_board_v3 {
- __le32 spi_device_type[2];
-} __packed;
-
-/* Bit numbers for spi_device_type */
-#define SPI_DEV_TYPE_SIZE_LBN 0
-#define SPI_DEV_TYPE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
-#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
-#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
-#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
-#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
-#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
-#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_FIELD(type, field) \
- (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
-
-#define FALCON_NVCONFIG_OFFSET 0x300
-
-#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
-struct falcon_nvconfig {
- efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
- u8 mac_address[2][8]; /* 0x310 */
- efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
- efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
- efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
- efx_oword_t hw_init_reg; /* 0x350 */
- efx_oword_t nic_stat_reg; /* 0x360 */
- efx_oword_t glb_ctl_reg; /* 0x370 */
- efx_oword_t srm_cfg_reg; /* 0x380 */
- efx_oword_t spare_reg; /* 0x390 */
- __le16 board_magic_num; /* 0x3A0 */
- __le16 board_struct_ver;
- __le16 board_checksum;
- struct falcon_nvconfig_board_v2 board_v2;
- efx_oword_t ee_base_page_reg; /* 0x3B0 */
- struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
-} __packed;
-
-#endif /* EFX_REGS_H */
+#endif /* EFX_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index 30d744235d2..00000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1274 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/in.h>
-#include <net/ip.h>
-#include "efx.h"
-#include "filter.h"
-#include "io.h"
-#include "nic.h"
-#include "regs.h"
-
-/* "Fudge factors" - difference between programmed value and actual depth.
- * Due to pipelined implementation we need to program H/W with a value that
- * is larger than the hop limit we want.
- */
-#define FILTER_CTL_SRCH_FUDGE_WILD 3
-#define FILTER_CTL_SRCH_FUDGE_FULL 1
-
-/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
- * We also need to avoid infinite loops in efx_filter_search() when the
- * table is full.
- */
-#define FILTER_CTL_SRCH_MAX 200
-
-/* Don't try very hard to find space for performance hints, as this is
- * counter-productive. */
-#define FILTER_CTL_SRCH_HINT_MAX 5
-
-enum efx_filter_table_id {
- EFX_FILTER_TABLE_RX_IP = 0,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF,
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_COUNT,
-};
-
-enum efx_filter_index {
- EFX_FILTER_INDEX_UC_DEF,
- EFX_FILTER_INDEX_MC_DEF,
- EFX_FILTER_SIZE_RX_DEF,
-};
-
-struct efx_filter_table {
- enum efx_filter_table_id id;
- u32 offset; /* address of table relative to BAR */
- unsigned size; /* number of entries */
- unsigned step; /* step between entries */
- unsigned used; /* number currently used */
- unsigned long *used_bitmap;
- struct efx_filter_spec *spec;
- unsigned search_depth[EFX_FILTER_TYPE_COUNT];
-};
-
-struct efx_filter_state {
- spinlock_t lock;
- struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
-#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
- unsigned rps_expire_index;
-#endif
-};
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx);
-
-/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
- * key derived from the n-tuple. The initial LFSR state is 0xffff. */
-static u16 efx_filter_hash(u32 key)
-{
- u16 tmp;
-
- /* First 16 rounds */
- tmp = 0x1fff ^ key >> 16;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- tmp = tmp ^ tmp >> 9;
- /* Last 16 rounds */
- tmp = tmp ^ tmp << 13 ^ key;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- return tmp ^ tmp >> 9;
-}
-
-/* To allow for hash collisions, filter search continues at these
- * increments from the first possible entry selected by the hash. */
-static u16 efx_filter_increment(u32 key)
-{
- return key * 2 - 1;
-}
-
-static enum efx_filter_table_id
-efx_filter_spec_table_id(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
- EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
- return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
-}
-
-static struct efx_filter_table *
-efx_filter_spec_table(struct efx_filter_state *state,
- const struct efx_filter_spec *spec)
-{
- if (spec->type == EFX_FILTER_UNSPEC)
- return NULL;
- else
- return &state->table[efx_filter_spec_table_id(spec)];
-}
-
-static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
-{
- memset(table->search_depth, 0, sizeof(table->search_depth));
-}
-
-static void efx_filter_push_rx_config(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t filter_ctl;
-
- efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
-
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
-
- /* There is a single bit to enable RX scatter for all
- * unmatched packets. Only set it if scatter is
- * enabled in both filter specs.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_SCATTER));
- } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* We don't expose 'default' filters because unmatched
- * packets always go to the queue number found in the
- * RSS table. But we still need to set the RX scatter
- * bit here.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- efx->rx_scatter);
- }
-
- efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-}
-
-static void efx_filter_push_tx_limits(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t tx_cfg;
-
- efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
-}
-
-static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
- __be32 host1, __be16 port1,
- __be32 host2, __be16 port2)
-{
- spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
- spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
- spec->data[2] = ntohl(host2);
-}
-
-static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
- __be32 *host1, __be16 *port1,
- __be32 *host2, __be16 *port2)
-{
- *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
- *port1 = htons(spec->data[0]);
- *host2 = htonl(spec->data[2]);
- *port2 = htons(spec->data[1] >> 16);
-}
-
-/**
- * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- */
-int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port)
-{
- __be32 host1;
- __be16 port1;
-
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_WILD;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_WILD;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- /* Filter is constructed in terms of source and destination,
- * with the odd wrinkle that the ports are swapped in a UDP
- * wildcard filter. We need to convert from local and remote
- * (= zero for wildcard) addresses.
- */
- host1 = 0;
- if (proto != IPPROTO_UDP) {
- port1 = 0;
- } else {
- port1 = port;
- port = 0;
- }
-
- __efx_filter_set_ipv4(spec, host1, port1, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port)
-{
- __be32 host1;
- __be16 port1;
-
- switch (spec->type) {
- case EFX_FILTER_TCP_WILD:
- *proto = IPPROTO_TCP;
- __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
- return 0;
- case EFX_FILTER_UDP_WILD:
- *proto = IPPROTO_UDP;
- __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- * @rhost: Remote host address (network byte order)
- * @rport: Remote port (network byte order)
- */
-int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0 || rport == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_FULL;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_FULL;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- __efx_filter_set_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport)
-{
- switch (spec->type) {
- case EFX_FILTER_TCP_FULL:
- *proto = IPPROTO_TCP;
- break;
- case EFX_FILTER_UDP_FULL:
- *proto = IPPROTO_UDP;
- break;
- default:
- return -EINVAL;
- }
-
- __efx_filter_get_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-/**
- * efx_filter_set_eth_local - specify local Ethernet address and optional VID
- * @spec: Specification to initialise
- * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
- * @addr: Local Ethernet MAC address
- */
-int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (vid == EFX_FILTER_VID_UNSPEC) {
- spec->type = EFX_FILTER_MAC_WILD;
- spec->data[0] = 0;
- } else {
- spec->type = EFX_FILTER_MAC_FULL;
- spec->data[0] = vid;
- }
-
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
- return 0;
-}
-
-/**
- * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_uc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_UC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-/**
- * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_mc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_MC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- struct efx_filter_spec *spec = &table->spec[filter_idx];
- enum efx_filter_flags flags = 0;
-
- /* If there's only one channel then disable RSS for non VF
- * traffic, thereby allowing VFs to use RSS when the PF can't.
- */
- if (efx->n_rx_channels > 1)
- flags |= EFX_FILTER_FLAG_RX_RSS;
-
- if (efx->rx_scatter)
- flags |= EFX_FILTER_FLAG_RX_SCATTER;
-
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
- spec->type = EFX_FILTER_UC_DEF + filter_idx;
- table->used_bitmap[0] |= 1 << filter_idx;
-}
-
-int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr)
-{
- switch (spec->type) {
- case EFX_FILTER_MAC_WILD:
- *vid = EFX_FILTER_VID_UNSPEC;
- break;
- case EFX_FILTER_MAC_FULL:
- *vid = spec->data[0];
- break;
- default:
- return -EINVAL;
- }
-
- addr[0] = spec->data[2] >> 8;
- addr[1] = spec->data[2];
- addr[2] = spec->data[1] >> 24;
- addr[3] = spec->data[1] >> 16;
- addr[4] = spec->data[1] >> 8;
- addr[5] = spec->data[1];
- return 0;
-}
-
-/* Build a filter entry and return its n-tuple key. */
-static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
-{
- u32 data3;
-
- switch (efx_filter_spec_table_id(spec)) {
- case EFX_FILTER_TABLE_RX_IP: {
- bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
- spec->type == EFX_FILTER_UDP_WILD);
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_BZ_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_BZ_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_BZ_TCP_UDP, is_udp,
- FRF_BZ_RXQ_ID, spec->dmaq_id,
- EFX_DWORD_2, spec->data[2],
- EFX_DWORD_1, spec->data[1],
- EFX_DWORD_0, spec->data[0]);
- data3 = is_udp;
- break;
- }
-
- case EFX_FILTER_TABLE_RX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_CZ_RMFT_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_CZ_RMFT_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
- FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
- FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
- FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild;
- break;
- }
-
- case EFX_FILTER_TABLE_TX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_5(*filter,
- FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
- FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
- FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
- FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild | spec->dmaq_id << 1;
- break;
- }
-
- default:
- BUG();
- }
-
- return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
-}
-
-static bool efx_filter_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if (left->type != right->type ||
- memcmp(left->data, right->data, sizeof(left->data)))
- return false;
-
- if (left->flags & EFX_FILTER_FLAG_TX &&
- left->dmaq_id != right->dmaq_id)
- return false;
-
- return true;
-}
-
-/*
- * Construct/deconstruct external filter IDs. At least the RX filter
- * IDs must be ordered by matching priority, for RX NFC semantics.
- *
- * Deconstruction needs to be robust against invalid IDs so that
- * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
- * accept user-provided IDs.
- */
-
-#define EFX_FILTER_MATCH_PRI_COUNT 5
-
-static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
- [EFX_FILTER_TCP_FULL] = 0,
- [EFX_FILTER_UDP_FULL] = 0,
- [EFX_FILTER_TCP_WILD] = 1,
- [EFX_FILTER_UDP_WILD] = 1,
- [EFX_FILTER_MAC_FULL] = 2,
- [EFX_FILTER_MAC_WILD] = 3,
- [EFX_FILTER_UC_DEF] = 4,
- [EFX_FILTER_MC_DEF] = 4,
-};
-
-static const enum efx_filter_table_id efx_filter_range_table[] = {
- EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
- EFX_FILTER_TABLE_RX_IP,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
- EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
- EFX_FILTER_TABLE_COUNT, /* invalid */
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
-};
-
-#define EFX_FILTER_INDEX_WIDTH 13
-#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
-
-static inline u32
-efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
-{
- unsigned int range;
-
- range = efx_filter_type_match_pri[spec->type];
- if (!(spec->flags & EFX_FILTER_FLAG_RX))
- range += EFX_FILTER_MATCH_PRI_COUNT;
-
- return range << EFX_FILTER_INDEX_WIDTH | index;
-}
-
-static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < ARRAY_SIZE(efx_filter_range_table))
- return efx_filter_range_table[range];
- else
- return EFX_FILTER_TABLE_COUNT; /* invalid */
-}
-
-static inline unsigned int efx_filter_id_index(u32 id)
-{
- return id & EFX_FILTER_INDEX_MASK;
-}
-
-static inline u8 efx_filter_id_flags(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < EFX_FILTER_MATCH_PRI_COUNT)
- return EFX_FILTER_FLAG_RX;
- else
- return EFX_FILTER_FLAG_TX;
-}
-
-u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
- enum efx_filter_table_id table_id;
-
- do {
- table_id = efx_filter_range_table[range];
- if (state->table[table_id].size != 0)
- return range << EFX_FILTER_INDEX_WIDTH |
- state->table[table_id].size;
- } while (range--);
-
- return 0;
-}
-
-/**
- * efx_filter_insert_filter - add or replace a filter
- * @efx: NIC in which to insert the filter
- * @spec: Specification for the filter
- * @replace_equal: Flag for whether the specified filter may replace an
- * existing filter with equal priority
- *
- * On success, return the filter ID.
- * On failure, return a negative error code.
- *
- * If an existing filter has equal match values to the new filter
- * spec, then the new filter might replace it, depending on the
- * relative priorities. If the existing filter has lower priority, or
- * if @replace_equal is set and it has equal priority, then it is
- * replaced. Otherwise the function fails, returning -%EPERM if
- * the existing filter has higher priority or -%EEXIST if it has
- * equal priority.
- */
-s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace_equal)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- efx_oword_t filter;
- int rep_index, ins_index;
- unsigned int depth = 0;
- int rc;
-
- if (!table || table->size == 0)
- return -EINVAL;
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: type %d search_depth=%d", __func__, spec->type,
- table->search_depth[spec->type]);
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- rep_index = spec->type - EFX_FILTER_UC_DEF;
- ins_index = rep_index;
-
- spin_lock_bh(&state->lock);
- } else {
- /* Search concurrently for
- * (1) a filter to be replaced (rep_index): any filter
- * with the same match values, up to the current
- * search depth for this type, and
- * (2) the insertion point (ins_index): (1) or any
- * free slot before it or up to the maximum search
- * depth for this priority
- * We fail if we cannot find (2).
- *
- * We can stop once either
- * (a) we find (1), in which case we have definitely
- * found (2) as well; or
- * (b) we have searched exhaustively for (1), and have
- * either found (2) or searched exhaustively for it
- */
- u32 key = efx_filter_build(&filter, spec);
- unsigned int hash = efx_filter_hash(key);
- unsigned int incr = efx_filter_increment(key);
- unsigned int max_rep_depth = table->search_depth[spec->type];
- unsigned int max_ins_depth =
- spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
- unsigned int i = hash & (table->size - 1);
-
- ins_index = -1;
- depth = 1;
-
- spin_lock_bh(&state->lock);
-
- for (;;) {
- if (!test_bit(i, table->used_bitmap)) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_equal(spec, &table->spec[i])) {
- /* Case (a) */
- if (ins_index < 0)
- ins_index = i;
- rep_index = i;
- break;
- }
-
- if (depth >= max_rep_depth &&
- (ins_index >= 0 || depth >= max_ins_depth)) {
- /* Case (b) */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out;
- }
- rep_index = -1;
- break;
- }
-
- i = (i + incr) & (table->size - 1);
- ++depth;
- }
- }
-
- /* If we found a filter to be replaced, check whether we
- * should do so
- */
- if (rep_index >= 0) {
- struct efx_filter_spec *saved_spec = &table->spec[rep_index];
-
- if (spec->priority == saved_spec->priority && !replace_equal) {
- rc = -EEXIST;
- goto out;
- }
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
- }
- }
-
- /* Insert the filter */
- if (ins_index != rep_index) {
- __set_bit(ins_index, table->used_bitmap);
- ++table->used;
- }
- table->spec[ins_index] = *spec;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- efx_filter_push_rx_config(efx);
- } else {
- if (table->search_depth[spec->type] < depth) {
- table->search_depth[spec->type] = depth;
- if (spec->flags & EFX_FILTER_FLAG_TX)
- efx_filter_push_tx_limits(efx);
- else
- efx_filter_push_rx_config(efx);
- }
-
- efx_writeo(efx, &filter,
- table->offset + table->step * ins_index);
-
- /* If we were able to replace a filter by inserting
- * at a lower depth, clear the replaced filter
- */
- if (ins_index != rep_index && rep_index >= 0)
- efx_filter_table_clear_entry(efx, table, rep_index);
- }
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: filter type %d index %d rxq %u set",
- __func__, spec->type, ins_index, spec->dmaq_id);
- rc = efx_filter_make_id(spec, ins_index);
-
-out:
- spin_unlock_bh(&state->lock);
- return rc;
-}
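The kerneldoc above describes the replace semantics; the following is an editorial sketch (not part of this patch) of how a caller might use the interface being removed here. The error values follow the kerneldoc for efx_filter_insert_filter(); the address and port are parameters rather than invented constants.

static int example_steer_tcp_to_q0(struct efx_nic *efx,
				   __be32 local_ip, __be16 local_port)
{
	struct efx_filter_spec spec;
	s32 filter_id;
	int rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, 0);
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
				       local_ip, local_port);
	if (rc)
		return rc;

	filter_id = efx_filter_insert_filter(efx, &spec, false);
	if (filter_id < 0)
		return filter_id;	/* -EEXIST, -EPERM, -EBUSY, ... */

	/* filter_id can later be passed to efx_filter_remove_id_safe() */
	return 0;
}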
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx)
-{
- static efx_oword_t filter;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* RX default filters must always exist */
- efx_filter_reset_rx_def(efx, filter_idx);
- efx_filter_push_rx_config(efx);
- } else if (test_bit(filter_idx, table->used_bitmap)) {
- __clear_bit(filter_idx, table->used_bitmap);
- --table->used;
- memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
-
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
-}
-
-/**
- * efx_filter_remove_id_safe - remove a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- struct efx_filter_spec *spec;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-/**
- * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- * @spec: Buffer in which to store filter specification
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec_buf)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- *spec_buf = *spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-static void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[table_id];
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- if (table->spec[filter_idx].priority <= priority)
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
-}
-
-/**
- * efx_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
-{
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
-}
-
-u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- u32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority)
- ++count;
- }
- }
-
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- goto out;
- }
- buf[count++] = efx_filter_make_id(
- &table->spec[filter_idx], filter_idx);
- }
- }
- }
-out:
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-/* Restore filter state after reset */
-void efx_restore_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
-
- /* Check whether this is a regular register table */
- if (table->step == 0)
- continue;
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap))
- continue;
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
- efx_filter_push_tx_limits(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-int efx_probe_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state;
- struct efx_filter_table *table;
- unsigned table_id;
-
- state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
- efx->filter_state = state;
-
- spin_lock_init(&state->lock);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-#ifdef CONFIG_RFS_ACCEL
- state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
- sizeof(*state->rps_flow_id),
- GFP_KERNEL);
- if (!state->rps_flow_id)
- goto fail;
-#endif
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- table->id = EFX_FILTER_TABLE_RX_IP;
- table->offset = FR_BZ_RX_FILTER_TBL0;
- table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
- table->step = FR_BZ_RX_FILTER_TBL0_STEP;
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- table->id = EFX_FILTER_TABLE_RX_MAC;
- table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
- table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- table->id = EFX_FILTER_TABLE_RX_DEF;
- table->size = EFX_FILTER_SIZE_RX_DEF;
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- table->id = EFX_FILTER_TABLE_TX_MAC;
- table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
- table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
- }
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
- if (table->size == 0)
- continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!table->used_bitmap)
- goto fail;
- table->spec = vzalloc(table->size * sizeof(*table->spec));
- if (!table->spec)
- goto fail;
- }
-
- if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
- /* RX default filters must always exist */
- unsigned i;
- for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
- efx_filter_reset_rx_def(efx, i);
- }
-
- efx_filter_push_rx_config(efx);
-
- return 0;
-
-fail:
- efx_remove_filters(efx);
- return -ENOMEM;
-}
-
-void efx_remove_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
- vfree(state->table[table_id].spec);
- }
-#ifdef CONFIG_RFS_ACCEL
- kfree(state->rps_flow_id);
-#endif
- kfree(state);
-}
-
-/* Update scatter enable flags for filters pointing to our own RX queues */
-void efx_filter_update_rx_scatter(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap) ||
- table->spec[filter_idx].dmaq_id >=
- efx->n_rx_channels)
- continue;
-
- if (efx->rx_scatter)
- table->spec[filter_idx].flags |=
- EFX_FILTER_FLAG_RX_SCATTER;
- else
- table->spec[filter_idx].flags &=
- ~EFX_FILTER_FLAG_RX_SCATTER;
-
- if (table_id == EFX_FILTER_TABLE_RX_DEF)
- /* Pushed by efx_filter_push_rx_config() */
- continue;
-
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-#ifdef CONFIG_RFS_ACCEL
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_spec spec;
- const struct iphdr *ip;
- const __be16 *ports;
- int nhoff;
- int rc;
-
- nhoff = skb_network_offset(skb);
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
- if (((const struct vlan_hdr *)(skb->data + nhoff))->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
-
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
- */
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
- }
-
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
- efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
- rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &spec, true);
- if (rc < 0)
- return rc;
-
- /* Remember this so we can check whether to expire the filter later */
- state->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
- ++channel->rfs_filters_added;
-
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
-
- return rc;
-}
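efx_filter_rfs() has the ndo_rx_flow_steer signature; a minimal sketch of how such a hook is registered (the real assignment lives in efx.c, outside this diff) might look like:

#ifdef CONFIG_RFS_ACCEL
static const struct net_device_ops example_netdev_ops = {
	/* ...other ndo_* callbacks elided... */
	.ndo_rx_flow_steer	= efx_filter_rfs,
};
#endif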
-
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
- unsigned mask = table->size - 1;
- unsigned index;
- unsigned stop;
-
- if (!spin_trylock_bh(&state->lock))
- return false;
-
- index = state->rps_expire_index;
- stop = (index + quota) & mask;
-
- while (index != stop) {
- if (test_bit(index, table->used_bitmap) &&
- table->spec[index].priority == EFX_FILTER_PRI_HINT &&
- rps_may_expire_flow(efx->net_dev,
- table->spec[index].dmaq_id,
- state->rps_flow_id[index], index)) {
- netif_info(efx, rx_status, efx->net_dev,
- "expiring filter %d [flow %u]\n",
- index, state->rps_flow_id[index]);
- efx_filter_table_clear_entry(efx, table, index);
- }
- index = (index + 1) & mask;
- }
-
- state->rps_expire_index = stop;
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
- return true;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5cb54723b82..63c77a55717 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,32 +11,49 @@
#define EFX_FILTER_H
#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
/**
- * enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
- * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
- * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
- * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
- * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
- * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
- * @EFX_FILTER_UNSPEC: Match type is unspecified
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * Used for RX default unicast and multicast/broadcast filters.
*
- * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
*/
-enum efx_filter_type {
- EFX_FILTER_TCP_FULL = 0,
- EFX_FILTER_TCP_WILD,
- EFX_FILTER_UDP_FULL,
- EFX_FILTER_UDP_WILD,
- EFX_FILTER_MAC_FULL = 4,
- EFX_FILTER_MAC_WILD,
- EFX_FILTER_UC_DEF = 8,
- EFX_FILTER_MC_DEF,
- EFX_FILTER_TYPE_COUNT, /* number of specific types */
- EFX_FILTER_UNSPEC = 0xf,
+enum efx_filter_match_flags {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001,
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+ EFX_FILTER_MATCH_REM_MAC = 0x0004,
+ EFX_FILTER_MATCH_REM_PORT = 0x0008,
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EFX_FILTER_MATCH_INNER_VID = 0x0080,
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**
@@ -61,37 +78,75 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
+ * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
+ * network stack. The filter must have a priority of
+ * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
+ * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
+ * request with priority %EFX_FILTER_PRI_MANUAL will reset the
+ * steering (but not remove the filter).
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_STACK = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
/**
* struct efx_filter_spec - specification for a hardware filter
- * @type: Type of match to be performed, from &enum efx_filter_type
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
- * @dmaq_id: Source/target queue index
- * @data: Match data (type-dependent)
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ * %EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
*
- * Use the efx_filter_set_*() functions to initialise the @type and
- * @data fields.
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
- * depends on the filter type.
+ * depends on which fields are matched.
*/
struct efx_filter_spec {
- u8 type:4;
- u8 priority:4;
- u8 flags;
- u16 dmaq_id;
- u32 data[3];
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ /* total 64 bytes */
+};
+
+enum {
+ EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+ EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_flags flags,
unsigned rxq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
unsigned txq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = EFX_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
-extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port);
-extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port);
-extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport);
-extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport);
-extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr);
-extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr);
-extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
-extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
+
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->loc_mac, addr, ETH_ALEN);
+ }
+ return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->loc_mac[0] = 1;
+ return 0;
+}
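As an editorial sketch (not part of this patch), here is how a spec might be built with the new match-flag helpers above. Hosts and ports are parameters rather than invented constants; EFX_FILTER_RX_DMAQ_ID_DROP turns this into an RX drop filter instead of steering to a queue.

static int example_build_tcp_drop_filter(struct efx_filter_spec *spec,
					 __be32 lhost, __be16 lport,
					 __be32 rhost, __be16 rport)
{
	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, 0,
			   EFX_FILTER_RX_DMAQ_ID_DROP);
	return efx_filter_set_ipv4_full(spec, IPPROTO_TCP,
					lhost, lport, rhost, rport);
}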
+
#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96759aee1c6..96ce507d860 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
*
**************************************************************************
*
- * Notes on locking strategy:
+ * Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
*/
#if BITS_PER_LONG == 64
@@ -83,7 +89,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
}
/* Write a normal 128-bit CSR, locking as appropriate. */
-static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
@@ -108,7 +114,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
- efx_qword_t *value, unsigned int index)
+ const efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
@@ -129,7 +135,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
-static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
@@ -190,8 +196,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
}
/* Write a 128-bit CSR forming part of a table */
-static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
- unsigned int reg, unsigned int index)
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
@@ -203,12 +210,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page-mapped register block size */
-#define EFX_PAGE_BLOCK_SIZE 0x2000
+/* Page size used as step between per-VI registers */
+#define EFX_VI_PAGE_SIZE 0x2000
-/* Calculate offset to page-mapped register block */
+/* Calculate offset to page-mapped register */
#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+ ((page) * EFX_VI_PAGE_SIZE + (reg))
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
@@ -236,19 +243,24 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
-/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
- * RX_DESC_UPD or TX_DESC_UPD)
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
-static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int page)
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
- BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
- && (reg) != 0xa1c), \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
@@ -256,7 +268,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
* collector register.
*/
static inline void _efx_writed_page_locked(struct efx_nic *efx,
- efx_dword_t *value,
+ const efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 97dd8f18c00..c082562dbf4 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,11 @@
*/
#include <linux/delay.h>
+#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"
@@ -24,112 +25,235 @@
#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_PDU(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
-#define MCDI_DOORBELL(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
-#define MCDI_STATUS(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 10ms for the status word to be set. */
+ * via these mechanisms then wait 250ms for the status word to be set.
+ */
#define MCDI_STATUS_DELAY_US 100
-#define MCDI_STATUS_DELAY_COUNT 100
+#define MCDI_STATUS_DELAY_COUNT 2500
#define MCDI_STATUS_SLEEP_MS \
(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
#define SEQ_MASK \
EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+struct efx_mcdi_async_param {
+ struct list_head list;
+ unsigned int cmd;
+ size_t inlen;
+ size_t outlen;
+ efx_mcdi_async_completer *complete;
+ unsigned long cookie;
+ /* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(unsigned long context);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->mcdi;
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
}
-void efx_mcdi_init(struct efx_nic *efx)
+int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
+ bool already_attached;
+ int rc;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return;
+ efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+ if (!efx->mcdi)
+ return -ENOMEM;
mcdi = efx_mcdi(efx);
+ mcdi->efx = efx;
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ mcdi->state = MCDI_STATE_QUIESCENT;
mcdi->mode = MCDI_MODE_POLL;
+ spin_lock_init(&mcdi->async_lock);
+ INIT_LIST_HEAD(&mcdi->async_list);
+ setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
+ (unsigned long)mcdi);
(void) efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Let the MC (and BMC, if this is a LOM) know that the driver
+ * is loaded. We should do this before we reset the NIC.
+ */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ return rc;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ return 0;
}
-static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen)
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+ BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+ /* Relinquish the device (back to the BMC, if this is a LOM) */
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
- unsigned int i;
- efx_dword_t hdr;
+ efx_dword_t hdr[2];
+ size_t hdr_len;
u32 xflags, seqno;
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
+ BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
- EFX_POPULATE_DWORD_6(hdr,
- MCDI_HEADER_RESPONSE, 0,
- MCDI_HEADER_RESYNC, 1,
- MCDI_HEADER_CODE, cmd,
- MCDI_HEADER_DATALEN, inlen,
- MCDI_HEADER_SEQ, seqno,
- MCDI_HEADER_XFLAGS, xflags);
-
- efx_writed(efx, &hdr, pdu);
+ if (efx->type->mcdi_max_ver == 1) {
+ /* MCDI v1 */
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ hdr_len = 4;
+ } else {
+ /* MCDI v2 */
+ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ hdr_len = 8;
+ }
- for (i = 0; i < inlen; i += 4)
- _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
- /* Ensure the payload is written out before the header */
- wmb();
+ mcdi->new_epoch = false;
+}
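As an editorial summary of the two header formats assembled here (exact bit positions live in mcdi_pcol.h and are not reproduced):

/* v1:  dword0 = { CODE = cmd, DATALEN = inlen, SEQ, XFLAGS, NOT_EPOCH }
 * v2:  dword0 = { CODE = MC_CMD_V2_EXTN, DATALEN = 0, SEQ, XFLAGS, NOT_EPOCH }
 *      dword1 = { EXTENDED_CMD = cmd, ACTUAL_LEN = inlen }
 *
 * efx_mcdi_read_response_header() mirrors this: a response code of
 * MC_CMD_V2_EXTN selects the 8-byte header and takes the real length from
 * the second dword.  v2 also lifts the v1 limits, allowing commands above
 * MC_CMD_CMD_SPACE_ESCAPE_7 and payloads up to MCDI_CTL_SDU_LEN_MAX_V2
 * (see efx_mcdi_check_supported() below).
 */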
- /* ring the doorbell with a distinctive value */
- _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ return 0;
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ return -name;
+ TRANSLATE_ERROR(EPERM);
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EAGAIN);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+ TRANSLATE_ERROR(EALREADY);
+ TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EPROTO;
+ }
}
-static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- int i;
+ unsigned int respseq, respcmd, error;
+ efx_dword_t hdr;
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
+ efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+ respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
+
+ if (respcmd != MC_CMD_V2_EXTN) {
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+ } else {
+ efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+ mcdi->resp_hdr_len = 8;
+ mcdi->resp_data_len =
+ EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
- for (i = 0; i < outlen; i += 4)
- *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+ if (error && mcdi->resp_data_len == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ mcdi->resprc = -EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ mcdi->resprc = -EIO;
+ } else if (error) {
+ efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+ mcdi->resprc =
+ efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ } else {
+ mcdi->resprc = 0;
+ }
}
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned long time, finish;
- unsigned int respseq, respcmd, error;
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned int rc, spins;
- efx_dword_t reg;
+ unsigned int spins;
+ int rc;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
- rc = -efx_mcdi_poll_reboot(efx);
- if (rc)
- goto out;
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc) {
+ spin_lock_bh(&mcdi->iface_lock);
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ spin_unlock_bh(&mcdi->iface_lock);
+ return 0;
+ }
/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
* because generally mcdi responses are fast. After that, back off
@@ -149,59 +273,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
rmb();
- efx_readd(efx, &reg, pdu);
-
- /* All 1's indicates that shared memory is in reset (and is
- * not a valid header). Wait for it to come out reset before
- * completing the command */
- if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
- EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ if (efx->type->mcdi_poll_response(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
- respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
- respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
- error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
-
- if (error && mcdi->resplen == 0) {
- netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
- rc = EIO;
- } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
- netif_err(efx, hw, efx->net_dev,
- "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
- respseq, mcdi->seqno);
- rc = EIO;
- } else if (error) {
- efx_readd(efx, &reg, pdu + 4);
- switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
-#define TRANSLATE_ERROR(name) \
- case MC_CMD_ERR_ ## name: \
- rc = name; \
- break
- TRANSLATE_ERROR(ENOENT);
- TRANSLATE_ERROR(EINTR);
- TRANSLATE_ERROR(EACCES);
- TRANSLATE_ERROR(EBUSY);
- TRANSLATE_ERROR(EINVAL);
- TRANSLATE_ERROR(EDEADLK);
- TRANSLATE_ERROR(ENOSYS);
- TRANSLATE_ERROR(ETIME);
-#undef TRANSLATE_ERROR
- default:
- rc = EIO;
- break;
- }
- } else
- rc = 0;
-
-out:
- mcdi->resprc = rc;
- if (rc)
- mcdi->resplen = 0;
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
/* Return rc=0 like wait_event_timeout() */
return 0;
@@ -212,52 +293,36 @@ out:
*/
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
- efx_dword_t reg;
- uint32_t value;
-
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return false;
-
- efx_readd(efx, &reg, addr);
- value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
- if (value == 0)
+ if (!efx->mcdi)
return 0;
- /* MAC statistics have been cleared on the NIC; clear our copy
- * so that efx_update_diff_stat() can continue to work.
- */
- memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
-
- EFX_ZERO_DWORD(reg);
- efx_writed(efx, &reg, addr);
+ return efx->type->mcdi_poll_reboot(efx);
+}
- if (value == MC_STATUS_DWORD_ASSERT)
- return -EINTR;
- else
- return -EIO;
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+ return cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+ MCDI_STATE_QUIESCENT;
}
-static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
/* Wait until the interface becomes QUIESCENT and we win the race
- * to mark it RUNNING. */
+ * to mark it RUNNING_SYNC.
+ */
wait_event(mcdi->wq,
- atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING)
- == MCDI_STATE_QUIESCENT);
+ cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+ MCDI_STATE_QUIESCENT);
}
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- if (wait_event_timeout(
- mcdi->wq,
- atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
- MCDI_RPC_TIMEOUT) == 0)
+ if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+ MCDI_RPC_TIMEOUT) == 0)
return -ETIMEDOUT;
/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -274,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
return 0;
}
-static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
- /* If the interface is RUNNING, then move to COMPLETED and wake any
- * waiters. If the interface isn't in RUNNING then we've received a
- * duplicate completion after we've already transitioned back to
- * QUIESCENT. [A subsequent invocation would increment seqno, so would
- * have failed the seqno check].
- */
- if (atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_RUNNING,
- MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+ MCDI_STATE_RUNNING_SYNC) {
wake_up(&mcdi->wq);
return true;
}
@@ -294,12 +356,93 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ struct efx_mcdi_async_param *async;
+ struct efx_nic *efx = mcdi->efx;
+
+ /* Process the asynchronous request queue */
+ spin_lock_bh(&mcdi->async_lock);
+ async = list_first_entry_or_null(
+ &mcdi->async_list, struct efx_mcdi_async_param, list);
+ if (async) {
+ mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+ efx_mcdi_send_request(efx, async->cmd,
+ (const efx_dword_t *)(async + 1),
+ async->inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ spin_unlock_bh(&mcdi->async_lock);
+
+ if (async)
+ return;
+ }
+
+ mcdi->state = MCDI_STATE_QUIESCENT;
wake_up(&mcdi->wq);
}
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+ struct efx_nic *efx = mcdi->efx;
+ struct efx_mcdi_async_param *async;
+ size_t hdr_len, data_len;
+ efx_dword_t *outbuf;
+ int rc;
+
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+ MCDI_STATE_RUNNING_ASYNC)
+ return false;
+
+ spin_lock(&mcdi->iface_lock);
+ if (timeout) {
+ /* Ensure that if the completion event arrives later,
+ * the seqno check in efx_mcdi_ev_cpl() will fail
+ */
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ rc = -ETIMEDOUT;
+ hdr_len = 0;
+ data_len = 0;
+ } else {
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ }
+ spin_unlock(&mcdi->iface_lock);
+
+ /* Stop the timer. In case the timer function is running, we
+ * must wait for it to return so that there is no possibility
+ * of it aborting the next request.
+ */
+ if (!timeout)
+ del_timer_sync(&mcdi->async_timer);
+
+ spin_lock(&mcdi->async_lock);
+ async = list_first_entry(&mcdi->async_list,
+ struct efx_mcdi_async_param, list);
+ list_del(&async->list);
+ spin_unlock(&mcdi->async_lock);
+
+ outbuf = (efx_dword_t *)(async + 1);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(async->outlen, data_len));
+ async->complete(efx, async->cookie, rc, outbuf, data_len);
+ kfree(async);
+
+ efx_mcdi_release(mcdi);
+
+ return true;
+}
+
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
- unsigned int datalen, unsigned int errno)
+ unsigned int datalen, unsigned int mcdi_err)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool wake = false;
@@ -315,52 +458,161 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
"MC response mismatch tx seq 0x%x rx "
"seq 0x%x\n", seqno, mcdi->seqno);
} else {
- mcdi->resprc = errno;
- mcdi->resplen = datalen;
+ if (efx->type->mcdi_max_ver >= 2) {
+ /* MCDI v2 responses don't fit in an event */
+ efx_mcdi_read_response_header(efx);
+ } else {
+ mcdi->resprc = efx_mcdi_errno(mcdi_err);
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = datalen;
+ }
wake = true;
}
spin_unlock(&mcdi->iface_lock);
- if (wake)
- efx_mcdi_complete(mcdi);
+ if (wake) {
+ if (!efx_mcdi_complete_async(mcdi, false))
+ (void) efx_mcdi_complete_sync(mcdi);
+
+ /* If the interface isn't RUNNING_ASYNC or
+ * RUNNING_SYNC then we've received a duplicate
+ * completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment
+ * seqno, so would have failed the seqno check].
+ */
+ }
+}
+
+static void efx_mcdi_timeout_async(unsigned long context)
+{
+ struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+
+ efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+ if (efx->type->mcdi_max_ver < 0 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+ return -EINVAL;
+
+ if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+ return -EMSGSIZE;
+
+ return 0;
}
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc)
+ return rc;
return efx_mcdi_rpc_finish(efx, cmd, inlen,
outbuf, outlen, outlen_actual);
}
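An editorial sketch (not part of this patch) of a synchronous call through the reworked efx_dword_t-based API; the command number 0x42 and the buffer sizes are placeholders, not definitions from mcdi_pcol.h.

static int example_sync_mcdi_call(struct efx_nic *efx)
{
	efx_dword_t inbuf[2];	/* hypothetical 8-byte request */
	efx_dword_t outbuf[4];	/* hypothetical 16-byte response */
	size_t outlen;
	int rc;

	memset(inbuf, 0, sizeof(inbuf));
	rc = efx_mcdi_rpc(efx, 0x42, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;	/* already translated to a -errno value */
	if (outlen < sizeof(outbuf))
		return -EIO;	/* response shorter than expected */
	return 0;
}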
-void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen)
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ return 0;
+}
- /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- spin_unlock_bh(&mcdi->iface_lock);
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ struct efx_mcdi_async_param *async;
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+ GFP_ATOMIC);
+ if (!async)
+ return -ENOMEM;
+
+ async->cmd = cmd;
+ async->inlen = inlen;
+ async->outlen = outlen;
+ async->complete = complete;
+ async->cookie = cookie;
+ memcpy(async + 1, inbuf, inlen);
+
+ spin_lock_bh(&mcdi->async_lock);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ list_add_tail(&async->list, &mcdi->async_list);
+
+ /* If this is at the front of the queue, try to start it
+ * immediately
+ */
+ if (mcdi->async_list.next == &async->list &&
+ efx_mcdi_acquire_async(mcdi)) {
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ } else {
+ kfree(async);
+ rc = -ENETDOWN;
+ }
+
+ spin_unlock_bh(&mcdi->async_lock);
- efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+ return rc;
}
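An editorial sketch (not part of this patch) of queueing an asynchronous command. The completer's argument list follows the call made from efx_mcdi_complete_async() above; the command number and lengths are placeholders. Note that outbuf is NULL when a queued request is cancelled by efx_mcdi_flush_async().

static void example_mcdi_done(struct efx_nic *efx, unsigned long cookie,
			      int rc, efx_dword_t *outbuf,
			      size_t outlen_actual)
{
	if (rc)
		netif_err(efx, hw, efx->net_dev,
			  "async MCDI (cookie %#lx) failed: %d\n",
			  cookie, rc);
}

static int example_async_mcdi_call(struct efx_nic *efx)
{
	efx_dword_t inbuf[2];	/* hypothetical 8-byte request */

	memset(inbuf, 0, sizeof(inbuf));
	/* Fails with -ENETDOWN unless MCDI is in event-completion mode */
	return efx_mcdi_rpc_async(efx, 0x42, inbuf, sizeof(inbuf), 16,
				  example_mcdi_done, 0);
}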
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen, size_t *outlen_actual)
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-
if (mcdi->mode == MCDI_MODE_POLL)
rc = efx_mcdi_poll(efx);
else
@@ -380,22 +632,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
"MC command 0x%x inlen %d mode %d timed out\n",
cmd, (int)inlen, mcdi->mode);
} else {
- size_t resplen;
+ size_t hdr_len, data_len;
/* At the very least we need a memory barrier here to ensure
* we pick up changes from efx_mcdi_ev_cpl(). Protect against
* a spurious efx_mcdi_ev_cpl() running concurrently by
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
- rc = -mcdi->resprc;
- resplen = mcdi->resplen;
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
spin_unlock_bh(&mcdi->iface_lock);
+ BUG_ON(rc > 0);
+
if (rc == 0) {
- efx_mcdi_copyout(efx, outbuf,
- min(outlen, mcdi->resplen + 3) & ~0x3);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
if (outlen_actual != NULL)
- *outlen_actual = resplen;
+ *outlen_actual = data_len;
} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) {
@@ -410,6 +665,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
if (rc == -EIO || rc == -EINTR) {
msleep(MCDI_STATUS_SLEEP_MS);
efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
}
}
@@ -417,11 +673,15 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
return rc;
}
+/* Switch to polled MCDI completions. This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -434,18 +694,57 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
* efx_mcdi_await_completion() will then call efx_mcdi_poll().
*
* We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
- * which efx_mcdi_complete() provides for us.
+ * which efx_mcdi_complete_sync() provides for us.
*/
mcdi->mode = MCDI_MODE_POLL;
- efx_mcdi_complete(mcdi);
+ efx_mcdi_complete_sync(mcdi);
+}
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_mcdi_flush_async(struct efx_nic *efx)
+{
+ struct efx_mcdi_async_param *async, *next;
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ /* We must be in polling mode so no more requests can be queued */
+ BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+
+ del_timer_sync(&mcdi->async_timer);
+
+ /* If a request is still running, make sure we give the MC
+ * time to complete it so that the response won't overwrite our
+ * next request.
+ */
+ if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+ efx_mcdi_poll(efx);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ }
+
+ /* Nothing else will access the async list now, so it is safe
+ * to walk it without holding async_lock. If we hold it while
+ * calling a completer then lockdep may warn that we have
+ * acquired locks in the wrong order.
+ */
+ list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ list_del(&async->list);
+ kfree(async);
+ }
}
void efx_mcdi_mode_event(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -460,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
* write memory barrier ensure that efx_mcdi_rpc() sees it, which
* efx_mcdi_acquire() provides.
*/
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
mcdi->mode = MCDI_MODE_EVENTS;
efx_mcdi_release(mcdi);
}
@@ -477,68 +776,45 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
* are sent to the same queue, we can't be racing with
* efx_mcdi_ev_cpl()]
*
- * There's a race here with efx_mcdi_rpc(), because we might receive
- * a REBOOT event *before* the request has been copied out. In polled
- * mode (during startup) this is irrelevant, because efx_mcdi_complete()
- * is ignored. In event mode, this condition is just an edge-case of
- * receiving a REBOOT event after posting the MCDI request. Did the mc
- * reboot before or after the copyout? The best we can do always is
- * just return failure.
+ * If there is an outstanding asynchronous request, we can't
+ * complete it now (efx_mcdi_complete() would deadlock). The
+ * reset process will take care of this.
+ *
+ * There's a race here with efx_mcdi_send_request(), because
+ * we might receive a REBOOT event *before* the request has
+ * been copied out. In polled mode (during startup) this is
+ * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+ * event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI
+	 * request. Did the MC reboot before or after the copyout? The
+	 * best we can always do is just return failure.
*/
spin_lock(&mcdi->iface_lock);
- if (efx_mcdi_complete(mcdi)) {
+ if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
- mcdi->resplen = 0;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
++mcdi->credits;
}
} else {
int count;
- /* Nobody was waiting for an MCDI request, so trigger a reset */
- efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
-
/* Consume the status word since efx_mcdi_rpc_finish() won't */
for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
if (efx_mcdi_poll_reboot(efx))
break;
udelay(MCDI_STATUS_DELAY_US);
}
+ mcdi->new_epoch = true;
+
+ /* Nobody was waiting for an MCDI request, so trigger a reset */
+ efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
}
spin_unlock(&mcdi->iface_lock);
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
-};
-
-
-static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -551,7 +827,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_BADSSERT:
netif_err(efx, hw, efx->net_dev,
"MC watchdog or assertion failure at 0x%x\n", data);
- efx_mcdi_ev_death(efx, EINTR);
+ efx_mcdi_ev_death(efx, -EINTR);
break;
case MCDI_EVENT_CODE_PMNOTICE:
@@ -576,8 +852,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
"MC Scheduler error address=0x%x\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
+ case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
- efx_mcdi_ev_death(efx, EIO);
+ efx_mcdi_ev_death(efx, -EIO);
break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
@@ -590,7 +867,27 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
-
+ case MCDI_EVENT_CODE_TX_FLUSH:
+ case MCDI_EVENT_CODE_RX_FLUSH:
+ /* Two flush events will be sent: one to the same event
+ * queue as completions, and one to event queue 0.
+ * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+ * flag will be set, and we should ignore the event
+ * because we want to wait for all completions.
+ */
+ BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+ MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+ if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+ efx_ef10_handle_drain_event(efx);
+ break;
+ case MCDI_EVENT_CODE_TX_ERR:
+ case MCDI_EVENT_CODE_RX_ERR:
+ netif_err(efx, hw, efx->net_dev,
+ "%s DMA error (event: "EFX_QWORD_FMT")\n",
+ code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+ EFX_QWORD_VAL(*event));
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
@@ -606,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
+ MCDI_DECLARE_BUF(outbuf,
+ max(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_CAPABILITIES_OUT_LEN));
size_t outlength;
const __le16 *ver_words;
+ size_t offset;
int rc;
BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
-
rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc)
goto fail;
-
if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
rc = -EIO;
goto fail;
}
ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
- snprintf(buf, len, "%u.%u.%u.%u",
- le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
- le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ offset = snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+
+ /* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
+ offset += snprintf(
+ buf + offset, len - offset, " rx? tx?");
+ else
+ offset += snprintf(
+ buf + offset, len - offset, " rx%x tx%x",
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
+ }
+
return;
fail:
@@ -634,17 +959,18 @@ fail:
buf[0] = 0;
}
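As a hedged usage sketch (not from the patch): efx_mcdi_print_fwver() is sized for an ethtool firmware-version string, which is why the comment above worries about exceeding 31 characters — ethtool_drvinfo::fw_version is a 32-byte buffer. A get_drvinfo handler would consume it roughly like this; the function name is invented.

static void efx_example_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* e.g. "4.2.1.1022 rx1 tx1" on EF10, or "... rx? tx?" if the
	 * capabilities query fails */
	efx_mcdi_print_fwver(efx, info->fw_version,
			     sizeof(info->fw_version));
}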
-int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached)
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
{
- u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
- u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
driver_operating ? 1 : 0);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -667,8 +993,8 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
- size_t outlen, offset, i;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+ size_t outlen, i;
int port_num = efx_port_num(efx);
int rc;
@@ -684,22 +1010,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
}
- offset = (port_num)
- ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
- : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
if (mac_address)
- memcpy(mac_address, outbuf + offset, ETH_ALEN);
+ memcpy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
+ ETH_ALEN);
if (fw_subtype_list) {
- /* Byte-swap and truncate or zero-pad as necessary */
- offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
for (i = 0;
- i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
- i++) {
- fw_subtype_list[i] =
- (offset + 2 <= outlen) ?
- le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
- offset += 2;
- }
+ i < MCDI_VAR_ARRAY_LEN(outlen,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ i++)
+ fw_subtype_list[i] = MCDI_ARRAY_WORD(
+ outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+ for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+ fw_subtype_list[i] = 0;
}
if (capabilities) {
if (port_num)
@@ -721,7 +1046,7 @@ fail:
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
- u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
u32 dest = 0;
int rc;
@@ -749,7 +1074,7 @@ fail:
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
- u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
size_t outlen;
int rc;
@@ -777,8 +1102,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out)
{
- u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
size_t outlen;
int rc;
@@ -804,127 +1129,10 @@ fail:
return rc;
}
-int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
- memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
- u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
@@ -976,9 +1184,9 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
- unsigned int flags, index, ofst;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ unsigned int flags, index;
const char *reason;
size_t outlen;
int retry;
@@ -1020,19 +1228,20 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
- ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
- for (index = 1; index < 32; index++) {
- netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(outbuf, ofst));
- ofst += sizeof(efx_dword_t);
- }
+ for (index = 0;
+ index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++)
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+ 1 + index,
+ MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+ index));
return 0;
}
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
@@ -1062,7 +1271,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
- u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
@@ -1080,7 +1289,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
__func__, rc);
}
-int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_port(struct efx_nic *efx)
{
int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
@@ -1089,9 +1298,9 @@ int efx_mcdi_reset_port(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_reset_mc(struct efx_nic *efx)
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
@@ -1107,11 +1316,31 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
return rc;
}
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_port(efx);
+}
+
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
const u8 *mac, int *id_out)
{
- u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
- u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
size_t outlen;
int rc;
@@ -1151,7 +1380,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
- u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
size_t outlen;
int rc;
@@ -1178,7 +1407,7 @@ fail:
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
- u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
@@ -1199,34 +1428,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
- __le32 *qid;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
int rc, count;
BUILD_BUG_ON(EFX_MAX_CHANNELS >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
- if (qid == NULL)
- return -ENOMEM;
-
count = 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
- qid[count++] = cpu_to_le32(
- efx_rx_queue_index(rx_queue));
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ count, efx_rx_queue_index(rx_queue));
+ count++;
}
}
}
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
- count * sizeof(*qid), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
WARN_ON(rc < 0);
- kfree(qid);
-
return rc;
}
@@ -1245,3 +1471,247 @@ fail:
return rc;
}
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+ return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
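An illustrative call site (not part of the patch): enabling an MC workaround during NIC initialisation. MC_CMD_WORKAROUND_BUG17230 is one of the workaround type constants from mcdi_pcol.h; treating -ENOSYS as "firmware does not know this workaround" is an assumption made for the example, as is the helper name.

static int efx_example_enable_workarounds(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG17230, true);
	if (rc && rc != -ENOSYS)	/* tolerate firmware without this workaround */
		return rc;
	return 0;
}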
+
+#ifdef CONFIG_SFC_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf,
+ MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->common.mtd.erasesize;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ /* The MCDI interface can in fact do multiple erase blocks at once;
+ * but erasing may be slow, so we make multiple calls here to avoid
+ * tripping the MCDI RPC timeout. */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc = 0;
+
+ if (part->updating) {
+ part->updating = false;
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ }
+
+ return rc;
+}
+
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_mcdi_mtd_partition *mcdi_part =
+ container_of(part, struct efx_mcdi_mtd_partition, common);
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+ efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
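A hedged sketch of how the MTD helpers above are intended to be consumed: they would be plugged into the per-NIC-type method table rather than directly into struct mtd_info. The .mtd_* member names shown here are assumed for illustration and are not taken from this diff.

static const struct efx_nic_type efx_example_nic_type = {
	/* ... other NIC-type methods ... */
	.mtd_read	= efx_mcdi_mtd_read,
	.mtd_erase	= efx_mcdi_mtd_erase,
	.mtd_write	= efx_mcdi_mtd_write,
	.mtd_sync	= efx_mcdi_mtd_sync,
	.mtd_rename	= efx_mcdi_mtd_rename,
};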
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 3ba2e5b5a9c..c34d0d4e10e 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,18 +11,20 @@
#define EFX_MCDI_H
/**
- * enum efx_mcdi_state
+ * enum efx_mcdi_state - MCDI request handling state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
- * mcdi_lock then they are able to move to MCDI_STATE_RUNNING
- * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
- * moved into this state is allowed to move out of it.
+ * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
- * MCDI_STATE_RUNNING.
+ * %MCDI_STATE_RUNNING.
*/
enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_SYNC,
+ MCDI_STATE_RUNNING_ASYNC,
MCDI_STATE_COMPLETED,
};
@@ -32,28 +34,39 @@ enum efx_mcdi_mode {
};
/**
- * struct efx_mcdi_iface
- * @state: Interface state. Waited for by mcdi_wq.
- * @wq: Wait queue for threads waiting for state != STATE_RUNNING
- * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
- * Serialised by @lock
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
* @seqno: The next sequence number to use for mcdi requests.
- * Serialised by @lock
* @credits: Number of spurious MCDI completion events allowed before we
- * trigger a fatal error. Protected by @lock
- * @resprc: Returned MCDI completion
- * @resplen: Returned payload length
+ * trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ * enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
*/
struct efx_mcdi_iface {
- atomic_t state;
+ struct efx_nic *efx;
+ enum efx_mcdi_state state;
+ enum efx_mcdi_mode mode;
wait_queue_head_t wq;
spinlock_t iface_lock;
- enum efx_mcdi_mode mode;
+ bool new_epoch;
unsigned int credits;
unsigned int seqno;
- unsigned int resprc;
- size_t resplen;
+ int resprc;
+ size_t resp_hdr_len;
+ size_t resp_data_len;
+ spinlock_t async_lock;
+ struct list_head async_list;
+ struct timer_list async_timer;
};
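A rough sketch (not the code in this patch) of how the state machine documented above is driven: acquiring the interface for a synchronous request amounts to waiting on @wq until @state is quiescent and atomically claiming it.

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait for the interface to become free, then claim it for a
	 * synchronous request; cmpxchg() makes the claim atomic against
	 * other would-be senders. */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT,
			   MCDI_STATE_RUNNING_SYNC) == MCDI_STATE_QUIESCENT);
}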
struct efx_mcdi_mon {
@@ -65,65 +78,204 @@ struct efx_mcdi_mon {
unsigned int n_attrs;
};
-extern void efx_mcdi_init(struct efx_nic *efx);
+struct efx_mcdi_mtd_partition {
+ struct efx_mtd_partition common;
+ bool updating;
+ u16 nvram_type;
+ u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ */
+struct efx_mcdi_data {
+ struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
+};
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->hwmon;
+}
+#endif
+
+extern int efx_mcdi_init(struct efx_nic *efx);
+extern void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen, u8 *outbuf, size_t outlen,
+extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+ unsigned long cookie, int rc,
+ efx_dword_t *outbuf,
+ size_t outlen_actual);
+extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);
+extern void efx_mcdi_flush_async(struct efx_nic *efx);
extern void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event);
extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
-#define MCDI_PTR2(_buf, _ofst) \
- (((u8 *)_buf) + _ofst)
-#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
- EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0, _value)
-#define MCDI_DWORD2(_buf, _ofst) \
- EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0)
-#define MCDI_QWORD2(_buf, _ofst) \
- EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_QWORD_0)
-
-#define MCDI_PTR(_buf, _ofst) \
- MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
- MCDI_PTR2(_buf, \
- MC_CMD_ ## _field ## _OFST + \
- (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
-#define MCDI_SET_DWORD(_buf, _ofst, _value) \
- MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
-#define MCDI_DWORD(_buf, _ofst) \
- MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_QWORD(_buf, _ofst) \
- MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned. Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) \
+ efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_WORD(_buf, _field) \
+ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
+ _name2, _value2) \
+ EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3) \
+ EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4) \
+ EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5) \
+ EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6) \
+ EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6, _name7, _value7) \
+ EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6, \
+ MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
+ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field) \
+ EFX_EXTRACT_DWORD( \
+ *(efx_dword_t *) \
+ _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+ MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
+ (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name) \
+ efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
+ ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field) \
+ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
+ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *) \
+ _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
+ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
+ EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
+ do { \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
+ _type ## _TYPEDEF, _field2)
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
- EFX_EXTRACT_DWORD( \
- *((efx_dword_t *) \
- (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
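Illustrative only (not part of the patch): a typical request/response built with the new typed-buffer macros above. The field names follow the MC_CMD_NVRAM_INFO layout used elsewhere in this series; the helper name is invented.

static int efx_example_nvram_size(struct efx_nic *efx, unsigned int type,
				  size_t *size_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
		return -EIO;

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	return 0;
}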
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -132,34 +284,29 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
-extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
- unsigned int type);
-extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length);
-extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer,
- size_t length);
-#define EFX_MCDI_NVRAM_LEN_MAX 128
-extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length);
-extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
- unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_reset_port(struct efx_nic *efx);
-extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+extern int efx_mcdi_port_probe(struct efx_nic *efx);
+extern void efx_mcdi_port_remove(struct efx_nic *efx);
+extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+extern int efx_mcdi_port_get_number(struct efx_nic *efx);
+extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
extern int efx_mcdi_set_mac(struct efx_nic *efx);
-extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear);
-extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
extern int efx_mcdi_mon_probe(struct efx_nic *efx);
@@ -169,4 +316,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_MTD
+extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer);
+extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer);
+extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
deleted file mode 100644
index 1003f309cba..00000000000
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include "net_driver.h"
-#include "efx.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 reject, fcntl;
- u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
-
- memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
- efx->net_dev->dev_addr, ETH_ALEN);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* The MCDI command provides for controlling accept/reject
- * of broadcast packets too, but the driver doesn't currently
- * expose this. */
- reject = (efx->promiscuous) ? 0 :
- (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
-bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
-{
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- size_t outlength;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return true;
- }
-
- return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
-}
-
-int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
-{
- u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
- int rc;
- efx_dword_t *cmd_ptr;
- int period = enable ? 1000 : 0;
- u32 addr_hi;
- u32 addr_lo;
-
- BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
-
- addr_lo = ((u64)dma_addr) >> 0;
- addr_hi = ((u64)dma_addr) >> 32;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
- cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- EFX_POPULATE_DWORD_7(*cmd_ptr,
- MC_CMD_MAC_STATS_IN_DMA, !!enable,
- MC_CMD_MAC_STATS_IN_CLEAR, clear,
- MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
- MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
- MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
- return rc;
-}
-
-int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- rc = efx_mcdi_set_mac(efx);
- if (rc != 0)
- return rc;
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
- efx->multicast_hash.byte,
- sizeof(efx->multicast_hash),
- NULL, 0, NULL);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 1d552f0664d..4cc5d95b2a5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,31 +21,62 @@ enum efx_hwmon_type {
EFX_HWMON_UNKNOWN,
EFX_HWMON_TEMP, /* temperature */
EFX_HWMON_COOL, /* cooling device, probably a heatsink */
- EFX_HWMON_IN /* input voltage */
+ EFX_HWMON_IN, /* voltage */
+ EFX_HWMON_CURR, /* current */
+ EFX_HWMON_POWER, /* power */
};
static const struct {
const char *label;
enum efx_hwmon_type hwmon_type;
int port;
-} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
-#define SENSOR(name, label, hwmon_type, port) \
- [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
- SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
- SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
- SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
- SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
- SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
+ SENSOR(IN_VREF, "ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
+ SENSOR(FAN_0, NULL, COOL, -1),
+ SENSOR(FAN_1, NULL, COOL, -1),
+ SENSOR(FAN_2, NULL, COOL, -1),
+ SENSOR(FAN_3, NULL, COOL, -1),
+ SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VAOE, "AOE input supply", IN, -1),
+ SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
+ SENSOR(IN_IAOE, "AOE input current", CURR, -1),
+ SENSOR(NIC_POWER, "Board power use", POWER, -1),
+ SENSOR(IN_0V9, "0.9V supply", IN, -1),
+ SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
+ SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT_EXTADC,
+ "Controller int. temp. raw (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+ "Controller int. temp. (via ADC)", TEMP, -1),
+ SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
+ SENSOR(AIRFLOW, "Air flow raw", IN, -1),
#undef SENSOR
};
@@ -54,6 +85,7 @@ static const char *const sensor_status_names[] = {
[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+ [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@@ -85,6 +117,7 @@ struct efx_mcdi_mon_attribute {
struct device_attribute dev_attr;
unsigned int index;
unsigned int type;
+ enum efx_hwmon_type hwmon_type;
unsigned int limit_value;
char name[12];
};
@@ -92,13 +125,12 @@ struct efx_mcdi_mon_attribute {
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
int rc;
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
- hwmon->dma_buf.dma_addr & 0xffffffff);
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
- (u64)hwmon->dma_buf.dma_addr >> 32);
+ MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+ hwmon->dma_buf.dma_addr);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
inbuf, sizeof(inbuf), NULL, 0, NULL);
@@ -146,18 +178,32 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
- unsigned int value;
+ unsigned int value, state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ if (state == MC_CMD_SENSOR_STATE_NO_READING)
+ return -EBUSY;
+
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -172,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
value = mon_attr->limit_value;
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -221,6 +277,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ else
+ attr->hwmon_type = EFX_HWMON_UNKNOWN;
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
@@ -234,36 +294,43 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
+ unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
- u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ unsigned int n_pages, n_sensors, n_attrs, page;
size_t outlen;
char name[12];
u32 mask;
- int rc, i, type;
+ int rc, i, j, type;
- BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
+ /* Find out how many sensors are present */
+ n_sensors = 0;
+ page = 0;
+ do {
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
- rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
- return -EIO;
-
- /* Find out which sensors are present. Don't create a device
- * if there are none.
- */
- mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
- if (mask == 0)
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ ++page;
+ } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ n_pages = page;
+
+ /* Don't create a device if there are none */
+ if (n_sensors == 0)
return 0;
- /* Check again for short response */
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
- return -EIO;
-
- rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
- 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
+ rc = efx_nic_alloc_buffer(
+ efx, &hwmon->dma_buf,
+ n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+ GFP_KERNEL);
if (rc)
return rc;
@@ -274,7 +341,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
* attributes for this set of sensors: name of the driver plus
* value, min, max, crit, alarm and label for each sensor.
*/
- n_attrs = 1 + 6 * hweight32(mask);
+ n_attrs = 1 + 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
@@ -291,26 +358,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- for (i = 0, type = -1; ; i++) {
+ for (i = 0, j = -1, type = -1; ; i++) {
+ enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
unsigned hwmon_index;
u16 min1, max1, min2, max2;
/* Find next sensor type or exit if there is none */
- type++;
- while (!(mask & (1 << type))) {
+ do {
type++;
- if (type == 32)
- return 0;
- }
- /* Skip sensors specific to a different port */
- if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
- efx_mcdi_sensor_type[type].port >= 0 &&
- efx_mcdi_sensor_type[type].port != efx_port_num(efx))
- continue;
+ if ((type % 32) == 0) {
+ page = type / 32;
+ j = -1;
+ if (page == n_pages)
+ return 0;
+
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+ page);
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ mask = (MCDI_DWORD(outbuf,
+ SENSOR_INFO_OUT_MASK) &
+ ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
- switch (efx_mcdi_sensor_type[type].hwmon_type) {
+ /* Check again for short response */
+ if (outlen <
+ MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ } while (!(mask & (1 << type % 32)));
+ j++;
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+ /* Skip sensors specific to a different port */
+ if (hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port !=
+ efx_port_num(efx))
+ continue;
+ } else {
+ hwmon_type = EFX_HWMON_UNKNOWN;
+ }
+
+ switch (hwmon_type) {
case EFX_HWMON_TEMP:
hwmon_prefix = "temp";
hwmon_index = ++n_temp; /* 1-based */
@@ -327,16 +431,24 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
hwmon_prefix = "in";
hwmon_index = n_in++; /* 0-based */
break;
+ case EFX_HWMON_CURR:
+ hwmon_prefix = "curr";
+ hwmon_index = ++n_curr; /* 1-based */
+ break;
+ case EFX_HWMON_POWER:
+ hwmon_prefix = "power";
+ hwmon_index = ++n_power; /* 1-based */
+ break;
}
min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN1);
+ SENSOR_INFO_ENTRY, j, MIN1);
max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX1);
+ SENSOR_INFO_ENTRY, j, MAX1);
min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN2);
+ SENSOR_INFO_ENTRY, j, MIN2);
max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX2);
+ SENSOR_INFO_ENTRY, j, MAX2);
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
@@ -346,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- snprintf(name, sizeof(name), "%s%u_min",
- hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
- efx, name, efx_mcdi_mon_show_limit,
- i, type, min1);
- if (rc)
- goto fail;
+ if (hwmon_type != EFX_HWMON_POWER) {
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ if (rc)
+ goto fail;
+ }
snprintf(name, sizeof(name), "%s%u_max",
hwmon_prefix, hwmon_index);
@@ -383,7 +497,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- if (efx_mcdi_sensor_type[type].label) {
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+ efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
rc = efx_mcdi_mon_add_attr(
@@ -400,8 +515,7 @@ fail:
void efx_mcdi_mon_remove(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
- struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
unsigned int i;
for (i = 0; i < hwmon->n_attrs; i++)
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c5c9747861b..b5cf62492f8 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,13 @@
#define MC_FW_STATE_BOOTING (4)
/* The Scheduler has started. */
#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so the
+ * MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
/* Siena MC shared memory offsets */
/* The 'doorbell' addresses are hard-wired to alert the MC when written */
@@ -39,18 +46,21 @@
#define MC_STATUS_DWORD_REBOOT (0xb007b007)
#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
/* The current version of the MCDI protocol.
*
* Note that the ROM burnt into the card only talks V0, so at the very
* least every driver must support version 0 and MCDI_PCOL_VERSION
*/
-#define MCDI_PCOL_VERSION 1
+#define MCDI_PCOL_VERSION 2
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
/* MCDI version 1
*
- * Each MCDI request starts with an MCDI_HEADER, which is a 32byte
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
* structure, filled in by the client.
*
* 0 7 8 16 20 22 23 24 31
@@ -87,9 +97,11 @@
#define MCDI_HEADER_DATALEN_LBN 8
#define MCDI_HEADER_DATALEN_WIDTH 8
#define MCDI_HEADER_SEQ_LBN 16
-#define MCDI_HEADER_RSVD_LBN 20
-#define MCDI_HEADER_RSVD_WIDTH 2
#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
#define MCDI_HEADER_ERROR_LBN 22
#define MCDI_HEADER_ERROR_WIDTH 1
#define MCDI_HEADER_RESPONSE_LBN 23
@@ -100,7 +112,11 @@
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
/* Maximum number of payload bytes */
-#define MCDI_CTL_SDU_LEN_MAX 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
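The *_LBN/*_WIDTH pairs above describe bit positions inside the single 32-bit MCDI header dword. As a minimal standalone sketch (plain C rather than the driver's own MCDI macros), a request header could be packed as below; the CODE and RESYNC positions (bits 0-6 and bit 7) are inferred from the layout comment rather than from defines shown in this hunk, and the field values are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Shift a value into place given a least-significant bit number (LBN)
 * and a width, mirroring the *_LBN/*_WIDTH pairs above. */
static uint32_t mcdi_field(unsigned int lbn, unsigned int width, uint32_t value)
{
	return (value & ((1u << width) - 1)) << lbn;
}

int main(void)
{
	uint32_t hdr = 0;

	hdr |= mcdi_field(0, 7, 0x08);	/* command code (assumed bits 0-6) */
	hdr |= mcdi_field(7, 1, 1);	/* resync bit (assumed bit 7) */
	hdr |= mcdi_field(8, 8, 12);	/* MCDI_HEADER_DATALEN: 12 payload bytes */
	hdr |= mcdi_field(16, 4, 3);	/* MCDI_HEADER_SEQ: sequence number 3 */

	printf("MCDI request header: 0x%08x\n", hdr);
	return 0;
}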
/* The MC can generate events for two reasons:
* - To complete a shared memory request if XFLAGS_EVREQ was set
@@ -145,22 +161,69 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
/* Non-existent command target */
#define MC_CMD_ERR_ENOENT 2
/* assert() has killed the MC */
#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
/* Caller does not hold required locks */
#define MC_CMD_ERR_EACCES 13
/* Resource is currently unavailable (e.g. lock contention) */
#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
#define MC_CMD_ERR_EDEADLK 35
/* Operation not implemented */
#define MC_CMD_ERR_ENOSYS 38
/* Operation timed out */
#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
#define MC_CMD_ERR_CODE_OFST 0
@@ -178,9 +241,11 @@
/* Vectors in the boot ROM */
/* Point to the copycode entry point. */
-#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
-#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -209,16 +274,29 @@
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
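A failed command reports one of the MC_CMD_ERR_* values above at MC_CMD_ERR_CODE_OFST in the response; with protocol version 2 the (0-based) index of the offending argument may follow at MC_CMD_ERR_ARG_OFST. A hedged sketch of decoding that, assuming little-endian payloads; the errno mapping shown is illustrative and may differ from the driver's own translation table.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	uint32_t v;
	memcpy(&v, p, 4);	/* little-endian host assumed */
	return v;
}

/* Translate a few MC_CMD_ERR_* codes to negative errno values.
 * MC-specific codes (0x1000 and up) are folded into -EPROTO here. */
static int mc_err_to_errno(uint32_t code)
{
	switch (code) {
	case 1:  return -EPERM;		/* MC_CMD_ERR_EPERM */
	case 2:  return -ENOENT;	/* MC_CMD_ERR_ENOENT */
	case 5:  return -EIO;		/* MC_CMD_ERR_EIO */
	case 22: return -EINVAL;	/* MC_CMD_ERR_EINVAL */
	case 28: return -ENOSPC;	/* MC_CMD_ERR_ENOSPC */
	case 62: return -ETIME;		/* MC_CMD_ERR_ETIME */
	default: return -EPROTO;
	}
}

/* outbuf/outlen: error response payload.  Returns a negative errno and,
 * for v2 responses long enough to carry it, the failing argument index. */
static int mc_parse_error(const uint8_t *outbuf, size_t outlen, int *arg)
{
	if (outlen >= 8)
		*arg = (int)get_le32(outbuf + 4);	/* MC_CMD_ERR_ARG_OFST */
	return mc_err_to_errno(get_le32(outbuf + 0));	/* MC_CMD_ERR_CODE_OFST */
}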
/* MCDI_EVENT structuredef */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
#define MCDI_EVENT_LEVEL_LBN 33
#define MCDI_EVENT_LEVEL_WIDTH 3
-#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
-#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
-#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
-#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
@@ -230,9 +308,14 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -247,26 +330,80 @@
#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
#define MCDI_EVENT_FWALERT_REASON_LBN 0
#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
-#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
#define MCDI_EVENT_FLR_VF_LBN 0
#define MCDI_EVENT_FLR_VF_WIDTH 8
#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
-#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
-#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
-#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
-#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
-#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
-#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -275,21 +412,60 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
-#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
-#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
-#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
-#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
-#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
-#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
-#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
-#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
-#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
-#define MCDI_EVENT_CODE_FLR 0xa /* enum */
-#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
-#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -305,15 +481,114 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* Seconds field of timestamp */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* Nanoseconds field of timestamp */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* Lowest four bytes of sourceUUID from PTP packet */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: One or more PPS OUT events */
+#define FCDI_EVENT_CODE_PPS_OUT 0x7
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EVENT_PPS_COUNT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT structuredef */
+#define FCDI_EXTENDED_EVENT_LENMIN 16
+#define FCDI_EXTENDED_EVENT_LENMAX 248
+#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
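FCDI_EXTENDED_EVENT is a PPS_COUNT followed by that many 8-byte timestamp records, each a (seconds, nanoseconds) pair starting at byte offset 8. A small standalone sketch of walking those records, assuming the same little-endian layout as the rest of the protocol:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	uint32_t v;
	memcpy(&v, p, 4);	/* little-endian host assumed */
	return v;
}

/* ev/evlen: an FCDI_EXTENDED_EVENT payload. */
static void dump_pps_timestamps(const uint8_t *ev, size_t evlen)
{
	uint32_t count = get_le32(ev);	/* PPS_COUNT at offset 0 */
	size_t i;

	for (i = 0; i < count && 8 + 8 * (i + 1) <= evlen; i++) {
		uint32_t sec = get_le32(ev + 8 + 8 * i);	/* PPS_SECONDS */
		uint32_t ns  = get_le32(ev + 8 + 8 * i + 4);	/* PPS_NANOSECONDS */

		printf("PPS[%zu] = %u.%09u\n", i, sec, ns);
	}
}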
/***********************************/
@@ -365,11 +640,27 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to start the datapath CPUs.
+ * This is useful for certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to parse any configuration from
+ * flash. (In addition, the datapath CPUs will not be started, as for
+ * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
+ * certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
/* MC_CMD_COPYCODE_OUT msgresponse */
#define MC_CMD_COPYCODE_OUT_LEN 0
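The HUNT_*_MAGIC_ADDR values above are ordinary SRC_ADDR/DEST_ADDR contents: copying a single word from and to the magic address is what requests the special boot behaviour described in the comments. A hedged sketch of filling in such a request with plain byte packing; the JUMP value chosen here is illustrative only.

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	memcpy(p, &v, 4);	/* little-endian host assumed */
}

/* Fill a 16-byte MC_CMD_COPYCODE_IN request that enters the main image
 * without starting the datapath CPUs (Huntington only). */
static void build_copycode_no_datapath(uint8_t inbuf[16])
{
	put_le32(inbuf + 0,  0x1d0d0);	/* SRC_ADDR: HUNT_NO_DATAPATH_MAGIC_ADDR */
	put_le32(inbuf + 4,  0x1d0d0);	/* DEST_ADDR: the same magic address */
	put_le32(inbuf + 8,  1);	/* NUMWORDS: a single word */
	put_le32(inbuf + 12, 0x1);	/* JUMP: MC_CMD_COPYCODE_JUMP_NONE (illustrative) */
}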
@@ -377,11 +668,13 @@
/***********************************/
/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
*/
#define MC_CMD_SET_FUNC 0x4
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
/* MC_CMD_SET_FUNC_OUT msgresponse */
@@ -390,6 +683,7 @@
/***********************************/
/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
@@ -398,7 +692,10 @@
/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* Boot offset: the instruction address from which the MC booted */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
@@ -410,25 +707,38 @@
/***********************************/
/* MC_CMD_GET_ASSERTS
- * Get and clear any assertion status.
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
*/
#define MC_CMD_GET_ASSERTS 0x6
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
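Since only GLOBAL_FLAGS is guaranteed to be present in the completion, a caller should look at the flags before touching the saved PC, GP registers or thread address. A minimal sketch under that assumption, using the offsets defined above and a little-endian payload:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_le32(const uint8_t *p)
{
	uint32_t v;
	memcpy(&v, p, 4);	/* little-endian host assumed */
	return v;
}

/* outbuf/outlen: MC_CMD_GET_ASSERTS_OUT response payload. */
static void report_asserts(const uint8_t *outbuf, size_t outlen)
{
	uint32_t flags = get_le32(outbuf + 0);	/* GLOBAL_FLAGS_OFST */

	if (flags == 0x1) {			/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS */
		printf("MC: no assertion failures\n");
		return;
	}
	if (outlen < 140)			/* full MC_CMD_GET_ASSERTS_OUT_LEN absent */
		return;

	printf("MC assertion, flags=%u, PC=0x%08x, thread=0x%08x\n",
	       flags,
	       get_le32(outbuf + 4),		/* SAVED_PC_OFFS_OFST */
	       get_le32(outbuf + 132));		/* THREAD_OFFS_OFST */
}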
@@ -441,9 +751,12 @@
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -459,11 +772,20 @@
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
-/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
@@ -471,6 +793,7 @@
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
@@ -478,46 +801,22 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
-
-/***********************************/
-/* MC_CMD_GET_FPGAREG
- * Read multiple bytes from PTP FPGA.
- */
-#define MC_CMD_GET_FPGAREG 0x9
-
-/* MC_CMD_GET_FPGAREG_IN msgrequest */
-#define MC_CMD_GET_FPGAREG_IN_LEN 8
-#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
-
-/* MC_CMD_GET_FPGAREG_OUT msgresponse */
-#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
-#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
-
-
-/***********************************/
-/* MC_CMD_PUT_FPGAREG
- * Write multiple bytes to PTP FPGA.
- */
-#define MC_CMD_PUT_FPGAREG 0xa
-
-/* MC_CMD_PUT_FPGAREG_IN msgrequest */
-#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
-#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
-#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
-
-/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
-#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
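Both bootrom enum values above have 0xb007 in the upper 16 bits, which is exactly what the MC_FW_VERSION_IS_BOOTLOADER() helper earlier in this header tests for. A small sketch of classifying the FIRMWARE word from a GET_VERSION response:

#include <stdint.h>
#include <stdio.h>

#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)

/* fw: value read from MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST. */
static void report_firmware_word(uint32_t fw)
{
	if (fw == 0xffffffff)			/* FIRMWARE_ANY */
		printf("any firmware version\n");
	else if (MC_FW_VERSION_IS_BOOTLOADER(fw))
		printf("running a bootrom image (0x%08x)\n", fw);
	else
		printf("firmware version word 0x%08x\n", fw);
}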
/***********************************/
@@ -528,32 +827,74 @@
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
#define MC_CMD_PTP_IN_OP_OFST 0
#define MC_CMD_PTP_IN_OP_LEN 1
-#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
-#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
-#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
-#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
-#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
-#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
-#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
-#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
-#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
-#define MC_CMD_PTP_OP_MAX 0xc /* enum */
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Apply a frequency adjustment to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x16
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
-#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
-#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
-#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
-#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
-#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
@@ -566,7 +907,9 @@
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
@@ -586,19 +929,27 @@
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
-#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
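FREQ is a signed 64-bit fixed-point quantity with MC_CMD_PTP_IN_ADJUST_BITS (0x28, i.e. 40) fractional bits of nanoseconds. A hedged arithmetic sketch of producing that value from a parts-per-billion adjustment; treating 1 ppb as 1 ns of adjustment per second is an assumption based on the field comment, not something stated in this header.

#include <stdint.h>

#define MC_CMD_PTP_IN_ADJUST_BITS 0x28	/* 40 fractional bits */

/* Convert a signed parts-per-billion adjustment into the 40-bit
 * fixed-point ns value carried in MC_CMD_PTP_IN_ADJUST_FREQ.  Assumes
 * |ppb| < 2^23 so the shift cannot overflow a 64-bit value. */
static int64_t ppb_to_adjust_freq(int64_t ppb)
{
	return ppb << MC_CMD_PTP_IN_ADJUST_BITS;
}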
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
@@ -613,86 +964,240 @@
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue ID to which to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
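Each TIMESET record brackets one NIC clock read between two host timestamps, so the sample whose HOSTSTART/HOSTEND window is narrowest carries the least uncertainty about when the NIC clock was actually read. A hedged sketch of the record layout and of picking that sample; the host timestamps are treated as plain 32-bit counters in whatever unit the host supplied, and mapping the wire format straight onto a packed struct of uint32_t fields is assumed safe because there is no padding.

#include <stdint.h>

/* One 20-byte MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET record, per the
 * offsets defined above. */
struct ptp_timeset {
	uint32_t host_start;	/* HOSTSTART_OFST 0 */
	uint32_t seconds;	/* SECONDS_OFST 4 */
	uint32_t nanoseconds;	/* NANOSECONDS_OFST 8 */
	uint32_t host_end;	/* HOSTEND_OFST 12 */
	uint32_t wait_ns;	/* WAITNS_OFST 16 */
};

/* Return the index of the reading with the narrowest host window. */
static unsigned int ptp_best_timeset(const struct ptp_timeset *ts, unsigned int n)
{
	unsigned int i, best = 0;

	for (i = 1; i < n; i++)
		if (ts[i].host_end - ts[i].host_start <
		    ts[best].host_end - ts[best].host_start)
			best = i;
	return best;
}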
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
-#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
-#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
-#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
-#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
-#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -702,6 +1207,7 @@
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
@@ -710,6 +1216,7 @@
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
#define MC_CMD_CSR_READ32_OUT_LENMAX 252
#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
@@ -726,6 +1233,7 @@
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
@@ -739,6 +1247,48 @@
/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
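The OCSD sub-command overloads ADDR and INTERVAL: a non-NULL ADDR starts reporting at that host address with the given interval, while a NULL ADDR turns INTERVAL into a command code (0 stop, 1 report state, 2 debug). A hedged sketch of building both request shapes with plain byte packing, little-endian assumed:

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	memcpy(p, &v, 4);	/* little-endian host assumed */
}

/* Build a 16-byte MC_CMD_HP_IN request.  addr == 0 selects the
 * "INTERVAL is really a command" interpretation described above. */
static void build_hp_ocsd(uint8_t inbuf[16], uint64_t addr, uint32_t interval_or_cmd)
{
	put_le32(inbuf + 0, 0x0);			/* SUBCMD: OCSD */
	put_le32(inbuf + 4, (uint32_t)addr);		/* OCSD_ADDR_LO */
	put_le32(inbuf + 8, (uint32_t)(addr >> 32));	/* OCSD_ADDR_HI */
	put_le32(inbuf + 12, interval_or_cmd);		/* OCSD_INTERVAL */
}

For example, build_hp_ocsd(buf, sensor_dma_addr, 5) would request reporting every 5 seconds to the hypothetical sensor_dma_addr, while build_hp_ocsd(buf, 0, 0) would stop OCSD.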
+
+
+/***********************************/
/* MC_CMD_STACKINFO
* Get stack information.
*/
@@ -751,6 +1301,7 @@
#define MC_CMD_STACKINFO_OUT_LENMIN 12
#define MC_CMD_STACKINFO_OUT_LENMAX 252
#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
@@ -765,19 +1316,35 @@
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
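A clause 45 read is addressed by (BUS, PRTAD, DEVAD, ADDR); forcing DEVAD to MC_CMD_MDIO_CLAUSE22 (0x20) selects clause 22 instead, and the returned value should only be trusted when STATUS is exactly the DONE bit (MC_CMD_MDIO_STATUS_GOOD, 0x8). A hedged sketch using plain byte packing rather than the driver's MCDI macros:

#include <stdint.h>
#include <string.h>

static void put_le32(uint8_t *p, uint32_t v)
{
	memcpy(p, &v, 4);	/* little-endian host assumed */
}

/* Fill a 16-byte MC_CMD_MDIO_READ_IN request for a clause 45 register,
 * or a clause 22 one when devad == MC_CMD_MDIO_CLAUSE22 (0x20). */
static void build_mdio_read(uint8_t inbuf[16], uint32_t bus, uint32_t prtad,
			    uint32_t devad, uint32_t addr)
{
	put_le32(inbuf + 0, bus);	/* MC_CMD_MDIO_BUS_INTERNAL/EXTERNAL */
	put_le32(inbuf + 4, prtad);	/* port address */
	put_le32(inbuf + 8, devad);	/* MMD, or 0x20 for clause 22 */
	put_le32(inbuf + 12, addr);	/* register address */
}

/* A transaction is good only if STATUS is exactly the DONE bit. */
static int mdio_read_ok(uint32_t status)
{
	return status == 0x8;		/* MC_CMD_MDIO_STATUS_GOOD */
}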
/***********************************/
@@ -788,18 +1355,34 @@
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -813,6 +1396,9 @@
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
@@ -826,9 +1412,15 @@
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
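The PARMS dword that replaces BYTE_MASK carries CS2 (bit 14), VF_ACTIVE (bit 15) and VF_NUM (bits 16-31); the sub-field LBN values are read here as relative to that dword, which is how such nested fields are normally consumed, though that interpretation is an assumption. A small sketch of assembling it:

#include <stdint.h>

/* Pack the PARMS dword of MC_CMD_DBIWROP_TYPEDEF from its bit fields. */
static uint32_t dbiwrop_parms(unsigned int vf_num, int vf_active, int cs2)
{
	return ((uint32_t)vf_num << 16) |	/* VF_NUM_LBN 16, width 16 */
	       ((uint32_t)!!vf_active << 15) |	/* VF_ACTIVE_LBN 15 */
	       ((uint32_t)!!cs2 << 14);		/* CS2_LBN 14 */
}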
@@ -836,69 +1428,111 @@
/***********************************/
/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map.
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ32 0x14
/* MC_CMD_PORT_READ32_IN msgrequest */
#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
/***********************************/
/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map.
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE32 0x15
/* MC_CMD_PORT_WRITE32_IN msgrequest */
#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
/***********************************/
/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map.
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ128 0x16
/* MC_CMD_PORT_READ128_IN msgrequest */
#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
/***********************************/
/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map.
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE128 0x17
/* MC_CMD_PORT_WRITE128_IN msgrequest */
#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
/* MC_CMD_PORT_WRITE128_OUT msgresponse */
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
/***********************************/
/* MC_CMD_GET_BOARD_CFG
@@ -916,18 +1550,10 @@
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
-/* Enum values, see field(s): */
-/* CAPABILITIES_PORT0 */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
@@ -936,6 +1562,11 @@
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
@@ -944,7 +1575,7 @@
/***********************************/
/* MC_CMD_DBI_READX
- * Read DBI register(s).
+ * Read DBI register(s) -- extended functionality
*/
#define MC_CMD_DBI_READX 0x19
@@ -952,6 +1583,7 @@
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each Read op consists of an address (offset 0) and VF/CS2 parameters
+ * (offset 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
@@ -963,11 +1595,27 @@
#define MC_CMD_DBI_READX_OUT_LENMIN 4
#define MC_CMD_DBI_READX_OUT_LENMAX 252
#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
/***********************************/
/* MC_CMD_SET_RAND_SEED
@@ -977,6 +1625,7 @@
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
@@ -986,7 +1635,7 @@
/***********************************/
/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the PCIE LTSSM.
+ * Retrieve the history of the LTSSM, if the build supports it.
*/
#define MC_CMD_LTSSM_HIST 0x1b
@@ -997,6 +1646,7 @@
#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
@@ -1005,41 +1655,47 @@
/***********************************/
/* MC_CMD_DRV_ATTACH
- * Inform MCPU that this port is managed on the host.
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
*/
#define MC_CMD_DRV_ATTACH 0x1c
/* MC_CMD_DRV_ATTACH_IN msgrequest */
-#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state (0=detached, 1=attached) to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state (0=detached, 1=attached) */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
-
-/***********************************/
-/* MC_CMD_NCSI_PROD
- * Trigger an NC-SI event.
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
*/
-#define MC_CMD_NCSI_PROD 0x1d
-
-/* MC_CMD_NCSI_PROD_IN msgrequest */
-#define MC_CMD_NCSI_PROD_IN_LEN 4
-#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
-#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
-#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
-#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
-
-/* MC_CMD_NCSI_PROD_OUT msgresponse */
-#define MC_CMD_NCSI_PROD_OUT_LEN 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
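To illustrate how the _OFST values are consumed, a driver could pack the extended 12-byte request roughly as follows (a sketch only: the buffer handling and the MCDI transport call are assumed, and production drivers use their own MCDI helper macros):

u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];

/* attach, and (non-bindingly) prefer the low-latency datapath firmware */
put_unaligned_le32(1, inbuf + MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST);
put_unaligned_le32(1, inbuf + MC_CMD_DRV_ATTACH_IN_UPDATE_OFST);
put_unaligned_le32(MC_CMD_FW_LOW_LATENCY,
		   inbuf + MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST);
/* ...send MC_CMD_DRV_ATTACH via the MCDI transport, then issue
 * MC_CMD_GET_CAPABILITIES to see which features were actually granted.
 */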
/***********************************/
@@ -1050,6 +1706,7 @@
/* MC_CMD_SHMUART_IN msgrequest */
#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
/* MC_CMD_SHMUART_OUT msgresponse */
@@ -1057,13 +1714,33 @@
/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_ENTITY_RESET
- * Generic per-port reset.
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -1080,7 +1757,9 @@
/* MC_CMD_PCIE_CREDITS_IN msgrequest */
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
@@ -1141,7 +1820,7 @@
/***********************************/
/* MC_CMD_PUTS
- * puts(3) implementation over MCDI
+ * Copy the given ASCII string out onto UART and/or out of the network port.
*/
#define MC_CMD_PUTS 0x23
@@ -1167,7 +1846,8 @@
/***********************************/
/* MC_CMD_GET_PHY_CFG
- * Report PHY configuration.
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
*/
#define MC_CMD_GET_PHY_CFG 0x24
@@ -1176,6 +1856,7 @@
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
@@ -1191,7 +1872,9 @@
#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
@@ -1213,20 +1896,36 @@
#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
#define MC_CMD_PHY_CAP_AN_LBN 10
#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
-#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
-#define MC_CMD_MEDIA_CX4 0x2 /* enum */
-#define MC_CMD_MEDIA_KX4 0x3 /* enum */
-#define MC_CMD_MEDIA_XFP 0x4 /* enum */
-#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
-#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
-#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
@@ -1234,7 +1933,8 @@
#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
-#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
@@ -1243,18 +1943,31 @@
/***********************************/
/* MC_CMD_START_BIST
- * Start a BIST test on the PHY.
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_START_BIST 0x25
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
-#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
-#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
-#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
-#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
-#define MC_CMD_PHY_BIST 0x5 /* enum */
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
/* MC_CMD_START_BIST_OUT msgresponse */
#define MC_CMD_START_BIST_OUT_LEN 0
@@ -1262,7 +1975,12 @@
/***********************************/
/* MC_CMD_POLL_BIST
- * Poll for BIST completion.
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
*/
#define MC_CMD_POLL_BIST 0x26
@@ -1271,15 +1989,21 @@
/* MC_CMD_POLL_BIST_OUT msgresponse */
#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
-#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
-#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
-#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
-#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
@@ -1287,42 +2011,116 @@
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
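A sketch of decoding the memory-BIST response with the offsets above (the response-buffer plumbing is assumed; only fields defined in this header are referenced):

/* outbuf holds an MC_CMD_POLL_BIST_OUT_MEM response (36 bytes) */
u32 result = get_unaligned_le32(outbuf + MC_CMD_POLL_BIST_OUT_RESULT_OFST);

if (result == MC_CMD_POLL_BIST_FAILED) {
	u32 bus = get_unaligned_le32(outbuf + MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST);
	u32 addr = get_unaligned_le32(outbuf + MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST);
	u32 expect = get_unaligned_le32(outbuf + MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST);
	u32 actual = get_unaligned_le32(outbuf + MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST);

	pr_err("mem BIST failed: bus %u addr %#x expected %#x got %#x\n",
	       bus, addr, expect, actual);
}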
/***********************************/
/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s).
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than via
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
*/
#define MC_CMD_FLUSH_RX_QUEUES 0x27
@@ -1341,7 +2139,7 @@
/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
- * Get port's loopback modes.
+ * Returns a bitmask of loopback modes available at each speed.
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
@@ -1349,61 +2147,116 @@
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
-#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
-#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
-#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
-#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
-#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
-#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
-#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
-#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
-#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
-#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
-#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
-#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
-#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
-#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
-#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
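Each of the per-speed fields above is a 64-bit mask indexed by the loopback enum, so availability of a given mode reduces to a bit test (a sketch; the 10G mask is assumed to have been read from the response buffer):

u64 modes_10g = get_unaligned_le64(outbuf +
				   MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST);
bool xfi_loopback_ok = modes_10g & (1ULL << MC_CMD_LOOPBACK_XFI);
/* if set, XFI loopback may be requested via MC_CMD_SET_LINK */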
/***********************************/
/* MC_CMD_GET_LINK
- * Read the unified MAC/PHY link state.
+ * Read the unified MAC/PHY link state. Locks required: None Return code: 0,
+ * ETIME.
*/
#define MC_CMD_GET_LINK 0x29
@@ -1412,9 +2265,15 @@
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
@@ -1427,10 +2286,14 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-#define MC_CMD_FCNTL_OFF 0x0 /* enum */
-#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
-#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -1444,13 +2307,16 @@
/***********************************/
/* MC_CMD_SET_LINK
- * Write the unified MAC/PHY link configuration.
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
*/
#define MC_CMD_SET_LINK 0x2a
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
@@ -1458,9 +2324,13 @@
#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
/* MC_CMD_SET_LINK_OUT msgresponse */
@@ -1469,12 +2339,13 @@
/***********************************/
/* MC_CMD_SET_ID_LED
- * Set indentification LED state.
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_ID_LED 0x2b
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
@@ -1486,12 +2357,15 @@
/***********************************/
/* MC_CMD_SET_MAC
- * Set MAC configuration.
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_MAC 0x2c
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
@@ -1504,10 +2378,14 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -1515,12 +2393,18 @@
/***********************************/
/* MC_CMD_PHY_STATS
- * Get generic PHY statistics.
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration below). Each value is represented
+ * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are DMAed to that (page-aligned) location. Locks required: None.
+ * Returns: 0, ETIME
*/
#define MC_CMD_PHY_STATS 0x2d
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1534,40 +2418,71 @@
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
-#define MC_CMD_OUI 0x0 /* enum */
-#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
-#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
-#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
-#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
-#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
-#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
-#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
-#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
-#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
-#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
-#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
-#define MC_CMD_PCS_BER 0xc /* enum */
-#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
-#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
-#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
-#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
-#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
-#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
-#define MC_CMD_AN_LINK_UP 0x13 /* enum */
-#define MC_CMD_AN_COMPLETE 0x14 /* enum */
-#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
-#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
-#define MC_CMD_PHY_NSTATS 0x17 /* enum */
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
/***********************************/
/* MC_CMD_MAC_STATS
- * Get generic MAC statistics.
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
/* MC_CMD_MAC_STATS_IN msgrequest */
#define MC_CMD_MAC_STATS_IN_LEN 16
+/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1684,6 +2599,7 @@
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
@@ -1713,7 +2629,23 @@
/***********************************/
/* MC_CMD_MEMCPY
- * Perform memory copy operation.
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a dma
+ * update of a datastructure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure)
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
*/
#define MC_CMD_MEMCPY 0x31
@@ -1721,6 +2653,7 @@
#define MC_CMD_MEMCPY_IN_LENMIN 32
#define MC_CMD_MEMCPY_IN_LENMAX 224
#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
@@ -1741,14 +2674,22 @@
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
-#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
-#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
-#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
-#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
-#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -1818,7 +2759,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_REMOVE
- * Remove a WoL filter.
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
@@ -1832,7 +2773,8 @@
/***********************************/
/* MC_CMD_WOL_FILTER_RESET
- * Reset (i.e. remove all) WoL filters.
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
@@ -1848,7 +2790,7 @@
/***********************************/
/* MC_CMD_SET_MCAST_HASH
- * Set the MCASH hash value.
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
*/
#define MC_CMD_SET_MCAST_HASH 0x35
@@ -1865,7 +2807,8 @@
/***********************************/
/* MC_CMD_NVRAM_TYPES
- * Get virtual NVRAM partitions information.
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
*/
#define MC_CMD_NVRAM_TYPES 0x36
@@ -1874,26 +2817,54 @@
/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
-#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
-#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
-#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
/***********************************/
/* MC_CMD_NVRAM_INFO
- * Read info about a virtual NVRAM partition.
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
*/
#define MC_CMD_NVRAM_INFO 0x37
@@ -1913,13 +2884,19 @@
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
- * Start a group of update operations on a virtual NVRAM partition.
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
@@ -1935,7 +2912,9 @@
/***********************************/
/* MC_CMD_NVRAM_READ
- * Read data from a virtual NVRAM partition.
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_READ 0x39
@@ -1945,6 +2924,7 @@
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
/* MC_CMD_NVRAM_READ_OUT msgresponse */
@@ -1959,7 +2939,9 @@
/***********************************/
/* MC_CMD_NVRAM_WRITE
- * Write data to a virtual NVRAM partition.
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_WRITE 0x3a
@@ -1983,7 +2965,9 @@
/***********************************/
/* MC_CMD_NVRAM_ERASE
- * Erase sector(s) from a virtual NVRAM partition.
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_ERASE 0x3b
@@ -2001,7 +2985,9 @@
/***********************************/
/* MC_CMD_NVRAM_UPDATE_FINISH
- * Finish a group of update operations on a virtual NVRAM partition.
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
@@ -2019,6 +3005,20 @@
/***********************************/
/* MC_CMD_REBOOT
* Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
*/
#define MC_CMD_REBOOT 0x3d
@@ -2033,7 +3033,9 @@
/***********************************/
/* MC_CMD_SCHEDINFO
- * Request scheduler info.
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
*/
#define MC_CMD_SCHEDINFO 0x3e
@@ -2052,14 +3054,24 @@
/***********************************/
/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
*/
#define MC_CMD_REBOOT_MODE 0x3f
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
@@ -2069,32 +3081,145 @@
/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
*/
#define MC_CMD_SENSOR_INFO 0x41
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), and so on.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
-#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
-#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
-#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
-#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
-#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
-#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
-#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
-#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
-#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
-#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
-#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
-#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
@@ -2102,6 +3227,23 @@
#define MC_CMD_SENSOR_ENTRY_MINNUM 1
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
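Sensors are therefore reported 31 per page, with bit 31 of each page's mask acting as the next-page flag. A sketch of walking every page (read_sensor_info_page() is a hypothetical helper that issues MC_CMD_SENSOR_INFO with the EXT_IN PAGE field set and copies the response into outbuf):

u8 outbuf[MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX];
u32 page = 0, mask;

do {
	read_sensor_info_page(page, outbuf, sizeof(outbuf));	/* hypothetical */
	mask = get_unaligned_le32(outbuf + MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST);
	/* bits 0-30 of mask identify the sensors present on this page */
	page++;
} while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));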
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
@@ -2124,39 +3266,80 @@
/***********************************/
/* MC_CMD_READ_SENSORS
- * Returns the current reading from each sensor.
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
*/
#define MC_CMD_READ_SENSORS 0x42
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
-#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
-#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
-#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
-#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
-#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
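Each reading DMAed by MC_CMD_READ_SENSORS is one such 4-byte entry. A sketch of decoding entry number idx from the host buffer (dma_buf and idx are assumed to be set up by the caller):

const u8 *entry = dma_buf + idx * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN;
u16 value = get_unaligned_le16(entry + MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST);
u8 state = entry[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST];
u8 type = entry[MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST];

if (state == MC_CMD_SENSOR_STATE_WARNING || state == MC_CMD_SENSOR_STATE_FATAL)
	pr_warn("sensor %u out of range (reading %u, state %u)\n",
		type, value, state);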
/***********************************/
/* MC_CMD_GET_PHY_STATE
- * Report current state of PHY.
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
*/
#define MC_CMD_GET_PHY_STATE 0x43
@@ -2166,13 +3349,16 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
-#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
-#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
/***********************************/
/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control.
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
*/
#define MC_CMD_SETUP_8021QBB 0x44
@@ -2187,7 +3373,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_GET
- * Retrieve ID of any WoL filters.
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
#define MC_CMD_WOL_FILTER_GET 0x45
@@ -2201,7 +3387,8 @@
/***********************************/
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state.
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
@@ -2241,7 +3428,8 @@
/***********************************/
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state.
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
@@ -2256,7 +3444,7 @@
/***********************************/
/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset.
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
*/
#define MC_CMD_MAC_RESET_RESTORE 0x48
@@ -2269,6 +3457,9 @@
/***********************************/
/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None Returns: 0
*/
#define MC_CMD_TESTASSERT 0x49
@@ -2281,14 +3472,23 @@
/***********************************/
/* MC_CMD_WORKAROUND
- * Enable/Disable a given workaround.
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
*/
#define MC_CMD_WORKAROUND 0x4a
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
-#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
@@ -2297,7 +3497,12 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
- * Read media-specific data from PHY.
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -2309,6 +3514,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
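
/*
 * Illustrative sketch only: reading one page of SFP+ module data via
 * MC_CMD_GET_PHY_MEDIA_INFO. The helper macros (MCDI_DECLARE_BUF,
 * MCDI_SET_DWORD, MCDI_DWORD, MCDI_PTR) and efx_mcdi_rpc are assumed from the
 * sfc driver's mcdi.h, and the IN request's PAGE field is assumed from the
 * part of this header not shown in the hunk above; error handling is reduced
 * to the essentials.
 */
static int example_read_sfp_page(struct efx_nic *efx, unsigned int page,
				 u8 *buf, size_t buf_len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN)
		return -EIO;

	/* DATALEN is in bytes; copy no more than the caller asked for */
	memcpy(buf, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA),
	       min_t(size_t, buf_len,
		     MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN)));
	return 0;
}
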
@@ -2318,7 +3524,8 @@
/***********************************/
/* MC_CMD_NVRAM_TEST
- * Test a particular NVRAM partition.
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
*/
#define MC_CMD_NVRAM_TEST 0x4c
@@ -2331,22 +3538,31 @@
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
-#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
-#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
/***********************************/
/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver.
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
*/
#define MC_CMD_MRSFP_TWEAK 0x4d
/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
@@ -2354,16 +3570,23 @@
/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
/***********************************/
/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits.
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
@@ -2372,9 +3595,13 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
@@ -2396,9 +3623,3640 @@
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
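
/*
 * Illustrative sketch only: walking the partition list returned by
 * MC_CMD_NVRAM_PARTITIONS. MCDI_DECLARE_BUF, MCDI_DWORD, MCDI_ARRAY_DWORD and
 * efx_mcdi_rpc are assumed from the sfc driver's mcdi.h; the pr_info logging
 * is purely for illustration.
 */
static int example_list_nvram_partitions(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
	size_t outlen;
	unsigned int i, n;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	n = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
	/* Trust the response length, not just NUM_PARTITIONS */
	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(n))
		return -EIO;

	for (i = 0; i < n; i++)
		pr_info("partition type 0x%x\n",
			MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, i));
	return 0;
}
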
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function.
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
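
/*
 * Illustrative sketch only: expanding the base/count/stride triple returned by
 * MC_CMD_GET_MAC_ADDRESSES into an individual address. The arithmetic treats
 * the 6-byte base as a 48-bit big-endian integer and adds index * stride;
 * MCDI_DECLARE_BUF, MCDI_DWORD, MCDI_PTR and efx_mcdi_rpc are assumed from the
 * sfc driver's mcdi.h and the wrapper function is hypothetical.
 */
static int example_get_nth_mac(struct efx_nic *efx, unsigned int idx, u8 *mac)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	const u8 *base;
	size_t outlen;
	u64 addr = 0;
	int rc, i;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;
	if (idx >= MCDI_DWORD(outbuf, GET_MAC_ADDRESSES_OUT_MAC_COUNT))
		return -ERANGE;

	/* Build a 48-bit value from the base address, then apply the stride */
	base = MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
	for (i = 0; i < ETH_ALEN; i++)
		addr = (addr << 8) | base[i];
	addr += (u64)idx * MCDI_DWORD(outbuf, GET_MAC_ADDRESSES_OUT_MAC_STRIDE);

	for (i = ETH_ALEN - 1; i >= 0; i--) {
		mac[i] = addr & 0xff;
		addr >>= 8;
	}
	return 0;
}
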
/* MC_CMD_RESOURCE_SPECIFIER enum */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+
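
/*
 * Illustrative sketch only: building the variable-length MC_CMD_INIT_EVQ
 * request for an event queue backed by n_pages 4k-aligned host pages. The
 * helper macros (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_SET_ARRAY_QWORD) and
 * efx_mcdi_rpc are assumed from the sfc driver's mcdi.h; timer and event
 * counting are shown with the simplest "disabled" settings.
 */
static int example_init_evq(struct efx_nic *efx, unsigned int instance,
			    unsigned int entries, const dma_addr_t *pages,
			    unsigned int n_pages)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_EVQ_IN_LENMAX);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t inlen = MC_CMD_INIT_EVQ_IN_LEN(n_pages);
	size_t outlen;
	unsigned int i;

	if (n_pages < MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM ||
	    n_pages > MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM)
		return -EINVAL;

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, entries);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, instance);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
	/* Interrupting EVQ; let the MC pick the interrupt */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_FLAGS,
		       1 << MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, MC_CMD_RESOURCE_INSTANCE_ANY);

	/* One 64-bit DMA address per 4k page backing the queue */
	for (i = 0; i < n_pages; i++)
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, pages[i]);

	return efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			    outbuf, sizeof(outbuf), &outlen);
}
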
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
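
/*
 * Illustrative sketch only: programming a run of buffer table entries within a
 * chunk previously obtained from MC_CMD_ALLOC_BUFTBL_CHUNK (whose HANDLE and
 * ID outputs supply 'handle' and 'first_id' here). Each ENTRY is a 64-bit
 * buffer table entry address, as documented above. MCDI_DECLARE_BUF,
 * MCDI_SET_DWORD, MCDI_SET_ARRAY_QWORD and efx_mcdi_rpc are assumed from the
 * sfc driver's mcdi.h; the wrapper itself is hypothetical.
 */
static int example_program_buftbl(struct efx_nic *efx, u32 handle, u32 first_id,
				  const u64 *entries, unsigned int n)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX);
	unsigned int i;

	if (n < MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM ||
	    n > MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM)
		return -EINVAL;

	MCDI_SET_DWORD(inbuf, PROGRAM_BUFTBL_ENTRIES_IN_HANDLE, handle);
	MCDI_SET_DWORD(inbuf, PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID, first_id);
	MCDI_SET_DWORD(inbuf, PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES, n);
	for (i = 0; i < n; i++)
		MCDI_SET_ARRAY_QWORD(inbuf, PROGRAM_BUFTBL_ENTRIES_IN_ENTRY, i,
				     entries[i]);

	return efx_mcdi_rpc(efx, MC_CMD_PROGRAM_BUFTBL_ENTRIES, inbuf,
			    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n),
			    NULL, 0, NULL);
}
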
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to port 0 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to port 1 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
+ * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
+ * a valid handle.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+
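
/*
 * Illustrative sketch only: inserting a single-recipient filter that matches
 * on destination MAC and delivers to one host RX queue, then capturing the
 * 64-bit handle needed later for OP_REMOVE. The helper macros and
 * efx_mcdi_rpc are assumed from the sfc driver's mcdi.h; the wrapper function
 * and its arguments are hypothetical.
 */
static int example_insert_dst_mac_filter(struct efx_nic *efx, u32 port_id,
					 const u8 *mac, u32 rxq, u64 *handle)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_INSERT);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, port_id);
	/* Match on destination MAC only */
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
		       1 << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN);
	memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_MAC), mac, ETH_ALEN);
	/* Deliver to a single host RX queue, no RSS */
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, rxq);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);

	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
	return 0;
}
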
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
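
/*
 * Illustrative sketch only: asking the parser-dispatcher which RX filter match
 * combinations it supports, returned in decreasing priority order. Helper
 * macros and efx_mcdi_rpc are assumed from the sfc driver's mcdi.h; the
 * pr_info logging is purely for illustration.
 */
static int example_get_supported_matches(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	size_t outlen;
	unsigned int i, n;
	int rc;

	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	n = MCDI_DWORD(outbuf, GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
	if (outlen < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(n))
		return -EIO;
	for (i = 0; i < n; i++)
		pr_info("match[%u] = %#x\n", i,
			MCDI_ARRAY_DWORD(outbuf,
					 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, i));
	return 0;
}
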
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address or LUE index */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
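
/*
 * Illustrative sketch only: a read-modify-write of the SRIOV configuration
 * that enables VFs and lets the system choose the RID offset and stride via
 * MC_CMD_RESOURCE_INSTANCE_ANY, as described above. Helper macros and
 * efx_mcdi_rpc are assumed from the sfc driver's mcdi.h; the wrapper function
 * is hypothetical.
 */
static int example_enable_vfs(struct efx_nic *efx, unsigned int vf_count)
{
	MCDI_DECLARE_BUF(getbuf, MC_CMD_GET_SRIOV_CFG_OUT_LEN);
	MCDI_DECLARE_BUF(setbuf, MC_CMD_SET_SRIOV_CFG_IN_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_SRIOV_CFG, NULL, 0,
			  getbuf, sizeof(getbuf), &outlen);
	if (rc)
		return rc;
	if (vf_count > MCDI_DWORD(getbuf, GET_SRIOV_CFG_OUT_VF_MAX))
		return -ERANGE;

	MCDI_SET_DWORD(setbuf, SET_SRIOV_CFG_IN_VF_CURRENT, vf_count);
	MCDI_SET_DWORD(setbuf, SET_SRIOV_CFG_IN_VF_MAX,
		       MCDI_DWORD(getbuf, GET_SRIOV_CFG_OUT_VF_MAX));
	MCDI_SET_DWORD(setbuf, SET_SRIOV_CFG_IN_FLAGS,
		       1 << MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN);
	/* Let the MC allocate the RID offset and stride */
	MCDI_SET_DWORD(setbuf, SET_SRIOV_CFG_IN_VF_OFFSET,
		       MC_CMD_RESOURCE_INSTANCE_ANY);
	MCDI_SET_DWORD(setbuf, SET_SRIOV_CFG_IN_VF_STRIDE,
		       MC_CMD_RESOURCE_INSTANCE_ANY);

	return efx_mcdi_rpc(efx, MC_CMD_SET_SRIOV_CFG, setbuf, sizeof(setbuf),
			    NULL, 0, NULL);
}
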
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated to
+ * this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
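The SET variant reuses the category selector from the GET command and folds every per-category control into one 32-bit info word, so building a request is pure field packing. Below is a minimal sketch for the IDO controls, assuming the MCDI payload uses little-endian dwords and that the definitions above are in scope; mcdi_put_dword() and build_set_tlp_ido() are illustrative helpers, not part of any driver API.

#include <stdint.h>
#include <string.h>

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build an 8-byte MC_CMD_SET_TLP_PROCESSING_GLOBALS request enabling
 * ID-based ordering on all four traffic types for the given category. */
static void build_set_tlp_ido(uint8_t buf[MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN],
			      uint32_t category)
{
	uint32_t info = 0;

	info |= 1u << MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN;
	info |= 1u << MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN;
	info |= 1u << MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN;
	info |= 1u << MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN;

	memset(buf, 0, MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN);
	mcdi_put_dword(buf, MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST,
		       category);
	mcdi_put_dword(buf, MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST,
		       info);
}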
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
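Taken together, the phase, target and chunk fields above reduce to a simple driver-side loop: reset everything, stream each image in chunks, finish with a checksum chunk, then declare ready. The sketch below walks a single IMEM target through that sequence, assuming little-endian dword packing and that these definitions are in scope; mcdi_rpc(), mcdi_put_dword() and the satellite_* helpers are illustrative stand-ins, not a real driver API.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical transport stub: a real driver would hand the request to the
 * MC and check MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT in the response. */
static int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen)
{
	(void)cmd; (void)inbuf; (void)inlen;
	return 0;
}

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

static int satellite_send(uint32_t phase, uint32_t target, uint32_t chunk_id,
			  const uint32_t *data, uint32_t n_dwords)
{
	uint8_t buf[MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX] = { 0 };
	uint32_t i;

	mcdi_put_dword(buf, MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST, phase);
	mcdi_put_dword(buf, MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST, target);
	mcdi_put_dword(buf, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST, chunk_id);
	mcdi_put_dword(buf, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST, n_dwords * 4);
	for (i = 0; i < n_dwords; i++)
		mcdi_put_dword(buf, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST + i * 4,
			       data[i]);
	/* LENMIN implies at least one (zeroed) data dword even when no data
	 * is carried, e.g. in the RESET and READY phases. */
	return mcdi_rpc(MC_CMD_SATELLITE_DOWNLOAD, buf,
			MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(n_dwords ? n_dwords : 1));
}

/* Download one IMEM image: reset, data chunks, checksum chunk, ready. */
static int satellite_download_imem(uint32_t target, const uint32_t *image,
				   uint32_t n_words)
{
	uint32_t chunk_id = 0, sum = 0, i, j;
	int rc;

	rc = satellite_send(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET,
			    MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL, 0, NULL, 0);
	if (rc)
		return rc;

	for (i = 0; i < n_words; i += MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM) {
		uint32_t n = n_words - i;

		if (n > MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM)
			n = MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM;
		for (j = 0; j < n; j++)
			sum += image[i + j];
		rc = satellite_send(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS,
				    target, chunk_id++, image + i, n);
		if (rc)
			return rc;
	}

	/* The final chunk carries the simple 32-bit sum as the checksum. */
	rc = satellite_send(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS, target,
			    MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST, &sum, 1);
	if (rc)
		return rc;

	return satellite_send(MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY,
			      MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL, 0, NULL, 0);
}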
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
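Each capability in FLAGS1 is a single bit at the LBN given above, so probing the device reduces to fetching the 20-byte response and testing the bit. A minimal sketch, assuming a little-endian response buffer already obtained from the command; mcdi_get_dword() and caps_has_rx_prefix_14() are illustrative helpers, not a real driver API.

#include <stdbool.h>
#include <stdint.h>

/* Little-endian load of a 32-bit word from a byte offset in the response. */
static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Does the datapath advertise the 14-byte RX prefix capability? */
static bool caps_has_rx_prefix_14(const uint8_t *outbuf)
{
	uint32_t flags1 = mcdi_get_dword(outbuf,
					 MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST);

	return flags1 & (1u << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN);
}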
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
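Because the extended command number and the real payload length share a single dword, composing the v2 header is a pure bit-packing exercise over the LBN/WIDTH pairs above; the encapsulated payload then follows in the request. A sketch, with mcdi_v2_header() as an illustrative helper under that assumption.

#include <stdint.h>

/* Compose the MC_CMD_V2_EXTN header word for an extended command whose
 * payload is actual_len bytes (the length the v1 header cannot express). */
static uint32_t mcdi_v2_header(uint16_t extended_cmd, uint16_t actual_len)
{
	uint32_t hdr = 0;

	hdr |= ((uint32_t)extended_cmd &
		((1u << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH) - 1))
	       << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN;
	hdr |= ((uint32_t)actual_len &
		((1u << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH) - 1))
	       << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN;
	return hdr;
}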
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to support. */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to insert/remove. */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
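For the common case of a NORMAL v-port carrying a single VLAN, the request is five dwords with the tag in the low 16 bits of the VLAN_TAGS word. A sketch assuming little-endian dword packing and these definitions in scope; mcdi_put_dword() and build_vport_alloc() are illustrative helpers only.

#include <stdint.h>
#include <string.h>

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build a 20-byte MC_CMD_VPORT_ALLOC request for a NORMAL v-port that
 * inserts/removes a single VLAN tag; FLAGS are left clear. */
static void build_vport_alloc(uint8_t buf[MC_CMD_VPORT_ALLOC_IN_LEN],
			      uint32_t upstream_port_id, uint16_t vlan_tag)
{
	memset(buf, 0, MC_CMD_VPORT_ALLOC_IN_LEN);
	mcdi_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
		       upstream_port_id);
	mcdi_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_TYPE_OFST,
		       MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL);
	mcdi_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST, 1);
	mcdi_put_dword(buf, MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST,
		       (uint32_t)vlan_tag << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN);
}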
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
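The FUNCTION word carries the PF in its low 16 bits and the VF in the high 16, so a port reassignment request is just two dwords. A sketch under the same little-endian packing assumption; the helpers are illustrative only.

#include <stdint.h>

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build an 8-byte MC_CMD_EVB_PORT_ASSIGN request handing port_id to PF/VF. */
static void build_evb_port_assign(uint8_t buf[MC_CMD_EVB_PORT_ASSIGN_IN_LEN],
				  uint32_t port_id, uint16_t pf, uint16_t vf)
{
	uint32_t function = ((uint32_t)pf << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
			    ((uint32_t)vf << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);

	mcdi_put_dword(buf, MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST, port_id);
	mcdi_put_dword(buf, MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST, function);
}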
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
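Each bit of the trailing write-mask byte selects whether the corresponding region word is written (bit set) or only read back (bit clear); the response echoes all four regions either way. A sketch that rewrites REGION2 alone, assuming little-endian packing; build_rdwr_a64_region2() is an illustrative helper.

#include <stdint.h>
#include <string.h>

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build a 17-byte MC_CMD_RDWR_A64_REGIONS request that writes REGION2 and
 * leaves the other three regions as read-only queries. */
static void build_rdwr_a64_region2(uint8_t buf[MC_CMD_RDWR_A64_REGIONS_IN_LEN],
				   uint32_t region2)
{
	memset(buf, 0, MC_CMD_RDWR_A64_REGIONS_IN_LEN);
	mcdi_put_dword(buf, MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST, region2);
	/* Write-enable bit 2 only: set means write, clear means read. */
	buf[MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST] = 1u << 2;
}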
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
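Allocating an exclusive context, whose key and indirection table are configured separately afterwards, is a three-dword request; NUM_QUEUES must stay in 1-64 since indirection entries index 0..NUM_QUEUES-1. A sketch assuming little-endian packing; the helpers are illustrative only.

#include <stdint.h>

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build a 12-byte MC_CMD_RSS_CONTEXT_ALLOC request for an EXCLUSIVE context
 * spreading over num_queues (1-64) queues under the given upstream port. */
static void build_rss_alloc(uint8_t buf[MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN],
			    uint32_t upstream_port_id, uint32_t num_queues)
{
	mcdi_put_dword(buf, MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
		       upstream_port_id);
	mcdi_put_dword(buf, MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	mcdi_put_dword(buf, MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST,
		       num_queues);
}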
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
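Since the indirection table is 128 one-byte entries, each holding a queue offset below NUM_QUEUES, a plain round-robin fill is enough to spread flows evenly. A sketch assuming little-endian packing for the context handle; build_rss_table() is an illustrative helper.

#include <stdint.h>

/* Little-endian store of a 32-bit word at a byte offset in the payload. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Build a 132-byte MC_CMD_RSS_CONTEXT_SET_TABLE request whose 128 one-byte
 * entries cycle round-robin over queue offsets 0..num_queues-1
 * (num_queues must be 1-64). */
static void build_rss_table(uint8_t buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN],
			    uint32_t rss_context_id, uint32_t num_queues)
{
	unsigned int i;

	mcdi_put_dword(buf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST,
		       rss_context_id);
	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
		buf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
			i % num_queues;
}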
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_STATS
+ * Retrieve rmon rx class statistics
+ */
+#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
+
+/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
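All of the RMON *_STATS responses in this family are variable-length arrays of 32-bit counters, so the counter count falls straight out of the returned length via the OUT_LEN(num) formula. A reader sketch, assuming the response buffer and its actual length are already available; rmon_rx_class_total() and mcdi_get_dword() are illustrative helpers.

#include <stddef.h>
#include <stdint.h>

/* Little-endian load of a 32-bit word from a byte offset in the response. */
static uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
	return (uint32_t)buf[ofst] |
	       ((uint32_t)buf[ofst + 1] << 8) |
	       ((uint32_t)buf[ofst + 2] << 16) |
	       ((uint32_t)buf[ofst + 3] << 24);
}

/* Sum the counters in an MC_CMD_RMON_RX_CLASS_STATS response; the counter
 * count is outlen / 4 because OUT_LEN(num) = 0 + 4 * num. */
static uint64_t rmon_rx_class_total(const uint8_t *outbuf, size_t outlen)
{
	size_t num = outlen / MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN;
	uint64_t total = 0;
	size_t i;

	for (i = 0; i < num; i++)
		total += mcdi_get_dword(outbuf,
			MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST +
			i * MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN);
	return total;
}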
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_CLASS_STATS
+ * Retrieve rmon tx class statistics
+ */
+#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+
+/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
+ * Retrieve rmon rx super_class statistics
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
+ * Retrieve rmon tx super_class statistics
+ */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_CLASS
+ * Allocate an rmon class
+ */
+#define MC_CMD_RMON_ALLOC_CLASS 0xca
+
+/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_CLASS
+ * Deallocate an rmon class
+ */
+#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
+
+/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
+/* class */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS
+ * Allocate an rmon super_class
+ */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
+ * Deallocate an rmon super_class
+ */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
+/* super_class */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_UP_CONV_STATS
+ * Retrieve up converter statistics
+ */
+#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPI_STATS
+ * Retrieve rx ipi stats
+ */
+#define MC_CMD_RMON_RX_IPI_STATS 0xcf
+
+/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve rx ipsec cntxt_ptr indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
+ * Retrieve rx ipsec port indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
+ * Retrieve rx ipsec overflow
+ */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for q page mapping, although the latter restriction may be
+ * lifted in the future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
+ * Retrieve rx class drop stats
+ */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
+ * Retrieve rx super class drop stats
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ERRORS_STATS
+ * Retrieve rxdp errors
+ */
+#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_OVERFLOW_STATS
+ * Retrieve rxdp overflow
+ */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPI_STATS
+ * Retrieve tx ipi stats
+ */
+#define MC_CMD_RMON_TX_IPI_STATS 0xd7
+
+/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve tx ipsec counters by cntxt_ptr
+ */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
+ * Retrieve tx ipsec counters by port
+ */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
+ * Retrieve tx ipsec overflow
+ */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_STATS
+ * Retrieve tx nowhere stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
+ * Retrieve tx nowhere qbb stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ERRORS_STATS
+ * Retrieve txdp errors
+ */
+#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_OVERFLOW_STATS
+ * Retrieve txdp overflow
+ */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_CLASS_STATS
+ * Explicitly collect class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
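/*
 * Illustrative sketch only (not introduced by this change): querying the
 * clock frequencies with the MCDI helpers this patch uses elsewhere
 * (MCDI_DECLARE_BUF, efx_mcdi_rpc, MCDI_DWORD). The function name is
 * hypothetical.
 */
static int example_get_clock(struct efx_nic *efx, u32 *sys_mhz, u32 *dpcpu_mhz)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CLOCK_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CLOCK_OUT_LEN)
		return -EIO;

	*sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	*dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
	return 0;
}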
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on
+ * reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
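/*
 * Illustrative sketch only (not introduced by this change): raising a test
 * interrupt at a given level with this command. Only MCDI helpers already
 * used by this patch are assumed; the function name is hypothetical.
 */
static int example_trigger_interrupt(struct efx_nic *efx, u32 intr_level)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, intr_level);
	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}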
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
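/*
 * Illustrative sketch only (not introduced by this change): reading back the
 * PF and VF numbers for the calling function. Uses only helpers shown
 * elsewhere in this patch; the function name is hypothetical.
 */
static int example_get_function_info(struct efx_nic *efx, u32 *pf, u32 *vf)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_FUNCTION_INFO_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN)
		return -EIO;

	*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}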
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, the chip enters
+ * quiescent mode, and the calling function gets exclusive MCDI ownership.
+ * The only way out is a reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_START_KR_EYE_PLOT
+ * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_START_KR_EYE_PLOT 0xee
+
+/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
+#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
+
+/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_KR_EYE_PLOT
+ * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
+ * should call this command repeatedly after starting eye plot, until no more
+ * data is returned.
+ */
+#define MC_CMD_POLL_KR_EYE_PLOT 0xef
+
+/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
+
+/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses.
+ */
+#define MC_CMD_READ_FUSES 0xf0
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+
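/*
 * Illustrative sketch only (not introduced by this change): reading a short
 * run of OTP bytes. MCDI_PTR is used the same way this patch uses it for
 * GET_PHY_MEDIA_INFO; the function name is hypothetical.
 */
static int example_read_fuses(struct efx_nic *efx, u32 offset,
			      u8 *data, u32 len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
	size_t outlen;
	int rc;

	if (len > MC_CMD_READ_FUSES_OUT_DATA_MAXNUM)
		return -EINVAL;

	MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, len);

	rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH) < len)
		return -EIO;

	memcpy(data, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA), len);
	return 0;
}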
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ */
+#define MC_CMD_LICENSING 0xf3
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
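/*
 * Illustrative sketch only (not introduced by this change): counting the
 * installed license keys with the GET_KEY_STATS operation. Only helpers
 * already used in this patch are assumed; the function name is hypothetical.
 */
static int example_licensing_key_stats(struct efx_nic *efx, u32 *valid_keys)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
		       MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);

	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_OUT_LEN)
		return -EIO;

	*valid_keys = MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
	return 0;
}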
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 13cb40fe90c..7b6be61d549 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -36,7 +36,7 @@ struct efx_mcdi_phy_data {
static int
efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
- u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
int rc;
@@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode,
u32 loopback_speed)
{
- u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -102,7 +102,7 @@ fail:
static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
- u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
int rc;
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
if (rc)
goto fail;
- if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO;
goto fail;
}
@@ -125,16 +126,16 @@ fail:
return rc;
}
-int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 *value_out, u32 *status_out)
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
{
- u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
@@ -144,25 +145,27 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
- *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
- return 0;
+ if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
-int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 value, u32 *status_out)
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
{
- u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
@@ -173,7 +176,10 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+ if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
return 0;
fail:
@@ -304,10 +310,37 @@ static u32 mcdi_to_ethtool_media(u32 media)
}
}
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
u32 caps;
int rc;
@@ -403,7 +436,7 @@ fail:
return rc;
}
-int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
@@ -414,37 +447,10 @@ int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
-void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
@@ -472,7 +478,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
@@ -507,7 +513,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
ecmd->supported =
@@ -550,6 +556,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
default: return -EINVAL;
}
} else {
@@ -579,7 +586,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
- u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
int rc;
@@ -615,17 +622,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
unsigned int retry, i, count = 0;
size_t outlen;
u32 status;
- u8 *buf, *ptr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
int rc;
- buf = kzalloc(0x100, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
- MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
if (rc)
goto out;
@@ -633,11 +638,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
for (retry = 0; retry < 100; ++retry) {
BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
- buf, 0x100, &outlen);
+ outbuf, sizeof(outbuf), &outlen);
if (rc)
goto out;
- status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
if (status != MC_CMD_POLL_BIST_RUNNING)
goto finished;
@@ -654,7 +659,7 @@ finished:
if (efx->phy_type == PHY_TYPE_SFT9001B &&
(bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
- ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
if (status == MC_CMD_POLL_BIST_PASSED &&
outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
for (i = 0; i < 8; i++) {
@@ -668,8 +673,6 @@ finished:
rc = count;
out:
- kfree(buf);
-
return rc;
}
@@ -744,8 +747,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
- u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
- u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
size_t outlen;
int rc;
unsigned int payload_len;
@@ -785,8 +788,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
space_remaining : payload_len;
memcpy(user_data,
- outbuf + page_off +
- MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
+ MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
to_copy);
space_remaining -= to_copy;
@@ -813,10 +815,10 @@ static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
}
}
-const struct efx_phy_operations efx_mcdi_phy_ops = {
+static const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
- .reconfigure = efx_mcdi_phy_reconfigure,
+ .reconfigure = efx_mcdi_port_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
@@ -828,3 +830,200 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
.get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
.get_module_info = efx_mcdi_phy_get_module_info,
};
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ return phy_data->supported_cap;
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr, ETH_ALEN);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return true;
+ }
+
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ int rc;
+ int period = enable ? 1000 : 0;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, !!enable,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MAC_STATS_IN_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
+ return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+ MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = efx_mcdi_mdio_read;
+ efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+ return 0;
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Get physical port number (EF10 only; on Siena this is the PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ff..8ff954c59ef 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de9..16824fecc5e 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 08f825b71ac..a77a8bd2dd7 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,194 +1,32 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
-#include "spi.h"
#include "efx.h"
-#include "nic.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-#define EFX_SPI_VERIFY_BUF_LEN 16
-
-struct efx_mtd_partition {
- struct mtd_info mtd;
- union {
- struct {
- bool updating;
- u8 nvram_type;
- u16 fw_subtype;
- } mcdi;
- size_t offset;
- };
- const char *type_name;
- char name[IFNAMSIZ + 20];
-};
-
-struct efx_mtd_ops {
- int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, u8 *buffer);
- int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
- int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, const u8 *buffer);
- int (*sync)(struct mtd_info *mtd);
-};
-
-struct efx_mtd {
- struct list_head node;
- struct efx_nic *efx;
- const struct efx_spi_device *spi;
- const char *name;
- const struct efx_mtd_ops *ops;
- size_t n_parts;
- struct efx_mtd_partition part[0];
-};
-
-#define efx_for_each_partition(part, efx_mtd) \
- for ((part) = &(efx_mtd)->part[0]; \
- (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
- (part)++)
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
-static int falcon_mtd_probe(struct efx_nic *efx);
-static int siena_mtd_probe(struct efx_nic *efx);
-
-/* SPI utilities */
-
-static int
-efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- u8 status;
- int rc, i;
-
- /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
- for (i = 0; i < 40; i++) {
- __set_current_state(uninterruptible ?
- TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (signal_pending(current))
- return -EINTR;
- }
- pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
- return -ETIMEDOUT;
-}
-
-static int
-efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
- SPI_STATUS_BP0);
- u8 status;
- int rc;
-
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
-
- if (!(status & unlock_mask))
- return 0; /* already unlocked */
-
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
- if (rc)
- return rc;
-
- status &= ~unlock_mask;
- rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
- NULL, sizeof(status));
- if (rc)
- return rc;
- rc = falcon_spi_wait_write(efx, spi);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int
-efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- unsigned pos, block_len;
- u8 empty[EFX_SPI_VERIFY_BUF_LEN];
- u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
- int rc;
-
- if (len != spi->erase_size)
- return -EINVAL;
-
- if (spi->erase_command == 0)
- return -EOPNOTSUPP;
-
- rc = efx_spi_unlock(efx, spi);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
- NULL, 0);
- if (rc)
- return rc;
- rc = efx_spi_slow_wait(part, false);
-
- /* Verify the entire region has been wiped */
- memset(empty, 0xff, sizeof(empty));
- for (pos = 0; pos < len; pos += block_len) {
- block_len = min(len - pos, sizeof(buffer));
- rc = falcon_spi_read(efx, spi, start + pos, block_len,
- NULL, buffer);
- if (rc)
- return rc;
- if (memcmp(empty, buffer, block_len))
- return -EIO;
-
- /* Avoid locking up the system */
- cond_resched();
- if (signal_pending(current))
- return -EINTR;
- }
-
- return rc;
-}
-
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
+ rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
@@ -202,13 +40,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->sync(mtd);
+ rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
- part->name, efx_mtd->name, rc);
+ part->name, part->dev_type_name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -222,62 +60,44 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
ssleep(1);
}
WARN_ON(rc);
+ list_del(&part->node);
}
-static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- efx_mtd_remove_partition(part);
- list_del(&efx_mtd->node);
- kfree(efx_mtd);
-}
-
-static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
- snprintf(part->name, sizeof(part->name),
- "%s %s:%02x", efx_mtd->efx->name,
- part->type_name, part->mcdi.fw_subtype);
- else
- snprintf(part->name, sizeof(part->name),
- "%s %s", efx_mtd->efx->name,
- part->type_name);
-}
-
-static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
+ size_t i;
- efx_mtd->efx = efx;
+ for (i = 0; i < n_parts; i++) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
- efx_mtd_rename_device(efx_mtd);
-
- efx_for_each_partition(part, efx_mtd) {
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
- part->mtd.priv = efx_mtd;
+ part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = efx_mtd_erase;
- part->mtd._read = efx_mtd->ops->read;
- part->mtd._write = efx_mtd->ops->write;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
part->mtd._sync = efx_mtd_sync;
+ efx->type->mtd_rename(part);
+
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
+
+ /* Add to list in order - efx_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
}
- list_add(&efx_mtd->node, &efx->mtd_list);
return 0;
fail:
- while (part != &efx_mtd->part[0]) {
- --part;
+ while (i--) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
efx_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
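[Editorial sketch, not part of the patch] efx_mtd_add() deliberately takes the stride of the caller's partition type, so NIC-specific code can embed struct efx_mtd_partition at the start of a larger structure and hand over the whole array in one call; that is why the loop advances by byte arithmetic rather than part++. A minimal illustration, where the falcon_mtd_partition type and its fields are hypothetical:

struct falcon_mtd_partition {			/* hypothetical derived type */
	struct efx_mtd_partition common;	/* must be the first member */
	size_t offset;				/* caller-private data */
};

static int falcon_mtd_probe_sketch(struct efx_nic *efx)
{
	struct falcon_mtd_partition *parts;
	int rc;

	parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	/* ... fill in parts[i].common.mtd.size, .type_name, etc. ... */

	/* The stride lets efx_mtd_add() step over the caller-private
	 * fields when walking the array.
	 */
	rc = efx_mtd_add(efx, &parts[0].common, 2, sizeof(*parts));
	if (rc)
		kfree(parts);
	return rc;
}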
@@ -286,410 +106,28 @@ fail:
void efx_mtd_remove(struct efx_nic *efx)
{
- struct efx_mtd *efx_mtd, *next;
+ struct efx_mtd_partition *parts, *part, *next;
WARN_ON(efx_dev_registered(efx));
- list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
- efx_mtd_remove_device(efx_mtd);
-}
-
-void efx_mtd_rename(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
-
- ASSERT_RTNL();
-
- list_for_each_entry(efx_mtd, &efx->mtd_list, node)
- efx_mtd_rename_device(efx_mtd);
-}
-
-int efx_mtd_probe(struct efx_nic *efx)
-{
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- return siena_mtd_probe(efx);
- else
- return falcon_mtd_probe(efx);
-}
-
-/* Implementation of MTD operations for Falcon */
-
-static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_read(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = efx_spi_erase(part, part->offset + start, len);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_write(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- mutex_lock(&nic_data->spi_lock);
- rc = efx_spi_slow_wait(part, true);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static const struct efx_mtd_ops falcon_mtd_ops = {
- .read = falcon_mtd_read,
- .erase = falcon_mtd_erase,
- .write = falcon_mtd_write,
- .sync = falcon_mtd_sync,
-};
-
-static int falcon_mtd_probe(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- struct efx_spi_device *spi;
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
-
- ASSERT_RTNL();
-
- spi = &nic_data->spi_flash;
- if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "flash";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_NORFLASH;
- efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
- efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].type_name = "sfc_flash_bootrom";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- spi = &nic_data->spi_eeprom;
- if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "EEPROM";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_RAM;
- efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
- efx_mtd->part[0].mtd.size =
- min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
- EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].type_name = "sfc_bootconfig";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- return rc;
-}
-
-/* Implementation of MTD operations for Siena */
-
-static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk = part->mtd.erasesize;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- /* The MCDI interface can in fact do multiple erase blocks at once;
- * but erasing may be slow, so we make multiple calls here to avoid
- * tripping the MCDI RPC timeout. */
- while (offset < end) {
- rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
- chunk);
- if (rc)
- goto out;
- offset += chunk;
- }
-out:
- return rc;
-}
-
-static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- int rc = 0;
-
- if (part->mcdi.updating) {
- part->mcdi.updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
- }
-
- return rc;
-}
-
-static const struct efx_mtd_ops siena_mtd_ops = {
- .read = siena_mtd_read,
- .erase = siena_mtd_erase,
- .write = siena_mtd_write,
- .sync = siena_mtd_sync,
-};
-
-struct siena_nvram_type_info {
- int port;
- const char *name;
-};
+ if (list_empty(&efx->mtd_list))
+ return;
-static const struct siena_nvram_type_info siena_nvram_types[] = {
- [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
- [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
- [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
-};
+ parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+ node);
-static int siena_mtd_probe_partition(struct efx_nic *efx,
- struct efx_mtd *efx_mtd,
- unsigned int part_id,
- unsigned int type)
-{
- struct efx_mtd_partition *part = &efx_mtd->part[part_id];
- const struct siena_nvram_type_info *info;
- size_t size, erase_size;
- bool protected;
- int rc;
-
- if (type >= ARRAY_SIZE(siena_nvram_types) ||
- siena_nvram_types[type].name == NULL)
- return -ENODEV;
-
- info = &siena_nvram_types[type];
-
- if (info->port != efx_port_num(efx))
- return -ENODEV;
-
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
- if (rc)
- return rc;
- if (protected)
- return -ENODEV; /* hide it */
-
- part->mcdi.nvram_type = type;
- part->type_name = info->name;
-
- part->mtd.type = MTD_NORFLASH;
- part->mtd.flags = MTD_CAP_NORFLASH;
- part->mtd.size = size;
- part->mtd.erasesize = erase_size;
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ efx_mtd_remove_partition(part);
- return 0;
+ kfree(parts);
}
-static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
- struct efx_mtd *efx_mtd)
+void efx_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd_partition *part;
- uint16_t fw_subtype_list[
- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
- int rc;
-
- rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
- if (rc)
- return rc;
-
- efx_for_each_partition(part, efx_mtd)
- part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
-
- return 0;
-}
-
-static int siena_mtd_probe(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
- u32 nvram_types;
- unsigned int type;
ASSERT_RTNL();
- rc = efx_mcdi_nvram_types(efx, &nvram_types);
- if (rc)
- return rc;
-
- efx_mtd = kzalloc(sizeof(*efx_mtd) +
- hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->name = "Siena NVRAM manager";
-
- efx_mtd->ops = &siena_mtd_ops;
-
- type = 0;
- efx_mtd->n_parts = 0;
-
- while (nvram_types != 0) {
- if (nvram_types & 1) {
- rc = siena_mtd_probe_partition(efx, efx_mtd,
- efx_mtd->n_parts, type);
- if (rc == 0)
- efx_mtd->n_parts++;
- else if (rc != -ENODEV)
- goto fail;
- }
- type++;
- nvram_types >>= 1;
- }
-
- rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
- if (rc)
- goto fail;
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
-fail:
- if (rc)
- kfree(efx_mtd);
- return rc;
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
}
-
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f4c7e6b6774..b172ed13305 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -27,9 +27,11 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
#include "enum.h"
#include "bitfield.h"
+#include "filter.h"
/**************************************************************************
*
@@ -37,7 +39,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.2"
+#define EFX_DRIVER_VERSION "4.0"
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -93,21 +95,36 @@ struct efx_ptp_data;
struct efx_self_tests;
/**
- * struct efx_special_buffer - An Efx special buffer
- * @addr: CPU base address of the buffer
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
- * @index: Buffer index within controller;s buffer table
- * @entries: Number of buffer table entries
*
- * Special buffers are used for the event queues and the TX and RX
- * descriptor queues for each channel. They are *not* used for the
- * actual transmit and receive buffers.
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
*/
-struct efx_special_buffer {
+struct efx_buffer {
void *addr;
dma_addr_t dma_addr;
unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+ struct efx_buffer buf;
unsigned int index;
unsigned int entries;
};
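[Editorial sketch, not part of the patch] With struct efx_buffer nested inside struct efx_special_buffer, generic allocation code can operate on the embedded DMA buffer while only Falcon-architecture code touches @index and @entries. Sizing an event queue might look roughly like this, assuming the efx_nic_alloc_buffer() signature introduced later in this patch:

static int example_alloc_eventq(struct efx_nic *efx,
				struct efx_special_buffer *eventq,
				unsigned int entries)
{
	/* Only the embedded DMA buffer is allocated here; the buffer-table
	 * index/entries are filled in by the Falcon-arch code if needed.
	 */
	return efx_nic_alloc_buffer(efx, &eventq->buf,
				    entries * sizeof(efx_qword_t),
				    GFP_KERNEL);
}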
@@ -118,6 +135,7 @@ struct efx_special_buffer {
* freed when descriptor completes
* @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
* freed when descriptor completes.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -129,7 +147,10 @@ struct efx_tx_buffer {
const struct sk_buff *skb;
void *heap_buf;
};
- dma_addr_t dma_addr;
+ union {
+ efx_qword_t option;
+ dma_addr_t dma_addr;
+ };
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
@@ -138,6 +159,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
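[Editorial sketch, not part of the patch] A buffer flagged %EFX_TX_BUF_OPTION carries a NIC-specific option descriptor in the @option member of the new union instead of a DMA address, so completion handling must not try to unmap or free it; an illustrative helper:

static void example_queue_option_desc(struct efx_tx_buffer *buffer,
				      efx_qword_t option)
{
	buffer->flags = EFX_TX_BUF_OPTION;	/* nothing to unmap or free */
	buffer->len = 0;
	buffer->unmap_len = 0;
	buffer->option = option;
}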
/**
* struct efx_tx_queue - An Efx TX queue
@@ -169,6 +191,7 @@ struct efx_tx_buffer {
* variable indicates that the queue is empty. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
+ * @merge_events: Number of TX merged completion events
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -205,6 +228,7 @@ struct efx_tx_queue {
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
+ unsigned int merge_events;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -244,6 +268,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
/**
* struct efx_rx_page_state - Page-based rx buffer state
@@ -271,13 +296,14 @@ struct efx_rx_page_state {
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
- * @enabled: Receive queue enabled indicator.
+ * @refill_enabled: Enable refill whenever fill level is low
* @flush_pending: Set when a RX flush is pending. Has the same lifetime as
* @rxq_flush_pending.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @scatter_n: Number of buffers used by current packet
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
* @page_ring: The ring to store DMA mapped pages for reuse.
* @page_add: Counter to calculate the write pointer for the recycle ring.
* @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -302,13 +328,14 @@ struct efx_rx_queue {
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
unsigned int ptr_mask;
- bool enabled;
+ bool refill_enabled;
bool flush_pending;
unsigned int added_count;
unsigned int notified_count;
unsigned int removed_count;
unsigned int scatter_n;
+ unsigned int scatter_len;
struct page **page_ring;
unsigned int page_add;
unsigned int page_remove;
@@ -325,22 +352,6 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-/**
- * struct efx_buffer - An Efx general-purpose buffer
- * @addr: host base address of the buffer
- * @dma_addr: DMA base address of the buffer
- * @len: Buffer length, in bytes
- *
- * The NIC uses these buffers for its interrupt status registers and
- * MAC stats dumps.
- */
-struct efx_buffer {
- void *addr;
- dma_addr_t dma_addr;
- unsigned int len;
-};
-
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -357,12 +368,12 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
- * @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
@@ -378,6 +389,8 @@ enum efx_rx_alloc_method {
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
* lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -389,12 +402,12 @@ struct efx_channel {
struct efx_nic *efx;
int channel;
const struct efx_channel_type *type;
+ bool eventq_init;
bool enabled;
int irq;
unsigned int irq_moderation;
struct net_device *napi_dev;
struct napi_struct napi_str;
- bool work_pending;
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -414,6 +427,8 @@ struct efx_channel {
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -423,6 +438,21 @@ struct efx_channel {
};
/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
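[Editorial sketch, not part of the patch] Because the MSI context is allocated once per possible channel and never reallocated, an interrupt handler can dereference it without racing against channel reallocation. The handler below only illustrates the expected shape, not the driver's actual handler:

static irqreturn_t example_msi_handler(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	/* Kick NAPI for the channel this vector is bound to. */
	if (likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}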
+
+/**
* struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel
* @pre_probe: Set up extra state prior to initialisation
@@ -579,75 +609,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
return !!(mode & ~PHY_MODE_TX_DISABLED);
}
-/*
- * Efx extended statistics
- *
- * Not all statistics are provided by all supported MACs. The purpose
- * is this structure is to contain the raw statistics provided by each
- * MAC.
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
*/
-struct efx_mac_stats {
- u64 tx_bytes;
- u64 tx_good_bytes;
- u64 tx_bad_bytes;
- u64 tx_packets;
- u64 tx_bad;
- u64 tx_pause;
- u64 tx_control;
- u64 tx_unicast;
- u64 tx_multicast;
- u64 tx_broadcast;
- u64 tx_lt64;
- u64 tx_64;
- u64 tx_65_to_127;
- u64 tx_128_to_255;
- u64 tx_256_to_511;
- u64 tx_512_to_1023;
- u64 tx_1024_to_15xx;
- u64 tx_15xx_to_jumbo;
- u64 tx_gtjumbo;
- u64 tx_collision;
- u64 tx_single_collision;
- u64 tx_multiple_collision;
- u64 tx_excessive_collision;
- u64 tx_deferred;
- u64 tx_late_collision;
- u64 tx_excessive_deferred;
- u64 tx_non_tcpudp;
- u64 tx_mac_src_error;
- u64 tx_ip_src_error;
- u64 rx_bytes;
- u64 rx_good_bytes;
- u64 rx_bad_bytes;
- u64 rx_packets;
- u64 rx_good;
- u64 rx_bad;
- u64 rx_pause;
- u64 rx_control;
- u64 rx_unicast;
- u64 rx_multicast;
- u64 rx_broadcast;
- u64 rx_lt64;
- u64 rx_64;
- u64 rx_65_to_127;
- u64 rx_128_to_255;
- u64 rx_256_to_511;
- u64 rx_512_to_1023;
- u64 rx_1024_to_15xx;
- u64 rx_15xx_to_jumbo;
- u64 rx_gtjumbo;
- u64 rx_bad_lt64;
- u64 rx_bad_64_to_15xx;
- u64 rx_bad_15xx_to_jumbo;
- u64 rx_bad_gtjumbo;
- u64 rx_overflow;
- u64 rx_missed;
- u64 rx_false_carrier;
- u64 rx_symbol_error;
- u64 rx_align_error;
- u64 rx_length_error;
- u64 rx_internal_error;
- u64 rx_good_lt64;
+struct efx_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
};
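[Editorial sketch, not part of the patch] Statistics are now driven from per-NIC tables of struct efx_hw_stat_desc rather than the fixed efx_mac_stats structure; a describe helper built on such a table could look roughly like this (the function name is hypothetical):

static size_t example_describe_stats(const struct efx_hw_stat_desc *desc,
				     size_t count, u8 *names)
{
	size_t visible = 0, i;

	for (i = 0; i < count; i++) {
		if (!desc[i].name)
			continue;	/* not exposed through ethtool */
		if (names) {
			strlcpy(names, desc[i].name, ETH_GSTRING_LEN);
			names += ETH_GSTRING_LEN;
		}
		visible++;
	}
	return visible;
}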
/* Number of bits used in a multicast filter hash address */
@@ -662,7 +634,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_filter_state;
struct efx_vf;
struct vfdi_status;
@@ -672,7 +643,6 @@ struct vfdi_status;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
- * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
@@ -689,7 +659,7 @@ struct vfdi_status;
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @channel_name: Names for channels and their IRQs
+ * @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
@@ -707,17 +677,25 @@ struct vfdi_status;
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
* for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue
* @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
@@ -737,8 +715,10 @@ struct vfdi_status;
* @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
- * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
- * @multicast_hash: Multicast hash table
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
* @wanted_fc: Wanted flow control flags
* @fc_disable: When non-zero flow control is disabled. Typically used to
* ensure that network back pressure doesn't delay dma queue flushes.
@@ -747,7 +727,12 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
+ * @filter_lock: Filter table lock
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
* @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -771,12 +756,8 @@ struct vfdi_status;
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
* field is used by efx_test_interrupts() to verify that an
* interrupt has occurred.
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
- * @mac_stats: MAC statistics. These include all statistics the MACs
- * can provide. Generic code converts these into a standard
- * &struct net_device_stats.
- * @stats_lock: Statistics update lock. Serialises statistics fetches
- * and access to @mac_stats.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * efx_nic_type::{update,start,stop}_stats.
*
* This is stored in the private area of the &struct net_device.
*/
@@ -788,7 +769,6 @@ struct efx_nic {
unsigned int port_num;
const struct efx_nic_type *type;
int legacy_irq;
- bool legacy_irq_enabled;
bool eeh_disabled_legacy_irq;
struct workqueue_struct *workqueue;
char workqueue_name[16];
@@ -806,7 +786,7 @@ struct efx_nic {
unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
- char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
@@ -819,6 +799,8 @@ struct efx_nic {
unsigned rx_dc_base;
unsigned sram_lim_qw;
unsigned next_buffer_table;
+
+ unsigned int max_channels;
unsigned n_channels;
unsigned n_rx_channels;
unsigned rss_spread;
@@ -830,6 +812,9 @@ struct efx_nic {
unsigned int rx_page_buf_step;
unsigned int rx_bufs_per_page;
unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -837,6 +822,7 @@ struct efx_nic {
unsigned int_error_count;
unsigned long int_error_expire;
+ bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
unsigned irq_level;
@@ -847,6 +833,7 @@ struct efx_nic {
#endif
void *nic_data;
+ struct efx_mcdi_data *mcdi;
struct mutex mac_lock;
struct work_struct mac_work;
@@ -868,7 +855,7 @@ struct efx_nic {
struct efx_link_state link_state;
unsigned int n_link_state_changes;
- bool promiscuous;
+ bool unicast_filter;
union efx_multicast_hash multicast_hash;
u8 wanted_fc;
unsigned fc_disable;
@@ -879,9 +866,14 @@ struct efx_nic {
void *loopback_selftest;
- struct efx_filter_state *filter_state;
+ spinlock_t filter_lock;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned int rps_expire_index;
+#endif
- atomic_t drain_pending;
+ atomic_t active_queues;
atomic_t rxq_flush_pending;
atomic_t rxq_flush_outstanding;
wait_queue_head_t flush_wq;
@@ -907,8 +899,6 @@ struct efx_nic {
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
spinlock_t biu_lock;
int last_irq_cpu;
- unsigned n_rx_nodesc_drop_cnt;
- struct efx_mac_stats mac_stats;
spinlock_t stats_lock;
};
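[Editorial sketch, not part of the patch] The new @rx_prefix_size, @rx_packet_hash_offset and @rx_packet_len_offset fields describe the per-NIC RX prefix; both offsets are negative, measured back from the start of packet data. A simplified reader, assuming a little-endian 32-bit flow hash in the prefix:

static u32 example_rx_flow_hash(struct efx_nic *efx, const u8 *eh)
{
	/* eh points at the start of packet data; the hash sits inside the
	 * prefix immediately before it.
	 */
	return le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
}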
@@ -922,8 +912,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
return efx->port_num;
}
+struct efx_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
+
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
* @init: Initialise the controller
@@ -938,47 +937,118 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
- * @finish_flush: Clean up after flushing the DMA queues
- * @update_stats: Update statistics not provided by event handling
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
* @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
* to the hardware. Serialised by the mac_lock.
* @check_mac_fault: Check MAC fault state. True if fault present.
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * @test_chip: Test registers. May use efx_farch_test_registers(), and is
* expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ * The SDU length may be any value from 0 up to the protocol-
+ * defined maximum, but its buffer will be padded to a multiple
+ * of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU. The offset will
+ * be a multiple of 4. The length may not be, but the buffer
+ * will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
+ * return an appropriate error code for aborting any current
+ * request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: add or replace a filter
+ * @filter_remove_safe: remove a filter by ID, carefully
+ * @filter_get_safe: retrieve a filter by ID, carefully
+ * @filter_clear_rx: remove RX filters by priority
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_insert: Add or replace a filter for RFS. This must be
+ * atomic. The hardware change may be asynchronous but should
+ * not be delayed for long. It may fail if this can't be done
+ * atomically.
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using efx_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
* @revision: Hardware architecture revision
- * @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
* @buf_tbl_base: Buffer table base address
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
- * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
- * @phys_addr_channels: Number of channels with physically addressed
- * descriptors
* @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
*/
struct efx_nic_type {
+ unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx);
- void (*dimension_resources)(struct efx_nic *efx);
+ int (*dimension_resources)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -987,14 +1057,18 @@ struct efx_nic_type {
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
- void (*update_stats)(struct efx_nic *efx);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
+ void (*prepare_enable_fc_tx)(struct efx_nic *efx);
int (*reconfigure_mac)(struct efx_nic *efx);
bool (*check_mac_fault)(struct efx_nic *efx);
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
@@ -1002,22 +1076,90 @@ struct efx_nic_type {
void (*resume_wol)(struct efx_nic *efx);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
+ void (*mcdi_request)(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len);
+ bool (*mcdi_poll_response)(struct efx_nic *efx);
+ void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+ size_t pdu_offset, size_t pdu_len);
+ int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*irq_enable_master)(struct efx_nic *efx);
+ void (*irq_test_generate)(struct efx_nic *efx);
+ void (*irq_disable_non_ev)(struct efx_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct efx_tx_queue *tx_queue);
+ void (*tx_init)(struct efx_tx_queue *tx_queue);
+ void (*tx_remove)(struct efx_tx_queue *tx_queue);
+ void (*tx_write)(struct efx_tx_queue *tx_queue);
+ void (*rx_push_indir_table)(struct efx_nic *efx);
+ int (*rx_probe)(struct efx_rx_queue *rx_queue);
+ void (*rx_init)(struct efx_rx_queue *rx_queue);
+ void (*rx_remove)(struct efx_rx_queue *rx_queue);
+ void (*rx_write)(struct efx_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ int (*ev_probe)(struct efx_channel *channel);
+ int (*ev_init)(struct efx_channel *channel);
+ void (*ev_fini)(struct efx_channel *channel);
+ void (*ev_remove)(struct efx_channel *channel);
+ int (*ev_process)(struct efx_channel *channel, int quota);
+ void (*ev_read_ack)(struct efx_channel *channel);
+ void (*ev_test_generate)(struct efx_channel *channel);
+ int (*filter_table_probe)(struct efx_nic *efx);
+ void (*filter_table_restore)(struct efx_nic *efx);
+ void (*filter_table_remove)(struct efx_nic *efx);
+ void (*filter_update_rx_scatter)(struct efx_nic *efx);
+ s32 (*filter_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+ void (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+ s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ s32 (*filter_rfs_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+ bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_MTD
+ int (*mtd_probe)(struct efx_nic *efx);
+ void (*mtd_rename)(struct efx_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int revision;
- unsigned int mem_map_size;
unsigned int txd_ptr_tbl_base;
unsigned int rxd_ptr_tbl_base;
unsigned int buf_tbl_base;
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
- unsigned int rx_buffer_hash_size;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
+ bool always_rx_scatter;
unsigned int max_interrupt_mode;
- unsigned int phys_addr_channels;
unsigned int timer_period_max;
netdev_features_t offload_features;
+ int mcdi_max_ver;
+ unsigned int max_rx_ip_filters;
};
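[Editorial sketch, not part of the patch] The new mcdi_* hooks decouple the generic MCDI layer from the transport: it issues a request through mcdi_request() and then either waits for a completion event or polls. A much-simplified polling loop, without the timeout and length handling the real code needs, might be:

static int example_mcdi_poll(struct efx_nic *efx, efx_dword_t *outbuf,
			     size_t outlen)
{
	int rc;

	while (!efx->type->mcdi_poll_response(efx)) {
		rc = efx->type->mcdi_poll_reboot(efx);
		if (rc)
			return rc;	/* MC rebooted; abort this request */
		udelay(10);
	}

	/* Skip the response header; the 4-byte offset here is illustrative
	 * and depends on the MCDI protocol version in use.
	 */
	efx->type->mcdi_read_response(efx, outbuf, 4, outlen);
	return 0;
}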
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 56ed3bc71e0..e7dbd2dd202 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,295 +19,22 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
- * Configurable values
- *
- **************************************************************************
- */
-
-/* This is set to 16 for a good reason. In summary, if larger than
- * 16, the descriptor cache holds more than a default socket
- * buffer's worth of packets (for UDP we can only have at most one
- * socket buffer's worth outstanding). This combined with the fact
- * that we only get 1 TX event per descriptor cache means the NIC
- * goes idle.
- */
-#define TX_DC_ENTRIES 16
-#define TX_DC_ENTRIES_ORDER 1
-
-#define RX_DC_ENTRIES 64
-#define RX_DC_ENTRIES_ORDER 3
-
-/* If EFX_MAX_INT_ERRORS internal errors occur within
- * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
- * disable it.
- */
-#define EFX_INT_ERROR_EXPIRE 3600
-#define EFX_MAX_INT_ERRORS 5
-
-/* Depth of RX flush request fifo */
-#define EFX_RX_FLUSH_COUNT 4
-
-/* Driver generated events */
-#define _EFX_CHANNEL_MAGIC_TEST 0x000101
-#define _EFX_CHANNEL_MAGIC_FILL 0x000102
-#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
-#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
-
-#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
-#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
-
-#define EFX_CHANNEL_MAGIC_TEST(_channel) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
-#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
- (_tx_queue)->queue)
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic);
-
-/**************************************************************************
- *
- * Solarstorm hardware access
- *
- **************************************************************************/
-
-static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
- unsigned int index)
-{
- efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
- value, index);
-}
-
-/* Read the current event from the event queue */
-static inline efx_qword_t *efx_event(struct efx_channel *channel,
- unsigned int index)
-{
- return ((efx_qword_t *) (channel->eventq.addr)) +
- (index & channel->eventq_mask);
-}
-
-/* See if an event is present
- *
- * We check both the high and low dword of the event for all ones. We
- * wrote all ones when we cleared the event, and no valid event can
- * have all ones in either its high or low dwords. This approach is
- * robust against reordering.
- *
- * Note that using a single 64-bit comparison is incorrect; even
- * though the CPU read will be atomic, the DMA write may not be.
- */
-static inline int efx_event_present(efx_qword_t *event)
-{
- return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
- EFX_DWORD_IS_ALL_ONES(event->dword[1]));
-}
-
-static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
- const efx_oword_t *mask)
-{
- return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
- ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
-}
-
-int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs)
-{
- unsigned address = 0, i, j;
- efx_oword_t mask, imask, original, reg, buf;
-
- for (i = 0; i < n_regs; ++i) {
- address = regs[i].address;
- mask = imask = regs[i].mask;
- EFX_INVERT_OWORD(imask);
-
- efx_reado(efx, &original, address);
-
- /* bit sweep on and off */
- for (j = 0; j < 128; j++) {
- if (!EFX_EXTRACT_OWORD32(mask, j, j))
- continue;
-
- /* Test this testable bit can be set in isolation */
- EFX_AND_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 1);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
-
- /* Test this testable bit can be cleared in isolation */
- EFX_OR_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 0);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
- }
-
- efx_writeo(efx, &original, address);
- }
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev,
- "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
- " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
- EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
- return -EIO;
-}
-
-/**************************************************************************
- *
- * Special buffer handling
- * Special buffers are used for event queues and the TX and RX
- * descriptor rings.
- *
- *************************************************************************/
-
-/*
- * Initialise a special buffer
- *
- * This will define a buffer (previously allocated via
- * efx_alloc_special_buffer()) in the buffer table, allowing
- * it to be used for event queues, descriptor rings etc.
- */
-static void
-efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_qword_t buf_desc;
- unsigned int index;
- dma_addr_t dma_addr;
- int i;
-
- EFX_BUG_ON_PARANOID(!buffer->addr);
-
- /* Write buffer descriptors to NIC */
- for (i = 0; i < buffer->entries; i++) {
- index = buffer->index + i;
- dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
- netif_dbg(efx, probe, efx->net_dev,
- "mapping special buffer %d at %llx\n",
- index, (unsigned long long)dma_addr);
- EFX_POPULATE_QWORD_3(buf_desc,
- FRF_AZ_BUF_ADR_REGION, 0,
- FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
- FRF_AZ_BUF_OWNER_ID_FBUF, 0);
- efx_write_buf_tbl(efx, &buf_desc, index);
- }
-}
-
-/* Unmaps a buffer and clears the buffer table entries */
-static void
-efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_oword_t buf_tbl_upd;
- unsigned int start = buffer->index;
- unsigned int end = (buffer->index + buffer->entries - 1);
-
- if (!buffer->entries)
- return;
-
- netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
- buffer->index, buffer->index + buffer->entries - 1);
-
- EFX_POPULATE_OWORD_4(buf_tbl_upd,
- FRF_AZ_BUF_UPD_CMD, 0,
- FRF_AZ_BUF_CLR_CMD, 1,
- FRF_AZ_BUF_CLR_END_ID, end,
- FRF_AZ_BUF_CLR_START_ID, start);
- efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
-}
-
-/*
- * Allocate a new special buffer
- *
- * This allocates memory for a new buffer, clears it and allocates a
- * new buffer ID range. It does not write into the buffer table.
- *
- * This call will allocate 4KB buffers, since 8KB buffers can't be
- * used for event queues and descriptor rings.
- */
-static int efx_alloc_special_buffer(struct efx_nic *efx,
- struct efx_special_buffer *buffer,
- unsigned int len)
-{
- len = ALIGN(len, EFX_BUF_SIZE);
-
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_KERNEL);
- if (!buffer->addr)
- return -ENOMEM;
- buffer->len = len;
- buffer->entries = len / EFX_BUF_SIZE;
- BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
-
- /* Select new buffer ID */
- buffer->index = efx->next_buffer_table;
- efx->next_buffer_table += buffer->entries;
-#ifdef CONFIG_SFC_SRIOV
- BUG_ON(efx_sriov_enabled(efx) &&
- efx->vf_buftbl_base < efx->next_buffer_table);
-#endif
-
- netif_dbg(efx, probe, efx->net_dev,
- "allocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- return 0;
-}
-
-static void
-efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- if (!buffer->addr)
- return;
-
- netif_dbg(efx, hw, efx->net_dev,
- "deallocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, buffer->len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
- buffer->dma_addr);
- buffer->addr = NULL;
- buffer->entries = 0;
-}
-
-/**************************************************************************
- *
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len)
+ unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr,
- GFP_ATOMIC | __GFP_ZERO);
+ buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
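[Editorial sketch, not part of the patch] Callers of efx_nic_alloc_buffer() now choose the allocation context explicitly instead of the old unconditional GFP_ATOMIC; a typical probe-time use, with an arbitrary length, assuming rc is declared in the caller:

	struct efx_buffer buf;

	rc = efx_nic_alloc_buffer(efx, &buf, 1024, GFP_KERNEL);
	if (rc)
		return rc;
	/* ... use buf.addr (CPU) and buf.dma_addr (device) ... */
	efx_nic_free_buffer(efx, &buf);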
@@ -323,1057 +50,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
}
}
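/* Illustrative sketch only, not part of this patch: with the new
 * gfp_flags argument, a process-context caller can request GFP_KERNEL
 * rather than relying on the old implicit GFP_ATOMIC. A hypothetical
 * call site (names assumed for illustration):
 *
 *	rc = efx_nic_alloc_buffer(efx, &efx->irq_status,
 *				  sizeof(efx_oword_t), GFP_KERNEL);
 *	if (rc)
 *		return rc;
 */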
-/**************************************************************************
- *
- * TX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified transmit descriptor in the TX
- * descriptor queue belonging to the specified channel.
- */
-static inline efx_qword_t *
-efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
-}
-
-/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
-static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
-{
- unsigned write_ptr;
- efx_dword_t reg;
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(tx_queue->efx, &reg,
- FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
-}
-
-/* Write pointer and first descriptor for TX descriptor ring */
-static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
- const efx_qword_t *txd)
-{
- unsigned write_ptr;
- efx_oword_t reg;
-
- BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
- BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
- FRF_AZ_TX_DESC_WPTR, write_ptr);
- reg.qword[0] = *txd;
- efx_writeo_page(tx_queue->efx, &reg,
- FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
-}
-
-static inline bool
-efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
-{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
-
- tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
-}
-
-/* For each entry inserted into the software descriptor ring, create a
- * descriptor in the hardware TX descriptor ring (in host memory), and
- * write a doorbell.
- */
-void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
-{
-
- struct efx_tx_buffer *buffer;
- efx_qword_t *txd;
- unsigned write_ptr;
- unsigned old_write_count = tx_queue->write_count;
-
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-
- do {
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[write_ptr];
- txd = efx_tx_desc(tx_queue, write_ptr);
- ++tx_queue->write_count;
-
- /* Create TX descriptor ring entry */
- BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
- EFX_POPULATE_QWORD_4(*txd,
- FSF_AZ_TX_KER_CONT,
- buffer->flags & EFX_TX_BUF_CONT,
- FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
- FSF_AZ_TX_KER_BUF_REGION, 0,
- FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
- } while (tx_queue->write_count != tx_queue->insert_count);
-
- wmb(); /* Ensure descriptors are written before they are fetched */
-
- if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
- txd = efx_tx_desc(tx_queue,
- old_write_count & tx_queue->ptr_mask);
- efx_push_tx_desc(tx_queue, txd);
- ++tx_queue->pushes;
- } else {
- efx_notify_tx_desc(tx_queue);
- }
-}
-
-/* Allocate hardware resources for a TX queue */
-int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned entries;
-
- entries = tx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &tx_queue->txd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t reg;
-
- /* Pin TX descriptor ring */
- efx_init_special_buffer(efx, &tx_queue->txd);
-
- /* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(reg,
- FRF_AZ_TX_DESCQ_EN, 1,
- FRF_AZ_TX_ISCSI_DDIG_EN, 0,
- FRF_AZ_TX_ISCSI_HDIG_EN, 0,
- FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
- FRF_AZ_TX_DESCQ_EVQ_ID,
- tx_queue->channel->channel,
- FRF_AZ_TX_DESCQ_OWNER_ID, 0,
- FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
- FRF_AZ_TX_DESCQ_SIZE,
- __ffs(tx_queue->txd.entries),
- FRF_AZ_TX_DESCQ_TYPE, 0,
- FRF_BZ_TX_NON_IP_DROP_DIS, 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
- !csum);
- }
-
- efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- /* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
-
- efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
- __clear_bit_le(tx_queue->queue, &reg);
- else
- __set_bit_le(tx_queue->queue, &reg);
- efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_1(reg,
- FRF_BZ_TX_PACE,
- (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
- FFE_BZ_TX_PACE_OFF :
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
- tx_queue->queue);
- }
-}
-
-static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_flush_descq;
-
- WARN_ON(atomic_read(&tx_queue->flush_outstanding));
- atomic_set(&tx_queue->flush_outstanding, 1);
-
- EFX_POPULATE_OWORD_2(tx_flush_descq,
- FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
- efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_desc_ptr;
-
- /* Remove TX descriptor ring from card */
- EFX_ZERO_OWORD(tx_desc_ptr);
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- /* Unpin TX descriptor ring */
- efx_fini_special_buffer(efx, &tx_queue->txd);
-}
-
-/* Free buffers backing TX queue */
-void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
-{
- efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
-}
-
-/**************************************************************************
- *
- * RX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *
-efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
-}
-
-/* This creates an entry in the RX descriptor queue */
-static inline void
-efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_rx_buffer *rx_buf;
- efx_qword_t *rxd;
-
- rxd = efx_rx_desc(rx_queue, index);
- rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_POPULATE_QWORD_3(*rxd,
- FSF_AZ_RX_KER_BUF_SIZE,
- rx_buf->len -
- rx_queue->efx->type->rx_buffer_padding,
- FSF_AZ_RX_KER_BUF_REGION, 0,
- FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
-}
-
-/* This writes to the RX_DESC_WPTR register for the specified receive
- * descriptor ring.
- */
-void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_dword_t reg;
- unsigned write_ptr;
-
- while (rx_queue->notified_count != rx_queue->added_count) {
- efx_build_rx_desc(
- rx_queue,
- rx_queue->notified_count & rx_queue->ptr_mask);
- ++rx_queue->notified_count;
- }
-
- wmb();
- write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
- efx_rx_queue_index(rx_queue));
-}
-
-int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned entries;
-
- entries = rx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &rx_queue->rxd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
- bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
- bool iscsi_digest_en = is_b0;
- bool jumbo_en;
-
- /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
- * DMA to continue after a PCIe page boundary (and scattering
- * is not possible). In Falcon B0 and Siena, it enables
- * scatter.
- */
- jumbo_en = !is_b0 || efx->rx_scatter;
-
- netif_dbg(efx, hw, efx->net_dev,
- "RX queue %d ring in special buffers %d-%d\n",
- efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
- rx_queue->rxd.index + rx_queue->rxd.entries - 1);
-
- rx_queue->scatter_n = 0;
-
- /* Pin RX descriptor ring */
- efx_init_special_buffer(efx, &rx_queue->rxd);
-
- /* Push RX descriptor ring to card */
- EFX_POPULATE_OWORD_10(rx_desc_ptr,
- FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
- FRF_AZ_RX_DESCQ_EVQ_ID,
- efx_rx_queue_channel(rx_queue)->channel,
- FRF_AZ_RX_DESCQ_OWNER_ID, 0,
- FRF_AZ_RX_DESCQ_LABEL,
- efx_rx_queue_index(rx_queue),
- FRF_AZ_RX_DESCQ_SIZE,
- __ffs(rx_queue->rxd.entries),
- FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
- FRF_AZ_RX_DESCQ_EN, 1);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-}
-
-static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_oword_t rx_flush_descq;
-
- EFX_POPULATE_OWORD_2(rx_flush_descq,
- FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_RX_FLUSH_DESCQ,
- efx_rx_queue_index(rx_queue));
- efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
-
- /* Remove RX descriptor ring from card */
- EFX_ZERO_OWORD(rx_desc_ptr);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-
- /* Unpin RX descriptor ring */
- efx_fini_special_buffer(efx, &rx_queue->rxd);
-}
-
-/* Free buffers backing RX queue */
-void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
-{
- efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
-}
-
-/**************************************************************************
- *
- * Flush handling
- *
- **************************************************************************/
-
-/* efx_nic_flush_queues() must be woken up when all flushes are completed,
- * or more RX flushes can be kicked off.
- */
-static bool efx_flush_wake(struct efx_nic *efx)
-{
- /* Ensure that all updates are visible to efx_nic_flush_queues() */
- smp_mb();
-
- return (atomic_read(&efx->drain_pending) == 0 ||
- (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
- && atomic_read(&efx->rxq_flush_pending) > 0));
-}
-
-static bool efx_check_tx_flush_complete(struct efx_nic *efx)
-{
- bool i = true;
- efx_oword_t txd_ptr_tbl;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_reado_table(efx, &txd_ptr_tbl,
- FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
- if (EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_FLUSH) ||
- EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_EN)) {
- netif_dbg(efx, hw, efx->net_dev,
- "flush did not complete on TXQ %d\n",
- tx_queue->queue);
- i = false;
- } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
- 1, 0)) {
- /* The flush is complete, but we didn't
- * receive a flush completion event
- */
- netif_dbg(efx, hw, efx->net_dev,
- "flush complete on TXQ %d, so drain "
- "the queue\n", tx_queue->queue);
- /* Don't need to increment drain_pending as it
- * has already been incremented for the queues
- * which did not drain
- */
- efx_magic_event(channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(
- tx_queue));
- }
- }
- }
-
- return i;
-}
-
-/* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be received so that there
- * are no more RX and TX events left on any channel. */
-int efx_nic_flush_queues(struct efx_nic *efx)
-{
- unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int rc = 0;
-
- efx->type->prepare_flush(efx);
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- efx_flush_tx_queue(tx_queue);
- }
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- }
- }
-
- while (timeout && atomic_read(&efx->drain_pending) > 0) {
- /* If SRIOV is enabled, then offload receive queue flushing to
- * the firmware (though we will still have to poll for
- * completion). If that fails, fall back to the old scheme.
- */
- if (efx_sriov_enabled(efx)) {
- rc = efx_mcdi_flush_rxqs(efx);
- if (!rc)
- goto wait;
- }
-
- /* The hardware supports four concurrent rx flushes, each of
- * which may need to be retried if there is an outstanding
- * descriptor fetch
- */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (atomic_read(&efx->rxq_flush_outstanding) >=
- EFX_RX_FLUSH_COUNT)
- break;
-
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- atomic_inc(&efx->rxq_flush_outstanding);
- efx_flush_rx_queue(rx_queue);
- }
- }
- }
-
- wait:
- timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
- timeout);
- }
-
- if (atomic_read(&efx->drain_pending) &&
- !efx_check_tx_flush_complete(efx)) {
- netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
- "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
- atomic_read(&efx->rxq_flush_outstanding),
- atomic_read(&efx->rxq_flush_pending));
- rc = -ETIMEDOUT;
-
- atomic_set(&efx->drain_pending, 0);
- atomic_set(&efx->rxq_flush_pending, 0);
- atomic_set(&efx->rxq_flush_outstanding, 0);
- }
-
- efx->type->finish_flush(efx);
-
- return rc;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- * Event queues are processed by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Update a channel's event queue's read pointer (RPTR) register
- *
- * This writes the EVQ_RPTR_REG register for the specified channel's
- * event queue.
- */
-void efx_nic_eventq_read_ack(struct efx_channel *channel)
-{
- efx_dword_t reg;
- struct efx_nic *efx = channel->efx;
-
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
- channel->eventq_read_ptr & channel->eventq_mask);
-
- /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
- * of 4 bytes, but it is really 16 bytes just like later revisions.
- */
- efx_writed(efx, &reg,
- efx->type->evq_rptr_tbl_base +
- FR_BZ_EVQ_RPTR_STEP * channel->channel);
-}
-
-/* Use HW to insert a SW defined event */
-void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event)
-{
- efx_oword_t drv_ev_reg;
-
- BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
- FRF_AZ_DRV_EV_DATA_WIDTH != 64);
- drv_ev_reg.u32[0] = event->u32[0];
- drv_ev_reg.u32[1] = event->u32[1];
- drv_ev_reg.u32[2] = 0;
- drv_ev_reg.u32[3] = 0;
- EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
- efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
-}
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic)
-{
- efx_qword_t event;
-
- EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_generate_event(channel->efx, channel->channel, &event);
-}
-
-/* Handle a transmit completion event
- *
- * The NIC batches TX completion events; the message we receive is of
- * the form "complete all TX events up to this index".
- */
-static int
-efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
-{
- unsigned int tx_ev_desc_ptr;
- unsigned int tx_ev_q_label;
- struct efx_tx_queue *tx_queue;
- struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return 0;
-
- if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
- /* Rewrite the FIFO write pointer */
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
-
- netif_tx_lock(efx->net_dev);
- efx_notify_tx_desc(tx_queue);
- netif_tx_unlock(efx->net_dev);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
- EFX_WORKAROUND_10727(efx)) {
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else {
- netif_err(efx, tx_err, efx->net_dev,
- "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
- }
-
- return tx_packets;
-}
-
-/* Detect errors included in the rx_evt_pkt_ok bit. */
-static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
- const efx_qword_t *event)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
- bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
- bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
-
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
- rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
- rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
- rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
- rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
- rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
- rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
- 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
- rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
-
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
-
- /* Count errors that are not in MAC stats. Ignore expected
- * checksum errors during self-test. */
- if (rx_ev_frm_trunc)
- ++channel->n_rx_frm_trunc;
- else if (rx_ev_tobe_disc)
- ++channel->n_rx_tobe_disc;
- else if (!efx->loopback_selftest) {
- if (rx_ev_ip_hdr_chksum_err)
- ++channel->n_rx_ip_hdr_chksum_err;
- else if (rx_ev_tcp_udp_chksum_err)
- ++channel->n_rx_tcp_udp_chksum_err;
- }
-
- /* TOBE_DISC is expected on unicast mismatches; don't print out an
- * error message. FRM_TRUNC indicates RXDP dropped the packet due
- * to a FIFO overflow.
- */
-#ifdef DEBUG
- if (rx_ev_other_err && net_ratelimit()) {
- netif_dbg(efx, rx_err, efx->net_dev,
- " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
- rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
- rx_ev_ip_hdr_chksum_err ?
- " [IP_HDR_CHKSUM_ERR]" : "",
- rx_ev_tcp_udp_chksum_err ?
- " [TCP_UDP_CHKSUM_ERR]" : "",
- rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
- rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
- rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "");
- }
-#endif
-
- /* The frame must be discarded if any of these are true. */
- return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
- rx_ev_tobe_disc | rx_ev_pause_frm) ?
- EFX_RX_PKT_DISCARD : 0;
-}
-
-/* Handle receive events that are not in-order. Return true if this
- * can be handled as a partial packet discard, false if it's more
- * serious.
- */
-static bool
-efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- unsigned expected, dropped;
-
- if (rx_queue->scatter_n &&
- index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
- rx_queue->ptr_mask)) {
- ++channel->n_rx_nodesc_trunc;
- return true;
- }
-
- expected = rx_queue->removed_count & rx_queue->ptr_mask;
- dropped = (index - expected) & rx_queue->ptr_mask;
- netif_info(efx, rx_err, efx->net_dev,
- "dropped %d events (index=%d expected=%d)\n",
- dropped, index, expected);
-
- efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- return false;
-}
-
-/* Handle a packet received event
- *
- * The NIC gives a "discard" flag if it's a unicast packet with the
- * wrong destination address
- * Also "is multicast" and "matches multicast filter" flags can be used to
- * discard non-matching multicast packets.
- */
-static void
-efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
-{
- unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
- unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned expected_ptr;
- bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
- u16 flags;
- struct efx_rx_queue *rx_queue;
- struct efx_nic *efx = channel->efx;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return;
-
- rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
- rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
- channel->channel);
-
- rx_queue = efx_channel_get_rx_queue(channel);
-
- rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
- rx_queue->ptr_mask);
-
- /* Check for partial drops and other errors */
- if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
- unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
- if (rx_ev_desc_ptr != expected_ptr &&
- !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
- return;
-
- /* Discard all pending fragments */
- if (rx_queue->scatter_n) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
- }
-
- /* Return if there is no new fragment */
- if (rx_ev_desc_ptr != expected_ptr)
- return;
-
- /* Discard new fragment if not SOP */
- if (!rx_ev_sop) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- 1, 0, EFX_RX_PKT_DISCARD);
- ++rx_queue->removed_count;
- return;
- }
- }
-
- ++rx_queue->scatter_n;
- if (rx_ev_cont)
- return;
-
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-
- if (likely(rx_ev_pkt_ok)) {
- /* If packet is marked as OK then we can rely on the
- * hardware checksum and classification.
- */
- flags = 0;
- switch (rx_ev_hdr_type) {
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
- flags |= EFX_RX_PKT_TCP;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
- flags |= EFX_RX_PKT_CSUMMED;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
- case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
- break;
- }
- } else {
- flags = efx_handle_rx_not_ok(rx_queue, event);
- }
-
- /* Detect multicast packets that didn't match the filter */
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- if (rx_ev_mcast_pkt) {
- unsigned int rx_ev_mcast_hash_match =
- EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
-
- if (unlikely(!rx_ev_mcast_hash_match)) {
- ++channel->n_rx_mcast_mismatch;
- flags |= EFX_RX_PKT_DISCARD;
- }
- }
-
- channel->irq_mod_score += 2;
-
- /* Handle received packet */
- efx_rx_packet(rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, rx_ev_byte_cnt, flags);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
-}
-
-/* If this flush done event corresponds to a &struct efx_tx_queue, then
- * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
- * of all transmit completions.
- */
-static void
-efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_tx_queue *tx_queue;
- int qid;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
- qid % EFX_TXQ_TYPES);
- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
- efx_magic_event(tx_queue->channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
- }
- }
-}
-
-/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
- * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
- * the RX queue back to the mask of RX queues in need of flushing.
- */
-static void
-efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- int qid;
- bool failed;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
- failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (qid >= efx->n_channels)
- return;
- channel = efx_get_channel(efx, qid);
- if (!efx_channel_has_rx_queue(channel))
- return;
- rx_queue = efx_channel_get_rx_queue(channel);
-
- if (failed) {
- netif_info(efx, hw, efx->net_dev,
- "RXQ %d flush retry\n", qid);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- } else {
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
- }
- atomic_dec(&efx->rxq_flush_outstanding);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_drain_event(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- WARN_ON(atomic_read(&efx->drain_pending) == 0);
- atomic_dec(&efx->drain_pending);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue =
- efx_channel_has_rx_queue(channel) ?
- efx_channel_get_rx_queue(channel) : NULL;
- unsigned magic, code;
-
- magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
- code = _EFX_CHANNEL_MAGIC_CODE(magic);
-
- if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
- channel->event_test_cpu = raw_smp_processor_id();
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
- /* The queue must be empty, so we won't receive any rx
- * events, so efx_process_channel() won't refill the
- * queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
- rx_queue->enabled = false;
- efx_handle_drain_event(channel);
- } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
- efx_handle_drain_event(channel);
- } else {
- netif_dbg(efx, hw, efx->net_dev, "channel %d received "
- "generated event "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(*event));
- }
-}
-
-static void
-efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int ev_sub_code;
- unsigned int ev_sub_data;
-
- ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
- ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
-
- switch (ev_sub_code) {
- case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_tx_flush_done(efx, event);
- efx_sriov_tx_flush_done(efx, event);
- break;
- case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_rx_flush_done(efx, event);
- efx_sriov_rx_flush_done(efx, event);
- break;
- case FSE_AZ_EVQ_INIT_DONE_EV:
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d EVQ %d initialised\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_SRM_UPD_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d SRAM update done\n", channel->channel);
- break;
- case FSE_AZ_WAKE_UP_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RXQ %d wakeup event\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_TIMER_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RX queue %d timer expired\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AA_RX_RECOVER_EV:
- netif_err(efx, rx_err, efx->net_dev,
- "channel %d seen DRIVER RX_RESET event. "
- "Resetting.\n", channel->channel);
- atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx,
- EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY :
- RESET_TYPE_DISABLE);
- break;
- case FSE_BZ_RX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, rx_err, efx->net_dev,
- "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- case FSE_BZ_TX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- default:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d unknown driver event code %d "
- "data %04x\n", channel->channel, ev_sub_code,
- ev_sub_data);
- break;
- }
-}
-
-int efx_nic_process_eventq(struct efx_channel *channel, int budget)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int read_ptr;
- efx_qword_t event, *p_event;
- int ev_code;
- int tx_packets = 0;
- int spent = 0;
-
- read_ptr = channel->eventq_read_ptr;
-
- for (;;) {
- p_event = efx_event(channel, read_ptr);
- event = *p_event;
-
- if (!efx_event_present(&event))
- /* End of events */
- break;
-
- netif_vdbg(channel->efx, intr, channel->efx->net_dev,
- "channel %d event is "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(event));
-
- /* Clear this event by marking it all ones */
- EFX_SET_QWORD(*p_event);
-
- ++read_ptr;
-
- ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
-
- switch (ev_code) {
- case FSE_AZ_EV_CODE_RX_EV:
- efx_handle_rx_event(channel, &event);
- if (++spent == budget)
- goto out;
- break;
- case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_handle_tx_event(channel, &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
- break;
- case FSE_AZ_EV_CODE_DRV_GEN_EV:
- efx_handle_generated_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_DRIVER_EV:
- efx_handle_driver_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_USER_EV:
- efx_sriov_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_MCDI_EV:
- efx_mcdi_process_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_GLOBAL_EV:
- if (efx->type->handle_global_event &&
- efx->type->handle_global_event(channel, &event))
- break;
- /* else fall through */
- default:
- netif_err(channel->efx, hw, channel->efx->net_dev,
- "channel %d unknown event type %d (data "
- EFX_QWORD_FMT ")\n", channel->channel,
- ev_code, EFX_QWORD_VAL(event));
- }
- }
-
-out:
- channel->eventq_read_ptr = read_ptr;
- return spent;
-}
-
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
@@ -1382,323 +58,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
-/* Allocate buffer table entries for event queue */
-int efx_nic_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned entries;
-
- entries = channel->eventq_mask + 1;
- return efx_alloc_special_buffer(efx, &channel->eventq,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d event queue in special buffers %d-%d\n",
- channel->channel, channel->eventq.index,
- channel->eventq.index + channel->eventq.entries - 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- EFX_POPULATE_OWORD_3(reg,
- FRF_CZ_TIMER_Q_EN, 1,
- FRF_CZ_HOST_NOTIFY_MODE, 0,
- FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
- }
-
- /* Pin event queue buffer */
- efx_init_special_buffer(efx, &channel->eventq);
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.addr, 0xff, channel->eventq.len);
-
- /* Push event queue to card */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AZ_EVQ_EN, 1,
- FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
- FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
-
- efx->type->push_irq_moderation(channel);
-}
-
-void efx_nic_fini_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- /* Remove event queue from card */
- EFX_ZERO_OWORD(reg);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
-
- /* Unpin event queue */
- efx_fini_special_buffer(efx, &channel->eventq);
-}
-
-/* Free buffers backing event queue */
-void efx_nic_remove_eventq(struct efx_channel *channel)
-{
- efx_free_special_buffer(channel->efx, &channel->eventq);
-}
-
-
void efx_nic_event_test_start(struct efx_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
- efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+ channel->efx->type->ev_test_generate(channel);
}
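/* A minimal sketch, assuming the architecture-specific implementation
 * keeps the old behaviour behind the new ev_test_generate hook (the
 * function name below is an assumption, not taken from this hunk):
 *
 *	static void efx_farch_ev_test_generate(struct efx_channel *channel)
 *	{
 *		efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
 *	}
 *
 * which the NIC type would then assign to its ev_test_generate member.
 */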
-void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
-{
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_FILL(rx_queue));
-}
-
-/**************************************************************************
- *
- * Hardware interrupts
- * The hardware interrupt handler does very little work; all the event
- * queue processing is carried out by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Enable/disable/generate interrupts */
-static inline void efx_nic_interrupts(struct efx_nic *efx,
- bool enabled, bool force)
-{
- efx_oword_t int_en_reg_ker;
-
- EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
- FRF_AZ_KER_INT_KER, force,
- FRF_AZ_DRV_INT_EN_KER, enabled);
- efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
-}
-
-void efx_nic_enable_interrupts(struct efx_nic *efx)
-{
- EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
- wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
-
- efx_nic_interrupts(efx, true, false);
-}
-
-void efx_nic_disable_interrupts(struct efx_nic *efx)
-{
- /* Disable interrupts */
- efx_nic_interrupts(efx, false, false);
-}
-
-/* Generate a test interrupt
- * Interrupt must already have been enabled, otherwise nasty things
- * may happen.
- */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
- efx_nic_interrupts(efx, true, true);
-}
-
-/* Process a fatal interrupt
- * Disable bus mastering ASAP and schedule a reset
- */
-irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t *int_ker = efx->irq_status.addr;
- efx_oword_t fatal_intr;
- int error, mem_perr;
-
- efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
- error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
-
- netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
- EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
- EFX_OWORD_VAL(fatal_intr),
- error ? "disabling bus mastering" : "no recognised error");
-
- /* If this is a memory parity error dump which blocks are offending */
- mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
- EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
- if (mem_perr) {
- efx_oword_t reg;
- efx_reado(efx, &reg, FR_AZ_MEM_STAT);
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(reg));
- }
-
- /* Disable both devices */
- pci_clear_master(efx->pci_dev);
- if (efx_nic_is_dual_func(efx))
- pci_clear_master(nic_data->pci_dev2);
- efx_nic_disable_interrupts(efx);
-
- /* Count errors and reset or disable the NIC accordingly */
- if (efx->int_error_count == 0 ||
- time_after(jiffies, efx->int_error_expire)) {
- efx->int_error_count = 0;
- efx->int_error_expire =
- jiffies + EFX_INT_ERROR_EXPIRE * HZ;
- }
- if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR - reset scheduled\n");
- efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
- } else {
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR - max number of errors seen."
- "NIC will be disabled\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- }
-
- return IRQ_HANDLED;
-}
-
-/* Handle a legacy interrupt
- * Acknowledges the interrupt and schedules event queue processing.
- */
-static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
-{
- struct efx_nic *efx = dev_id;
- efx_oword_t *int_ker = efx->irq_status.addr;
- irqreturn_t result = IRQ_NONE;
- struct efx_channel *channel;
- efx_dword_t reg;
- u32 queues;
- int syserr;
-
- /* Could this be ours? If interrupts are disabled then the
- * channel state may not be valid.
- */
- if (!efx->legacy_irq_enabled)
- return result;
-
- /* Read the ISR which also ACKs the interrupts */
- efx_readd(efx, &reg, FR_BZ_INT_ISR0);
- queues = EFX_EXTRACT_DWORD(reg, 0, 31);
-
- /* Legacy interrupts are disabled too late by the EEH kernel
- * code. Disable them earlier.
- * If an EEH error occurred, the read will have returned all ones.
- */
- if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
- !efx->eeh_disabled_legacy_irq) {
- disable_irq_nosync(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = true;
- }
-
- /* Handle non-event-queue sources */
- if (queues & (1U << efx->irq_level)) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- if (queues != 0) {
- if (EFX_WORKAROUND_15783(efx))
- efx->irq_zero_count = 0;
-
- /* Schedule processing of any interrupting queues */
- efx_for_each_channel(channel, efx) {
- if (queues & 1)
- efx_schedule_channel_irq(channel);
- queues >>= 1;
- }
- result = IRQ_HANDLED;
-
- } else if (EFX_WORKAROUND_15783(efx)) {
- efx_qword_t *event;
-
- /* We can't return IRQ_HANDLED more than once on seeing ISR=0
- * because this might be a shared interrupt. */
- if (efx->irq_zero_count++ == 0)
- result = IRQ_HANDLED;
-
- /* Ensure we schedule or rearm all event queues */
- efx_for_each_channel(channel, efx) {
- event = efx_event(channel, channel->eventq_read_ptr);
- if (efx_event_present(event))
- efx_schedule_channel_irq(channel);
- else
- efx_nic_eventq_read_ack(channel);
- }
- }
-
- if (result == IRQ_HANDLED)
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-
- return result;
-}
-
-/* Handle an MSI interrupt
- *
- * Handle an MSI hardware interrupt. This routine schedules event
- * queue processing. No interrupt acknowledgement cycle is necessary.
- * Also, we never need to check that the interrupt is for us, since
- * MSI interrupts cannot be shared.
- */
-static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
-{
- struct efx_channel *channel = *(struct efx_channel **)dev_id;
- struct efx_nic *efx = channel->efx;
- efx_oword_t *int_ker = efx->irq_status.addr;
- int syserr;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
-
- /* Handle non-event-queue sources */
- if (channel->channel == efx->irq_level) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- /* Schedule processing of the channel */
- efx_schedule_channel_irq(channel);
-
- return IRQ_HANDLED;
-}
-
-
-/* Setup RSS indirection table.
- * This maps from the hash value of the packet to RXQ
- */
-void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- size_t i = 0;
- efx_dword_t dword;
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
-
- BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
- FR_BZ_RX_INDIRECTION_TBL_ROWS);
-
- for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
- EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- efx->rx_indir_table[i]);
- efx_writed(efx, &dword,
- FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP * i);
- }
+ efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
@@ -1711,13 +82,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) {
- irq_handler_t handler;
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- handler = efx_legacy_interrupt;
- else
- handler = falcon_legacy_interrupt_a1;
-
- rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
@@ -1742,10 +108,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
efx_for_each_channel(channel, efx) {
- rc = request_irq(channel->irq, efx_msi_interrupt,
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
- efx->channel_name[channel->channel],
- &efx->channel[channel->channel]);
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1774,7 +140,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
@@ -1783,7 +149,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_oword_t reg;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
@@ -1792,167 +157,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->channel[channel->channel]);
-
- /* ACK legacy interrupt */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- efx_reado(efx, &reg, FR_BZ_INT_ISR0);
- else
- falcon_irq_ack_a1(efx);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* Disable legacy interrupt */
if (efx->legacy_irq)
free_irq(efx->legacy_irq, efx);
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count, buftbl_min;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
- efx->n_channels * EFX_MAX_EVQ_SIZE)
- * sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
-
-#ifdef CONFIG_SFC_SRIOV
- if (efx_sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- efx->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
- }
- vi_count += efx->vf_count * efx_vf_size(efx);
- }
-#endif
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
-u32 efx_nic_fpga_ver(struct efx_nic *efx)
-{
- efx_oword_t altera_build;
- efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
- return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
-}
-
-void efx_nic_init_common(struct efx_nic *efx)
-{
- efx_oword_t temp;
-
- /* Set positions of descriptor caches in SRAM. */
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
-
- /* Set TX descriptor cache size. */
- BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
-
- /* Set RX descriptor cache size. Set low watermark to size-8, as
- * this allows most efficient prefetching.
- */
- BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
-
- /* Program INT_KER address */
- EFX_POPULATE_OWORD_2(temp,
- FRF_AZ_NORM_INT_VEC_DIS_KER,
- EFX_INT_MODE_USE_MSI(efx),
- FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
- efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Use an interrupt level unused by event queues */
- efx->irq_level = 0x1f;
- else
- /* Use a valid MSI-X vector */
- efx->irq_level = 0;
-
- /* Enable all the genuinely fatal interrupts. (They are still
- * masked by the overall interrupt mask, controlled by
- * falcon_interrupts()).
- *
- * Note: All other fatal interrupts are enabled
- */
- EFX_POPULATE_OWORD_3(temp,
- FRF_AZ_ILL_ADR_INT_KER_EN, 1,
- FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
- FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
- EFX_INVERT_OWORD(temp);
- efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
-
- efx_nic_push_rx_indir_table(efx);
-
- /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
- * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
- */
- efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
- /* Enable SW_EV to inherit in char driver - assume harmless here */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
- /* Prefetch threshold 2 => fetch when descriptor cache half empty */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
- /* Disable hardware watchdog which can misfire */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
- /* Squash TX of packets of 16 bytes or less */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
- efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_4(temp,
- /* Default values */
- FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
- FRF_BZ_TX_PACE_SB_AF, 0xb,
- FRF_BZ_TX_PACE_FB_BASE, 0,
- /* Allow large pace values in the
- * fast bin. */
- FRF_BZ_TX_PACE_BIN_TH,
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo(efx, &temp, FR_BZ_TX_PACE);
- }
-}
-
/* Register dump */
#define REGISTER_REVISION_A 1
@@ -2217,3 +428,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
}
}
}
+
+/**
+ * efx_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strlcpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
+
+/**
+ * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least the number of set bits in the
+ * first @count bits of @mask.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ *stats += val;
+ else
+ *stats = val;
+ }
+
+ ++stats;
+ }
+}
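/* Usage sketch for the two helpers above; the descriptor field names,
 * field order and MY_STAT_* identifiers are assumptions for
 * illustration only. A NIC type describes its DMA statistics layout
 * once, then reuses it both for ethtool strings and for decoding the
 * DMA buffer:
 *
 *	static const struct efx_hw_stat_desc my_stat_desc[] = {
 *		[MY_STAT_rx_bytes] = { .name = "rx_bytes",
 *				       .dma_width = 64, .offset = 0 },
 *	};
 *	DECLARE_BITMAP(mask, ARRAY_SIZE(my_stat_desc));
 *
 *	bitmap_fill(mask, ARRAY_SIZE(my_stat_desc));
 *	efx_nic_describe_stats(my_stat_desc, ARRAY_SIZE(my_stat_desc),
 *			       mask, names);
 *	efx_nic_update_stats(my_stat_desc, ARRAY_SIZE(my_stat_desc),
 *			     mask, stats, dma_buf, false);
 */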
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d63c2991a75..fda29d39032 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,17 +16,13 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
-#include "spi.h"
-
-/*
- * Falcon hardware control
- */
enum {
EFX_REV_FALCON_A0 = 0,
EFX_REV_FALCON_A1 = 1,
EFX_REV_FALCON_B0 = 2,
EFX_REV_SIENA_A0 = 3,
+ EFX_REV_HUNT_A0 = 4,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@@ -34,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +38,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
+ && tx_queue->write_count - write_count == 1;
+}
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
@@ -59,9 +114,6 @@ enum {
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
-#define FALCON_GMAC_LOOPBACKS \
- (1 << LOOPBACK_GMAC)
-
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@@ -105,13 +157,96 @@ struct falcon_board {
};
/**
+ * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct falcon_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
+
+static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
+enum {
+ FALCON_STAT_tx_bytes,
+ FALCON_STAT_tx_packets,
+ FALCON_STAT_tx_pause,
+ FALCON_STAT_tx_control,
+ FALCON_STAT_tx_unicast,
+ FALCON_STAT_tx_multicast,
+ FALCON_STAT_tx_broadcast,
+ FALCON_STAT_tx_lt64,
+ FALCON_STAT_tx_64,
+ FALCON_STAT_tx_65_to_127,
+ FALCON_STAT_tx_128_to_255,
+ FALCON_STAT_tx_256_to_511,
+ FALCON_STAT_tx_512_to_1023,
+ FALCON_STAT_tx_1024_to_15xx,
+ FALCON_STAT_tx_15xx_to_jumbo,
+ FALCON_STAT_tx_gtjumbo,
+ FALCON_STAT_tx_non_tcpudp,
+ FALCON_STAT_tx_mac_src_error,
+ FALCON_STAT_tx_ip_src_error,
+ FALCON_STAT_rx_bytes,
+ FALCON_STAT_rx_good_bytes,
+ FALCON_STAT_rx_bad_bytes,
+ FALCON_STAT_rx_packets,
+ FALCON_STAT_rx_good,
+ FALCON_STAT_rx_bad,
+ FALCON_STAT_rx_pause,
+ FALCON_STAT_rx_control,
+ FALCON_STAT_rx_unicast,
+ FALCON_STAT_rx_multicast,
+ FALCON_STAT_rx_broadcast,
+ FALCON_STAT_rx_lt64,
+ FALCON_STAT_rx_64,
+ FALCON_STAT_rx_65_to_127,
+ FALCON_STAT_rx_128_to_255,
+ FALCON_STAT_rx_256_to_511,
+ FALCON_STAT_rx_512_to_1023,
+ FALCON_STAT_rx_1024_to_15xx,
+ FALCON_STAT_rx_15xx_to_jumbo,
+ FALCON_STAT_rx_gtjumbo,
+ FALCON_STAT_rx_bad_lt64,
+ FALCON_STAT_rx_bad_gtjumbo,
+ FALCON_STAT_rx_overflow,
+ FALCON_STAT_rx_symbol_error,
+ FALCON_STAT_rx_align_error,
+ FALCON_STAT_rx_length_error,
+ FALCON_STAT_rx_internal_error,
+ FALCON_STAT_rx_nodesc_drop_cnt,
+ FALCON_STAT_COUNT
+};
+
+/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
+ * @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
- * @stats_dma_done: Pointer to the flag which indicates DMA completion.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
@@ -121,12 +256,12 @@ struct falcon_board {
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
+ u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
- u32 *stats_dma_done;
- struct efx_spi_device spi_flash;
- struct efx_spi_device spi_eeprom;
+ struct falcon_spi_device spi_flash;
+ struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
@@ -138,29 +273,151 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
return &data->board;
}
+enum {
+ SIENA_STAT_tx_bytes,
+ SIENA_STAT_tx_good_bytes,
+ SIENA_STAT_tx_bad_bytes,
+ SIENA_STAT_tx_packets,
+ SIENA_STAT_tx_bad,
+ SIENA_STAT_tx_pause,
+ SIENA_STAT_tx_control,
+ SIENA_STAT_tx_unicast,
+ SIENA_STAT_tx_multicast,
+ SIENA_STAT_tx_broadcast,
+ SIENA_STAT_tx_lt64,
+ SIENA_STAT_tx_64,
+ SIENA_STAT_tx_65_to_127,
+ SIENA_STAT_tx_128_to_255,
+ SIENA_STAT_tx_256_to_511,
+ SIENA_STAT_tx_512_to_1023,
+ SIENA_STAT_tx_1024_to_15xx,
+ SIENA_STAT_tx_15xx_to_jumbo,
+ SIENA_STAT_tx_gtjumbo,
+ SIENA_STAT_tx_collision,
+ SIENA_STAT_tx_single_collision,
+ SIENA_STAT_tx_multiple_collision,
+ SIENA_STAT_tx_excessive_collision,
+ SIENA_STAT_tx_deferred,
+ SIENA_STAT_tx_late_collision,
+ SIENA_STAT_tx_excessive_deferred,
+ SIENA_STAT_tx_non_tcpudp,
+ SIENA_STAT_tx_mac_src_error,
+ SIENA_STAT_tx_ip_src_error,
+ SIENA_STAT_rx_bytes,
+ SIENA_STAT_rx_good_bytes,
+ SIENA_STAT_rx_bad_bytes,
+ SIENA_STAT_rx_packets,
+ SIENA_STAT_rx_good,
+ SIENA_STAT_rx_bad,
+ SIENA_STAT_rx_pause,
+ SIENA_STAT_rx_control,
+ SIENA_STAT_rx_unicast,
+ SIENA_STAT_rx_multicast,
+ SIENA_STAT_rx_broadcast,
+ SIENA_STAT_rx_lt64,
+ SIENA_STAT_rx_64,
+ SIENA_STAT_rx_65_to_127,
+ SIENA_STAT_rx_128_to_255,
+ SIENA_STAT_rx_256_to_511,
+ SIENA_STAT_rx_512_to_1023,
+ SIENA_STAT_rx_1024_to_15xx,
+ SIENA_STAT_rx_15xx_to_jumbo,
+ SIENA_STAT_rx_gtjumbo,
+ SIENA_STAT_rx_bad_gtjumbo,
+ SIENA_STAT_rx_overflow,
+ SIENA_STAT_rx_false_carrier,
+ SIENA_STAT_rx_symbol_error,
+ SIENA_STAT_rx_align_error,
+ SIENA_STAT_rx_length_error,
+ SIENA_STAT_rx_internal_error,
+ SIENA_STAT_rx_nodesc_drop_cnt,
+ SIENA_STAT_COUNT
+};
+
/**
* struct siena_nic_data - Siena NIC state
- * @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
- * @hwmon: Hardware monitor state
+ * @stats: Hardware statistics
*/
struct siena_nic_data {
- struct efx_mcdi_iface mcdi;
int wol_filter_id;
-#ifdef CONFIG_SFC_MCDI_MON
- struct efx_mcdi_mon hwmon;
-#endif
+ u64 stats[SIENA_STAT_COUNT];
};
-#ifdef CONFIG_SFC_MCDI_MON
-static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
-{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->hwmon;
-}
-#endif
+enum {
+ EF10_STAT_tx_bytes,
+ EF10_STAT_tx_packets,
+ EF10_STAT_tx_pause,
+ EF10_STAT_tx_control,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_lt64,
+ EF10_STAT_tx_64,
+ EF10_STAT_tx_65_to_127,
+ EF10_STAT_tx_128_to_255,
+ EF10_STAT_tx_256_to_511,
+ EF10_STAT_tx_512_to_1023,
+ EF10_STAT_tx_1024_to_15xx,
+ EF10_STAT_tx_15xx_to_jumbo,
+ EF10_STAT_rx_bytes,
+ EF10_STAT_rx_bytes_minus_good_bytes,
+ EF10_STAT_rx_good_bytes,
+ EF10_STAT_rx_bad_bytes,
+ EF10_STAT_rx_packets,
+ EF10_STAT_rx_good,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_pause,
+ EF10_STAT_rx_control,
+ EF10_STAT_rx_unicast,
+ EF10_STAT_rx_multicast,
+ EF10_STAT_rx_broadcast,
+ EF10_STAT_rx_lt64,
+ EF10_STAT_rx_64,
+ EF10_STAT_rx_65_to_127,
+ EF10_STAT_rx_128_to_255,
+ EF10_STAT_rx_256_to_511,
+ EF10_STAT_rx_512_to_1023,
+ EF10_STAT_rx_1024_to_15xx,
+ EF10_STAT_rx_15xx_to_jumbo,
+ EF10_STAT_rx_gtjumbo,
+ EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_overflow,
+ EF10_STAT_rx_align_error,
+ EF10_STAT_rx_length_error,
+ EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_COUNT
+};
+
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @mcdi_buf: DMA buffer for MCDI
+ * @warm_boot_count: Last seen MC warm boot count
+ * @vi_base: Absolute index of first VI in this function
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @rx_rss_context: Firmware handle for our RSS context
+ * @stats: Hardware statistics
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ * after MC reboot
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ * %MC_CMD_GET_CAPABILITIES response)
+ */
+struct efx_ef10_nic_data {
+ struct efx_buffer mcdi_buf;
+ u16 warm_boot_count;
+ unsigned int vi_base;
+ unsigned int n_allocated_vis;
+ bool must_realloc_vis;
+ bool must_restore_filters;
+ u32 rx_rss_context;
+ u64 stats[EF10_STAT_COUNT];
+ bool workaround_35388;
+ bool must_check_datapath_caps;
+ u32 datapath_caps;
+};
/*
* On the SFC9000 family each port is associated with 1 PCI physical
@@ -263,6 +520,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
/**************************************************************************
*
@@ -274,35 +532,123 @@ extern const struct efx_nic_type siena_a0_nic_type;
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
-extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
/* RX data path */
-extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
-extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
/* Event data path */
-extern int efx_nic_probe_eventq(struct efx_channel *channel);
-extern void efx_nic_init_eventq(struct efx_channel *channel);
-extern void efx_nic_fini_eventq(struct efx_channel *channel);
-extern void efx_nic_remove_eventq(struct efx_channel *channel);
-extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
-extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+extern void efx_nic_event_test_start(struct efx_channel *channel);
-/* MAC/PHY */
-extern void falcon_drain_tx_fifo(struct efx_nic *efx);
-extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern bool falcon_xmac_check_fault(struct efx_nic *efx);
-extern int falcon_reconfigure_xmac(struct efx_nic *efx);
-extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Falcon/Siena queue operations */
+extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+extern int efx_farch_ev_probe(struct efx_channel *channel);
+extern int efx_farch_ev_init(struct efx_channel *channel);
+extern void efx_farch_ev_fini(struct efx_channel *channel);
+extern void efx_farch_ev_remove(struct efx_channel *channel);
+extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
+extern void efx_farch_ev_read_ack(struct efx_channel *channel);
+extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+extern int efx_farch_filter_table_probe(struct efx_nic *efx);
+extern void efx_farch_filter_table_restore(struct efx_nic *efx);
+extern void efx_farch_filter_table_remove(struct efx_nic *efx);
+extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+extern s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -322,16 +668,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
*stat = diff;
}
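
(Editorial aside, not part of the commit: a worked illustration of the update above, assuming the elided guard in efx_update_diff_stat() only stores values that increase, as the surrounding comment describes. The numbers are made up. A derived statistic such as tx_good_bytes = tx_bytes - tx_bad_bytes is computed from counters sampled at slightly different times, so successive computations can transiently dip below the true value.)

/* Illustrative only: keep the reported statistic monotonic even when
 * the computed difference briefly decreases between readings.
 */
static void example_diff_stat_usage(void)
{
	u64 tx_good_bytes = 0;

	efx_update_diff_stat(&tx_good_bytes, 10000);	/* stored: 10000 */
	efx_update_diff_stat(&tx_good_bytes, 9900);	/* ignored: would decrease */
	efx_update_diff_stat(&tx_good_bytes, 10400);	/* stored: 10400 */
}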
-/* Interrupts and test events */
+/* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_event_test_start(struct efx_channel *channel);
extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
-extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
-extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
-extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+extern void efx_farch_irq_enable_master(struct efx_nic *efx);
+extern void efx_farch_irq_test_generate(struct efx_nic *efx);
+extern void efx_farch_irq_disable_master(struct efx_nic *efx);
+extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -345,69 +693,47 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx);
+extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void
-efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_nic_init_common(struct efx_nic *efx);
-extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
+extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+extern void efx_farch_init_common(struct efx_nic *efx);
+extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
+{
+ efx->type->rx_push_indir_table(efx);
+}
+extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len);
+ unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
/* Tests */
-struct efx_nic_register_test {
+struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs);
+extern int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- */
+extern size_t
+efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+extern void
+efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate);
+
+#define EFX_MAX_FLUSH_TIME 5000
-#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
-#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
-
-/* Retrieve statistic from statistics block */
-#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
- if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
- (efx)->mac_stats.efx_stat += le16_to_cpu( \
- *((__force __le16 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
- (efx)->mac_stats.efx_stat += le32_to_cpu( \
- *((__force __le32 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else \
- (efx)->mac_stats.efx_stat += le64_to_cpu( \
- *((__force __le64 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- } while (0)
-
-#define FALCON_MAC_STATS_SIZE 0x100
-
-#define MAC_DATA_LBN 0
-#define MAC_DATA_WIDTH 32
-
-extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
-
-extern void falcon_poll_xmac(struct efx_nic *efx);
+extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd844..45eeb707515 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
-/****************************************************************************
- * Siena managed PHYs
- */
-extern const struct efx_phy_operations efx_mcdi_phy_ops;
-
-extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 *value_out, u32 *status_out);
-extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 value, u32 *status_out);
-extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl);
-extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
-extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
-
#endif
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b495394a6df..03acf57df04 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,7 +46,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "nic.h"
/* Maximum number of events expected to make up a PTP event */
@@ -294,8 +294,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
- MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+ MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -311,9 +310,10 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
efx->ptp_data->channel->channel);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@@ -329,9 +329,10 @@ static int efx_ptp_enable(struct efx_nic *efx)
*/
static int efx_ptp_disable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@@ -389,14 +390,14 @@ static void efx_ptp_send_times(struct efx_nic *efx,
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
now.ts_real.tv_nsec);
/* Update host time in NIC memory */
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+ efx->type->ptp_write_host_time(efx, host_time);
}
*last_time = now;
}
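
(Editorial aside, not part of the commit: a minimal sketch of how the packed value written above could be unpacked, assuming host_time is the 32-bit quantity written to the NIC and that MC_NANOSECOND_BITS leaves room for a full nanosecond count, i.e. 10^9 < 2^MC_NANOSECOND_BITS.)

/* Illustrative only: the low MC_NANOSECOND_BITS bits carry nanoseconds,
 * the remaining high bits carry the low-order seconds.
 */
static void example_unpack_host_time(u32 host_time,
				     u32 *low_seconds, u32 *nanoseconds)
{
	*nanoseconds = host_time & ((1U << MC_NANOSECOND_BITS) - 1);
	*low_seconds = host_time >> MC_NANOSECOND_BITS;
}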
/* Read a timeset from the MC's results and partial process. */
-static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+ struct efx_ptp_timeset *timeset)
{
unsigned start_ns, end_ns;
@@ -425,12 +426,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
* busy. A number of readings are taken so that, hopefully, at least one good
* synchronisation will be seen in the results.
*/
-static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
- size_t response_length,
- const struct pps_event_time *last_time)
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+ size_t response_length,
+ const struct pps_event_time *last_time)
{
- unsigned number_readings = (response_length /
- MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+ unsigned number_readings =
+ MCDI_VAR_ARRAY_LEN(response_length,
+ PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
unsigned total;
unsigned ngood = 0;
@@ -447,8 +450,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
 * appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
- efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
- synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+ efx_ptp_read_timeset(
+ MCDI_ARRAY_STRUCT_PTR(synch_buf,
+ PTP_OUT_SYNCHRONIZE_TIMESET, i),
+ &ptp->timeset[i]);
}
/* Find the last good host-MC synchronization result. The MC times
@@ -518,7 +523,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+ MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
size_t response_length;
int rc;
unsigned long timeout;
@@ -527,17 +532,17 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
int *start = ptp->start.addr;
MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
num_readings);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
- (u32)ptp->start.dma_addr);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
- (u32)((u64)ptp->start.dma_addr >> 32));
+ MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+ ptp->start.dma_addr);
/* Clear flag that signals MC ready */
ACCESS_ONCE(*start) = 0;
- efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
- MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ EFX_BUG_ON_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
@@ -564,15 +569,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
{
- u8 *txbuf = efx->ptp_data->txbuf;
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
int rc = -EIO;
- /* MCDI driver requires word aligned lengths */
- size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
- u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+ MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+ size_t len;
- MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
- MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
if (skb_shinfo(skb)->nr_frags != 0) {
rc = skb_linearize(skb);
if (rc != 0)
@@ -585,10 +590,12 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
}
skb_copy_from_linear_data(skb,
- &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
- len);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
- sizeof(txtime), &len);
+ MCDI_PTR(ptp_data->txbuf,
+ PTP_IN_TRANSMIT_PACKET),
+ skb->len);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+ ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+ txtime, sizeof(txtime), &len);
if (rc != 0)
goto fail;
@@ -872,7 +879,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
if (!efx->ptp_data)
return -ENOMEM;
- rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+ rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
@@ -1359,7 +1366,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1373,9 +1380,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
(PPB_EXTRA_BITS + MAX_PPB_BITS));
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
- (u32)(adjustment_ns >> 32));
+ MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
@@ -1394,11 +1400,11 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
struct timespec delta_ts = ns_to_timespec(delta);
- u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1411,11 +1417,12 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
- u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3..efa3612affc 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 6af9cfda50f..4a596725023 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
+#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -60,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
return page_address(buf->page) + buf->page_offset;
}
-static inline u32 efx_rx_buf_hash(const u8 *eh)
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
- /* The ethernet header is always directly after any hash. */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(eh - 4));
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
- const u8 *data = eh - 4;
+ const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
unsigned int fill_level, batch_size;
int space, rc = 0;
+ if (!rx_queue->refill_enabled)
+ return;
+
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -435,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+ skb->rxhash = efx_rx_buf_hash(efx, eh);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -523,10 +526,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
- efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
- unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
- unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
@@ -551,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
return;
}
- if (n_frags == 1)
+ if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
@@ -564,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
prefetch(efx_rx_buf_va(rx_buf));
- rx_buf->page_offset += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
@@ -577,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
- efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
- rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
@@ -630,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
@@ -738,9 +749,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */
- rx_queue->enabled = true;
efx_nic_init_rx(rx_queue);
}
@@ -753,11 +764,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- /* A flush failure might have left rx_queue->enabled */
- rx_queue->enabled = false;
-
del_timer_sync(&rx_queue->slow_fill);
- efx_nic_fini_rx(rx_queue);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -803,3 +810,130 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(struct vlan_hdr));
+ if (((const struct vlan_hdr *)skb->data + nhoff)->
+ h_vlan_encapsulated_proto != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* This is IP over 802.1q VLAN. We can't filter on the
+ * IP 5-tuple and the vlan together, so just strip the
+ * vlan header and filter on the IP part.
+ */
+ nhoff += sizeof(struct vlan_hdr);
+ } else if (skb->protocol != htons(ETH_P_IP)) {
+ return -EPROTONOSUPPORT;
+ }
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx->type->filter_rfs_insert(efx, &spec);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ efx->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+{
+ bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+ unsigned int index, size;
+ u32 flow_id;
+
+ if (!spin_trylock_bh(&efx->filter_lock))
+ return false;
+
+ expire_one = efx->type->filter_rfs_expire_one;
+ index = efx->rps_expire_index;
+ size = efx->type->max_rx_ip_filters;
+ while (quota--) {
+ flow_id = efx->rps_flow_id[index];
+ if (expire_one(efx, flow_id, index))
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [flow %u]\n",
+ index, flow_id);
+ if (++index == size)
+ index = 0;
+ }
+ efx->rps_expire_index = index;
+
+ spin_unlock_bh(&efx->filter_lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 2069f51b2aa..144bbff5a4a 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
static int efx_poll_loopback(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
- struct efx_channel *channel;
- /* NAPI polling is not enabled, so process channels
- * synchronously */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- }
return atomic_read(&state->rx_good) == state->packet_count;
}
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
- } else {
- struct efx_channel *channel = efx_get_channel(efx, 0);
- if (channel->work_pending)
- efx_process_channel_now(channel);
}
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b73605..87698ae0bf7 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 8c91775e3c5..d034bcd124e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -18,8 +18,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "spi.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -30,7 +29,6 @@
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -52,81 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
-static int siena_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
- addr, value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
-
-static int siena_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint16_t value;
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
- addr, &value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (int)value;
-}
-
-/* This call is responsible for hooking in the MAC and PHY operations */
-static int siena_probe_port(struct efx_nic *efx)
-{
- int rc;
-
- /* Hook in PHY operations table */
- efx->phy_op = &efx_mcdi_phy_ops;
-
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = siena_mdio_read;
- efx->mdio.mdio_write = siena_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
- rc = efx->phy_op->probe(efx);
- if (rc != 0)
- return rc;
-
- /* Allocate buffer for stats */
- rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64));
- if (rc)
- return rc;
- netif_dbg(efx, probe, efx->net_dev,
- "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
-
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
-
- return 0;
-}
-
-static void siena_remove_port(struct efx_nic *efx)
-{
- efx->phy_op->remove(efx);
- efx_nic_free_buffer(efx, &efx->stats_buffer);
-}
-
void siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
@@ -139,7 +62,7 @@ void siena_finish_flush(struct efx_nic *efx)
efx_mcdi_set_mac(efx);
}
-static const struct efx_nic_register_test siena_register_tests[] = {
+static const struct efx_farch_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
@@ -178,16 +101,16 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
*/
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
if (rc)
goto out;
tests->registers =
- efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests))
+ efx_farch_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
? -1 : 1;
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
out:
rc2 = efx_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
@@ -200,11 +123,6 @@ out:
**************************************************************************
*/
-static enum reset_type siena_map_reset_reason(enum reset_type reason)
-{
- return RESET_TYPE_RECOVER_OR_ALL;
-}
-
static int siena_map_reset_flags(u32 *flags)
{
enum {
@@ -230,21 +148,6 @@ static int siena_map_reset_flags(u32 *flags)
return -EINVAL;
}
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
-{
- int rc;
-
- /* Recover from a failed assertion pre-reset */
- rc = efx_mcdi_handle_assertion(efx);
- if (rc)
- return rc;
-
- if (method == RESET_TYPE_WORLD)
- return efx_mcdi_reset_mc(efx);
- else
- return efx_mcdi_reset_port(efx);
-}
-
#ifdef CONFIG_EEH
/* When a PCI device is isolated from the bus, a subsequent MMIO read is
* required for the kernel EEH mechanisms to notice. As the Solarflare driver
@@ -274,19 +177,25 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
return rc;
}
-static void siena_dimension_resources(struct efx_nic *efx)
+static int siena_dimension_resources(struct efx_nic *efx)
{
/* Each port has a small block of internal SRAM dedicated to
* the buffer table and descriptor caches. In theory we can
* map both blocks to one port, but we don't.
*/
- efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ return 0;
+}
+
+static unsigned int siena_mem_map_size(struct efx_nic *efx)
+{
+ return FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
}
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = false;
efx_oword_t reg;
int rc;
@@ -296,38 +205,24 @@ static int siena_probe_nic(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
+ efx->max_channels = EFX_MAX_CHANNELS;
+
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- efx_mcdi_init(efx);
-
- /* Recover from a failed assertion before probing */
- rc = efx_mcdi_handle_assertion(efx);
+ rc = efx_mcdi_init(efx);
if (rc)
goto fail1;
- /* Let the BMC know that the driver is now in charge of link and
- * filter settings. We must do this before we reset the NIC */
- rc = efx_mcdi_drv_attach(efx, true, &already_attached);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
- goto fail2;
- }
- if (already_attached)
- /* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
-
/* Now we can reset the NIC */
- rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -336,7 +231,8 @@ static int siena_probe_nic(struct efx_nic *efx)
siena_init_wol(efx);
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -371,8 +267,7 @@ fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
- efx_mcdi_drv_attach(efx, false, NULL);
-fail2:
+ efx_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
return rc;
@@ -448,7 +343,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -458,144 +353,192 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->irq_status);
- siena_reset_hw(efx, RESET_TYPE_ALL);
+ efx_mcdi_reset(efx, RESET_TYPE_ALL);
- /* Relinquish the device back to the BMC */
- efx_mcdi_drv_attach(efx, false, NULL);
+ efx_mcdi_fini(efx);
/* Tear down the private nic state */
kfree(efx->nic_data);
efx->nic_data = NULL;
}
-#define STATS_GENERATION_INVALID ((__force __le64)(-1))
+#define SIENA_DMA_STAT(ext_name, mcdi_name) \
+ [SIENA_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define SIENA_OTHER_STAT(ext_name) \
+ [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
+ SIENA_DMA_STAT(tx_bytes, TX_BYTES),
+ SIENA_OTHER_STAT(tx_good_bytes),
+ SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
+ SIENA_DMA_STAT(tx_packets, TX_PKTS),
+ SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ SIENA_DMA_STAT(tx_64, TX_64_PKTS),
+ SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
+ SIENA_OTHER_STAT(tx_collision),
+ SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
+ SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(rx_bytes, RX_BYTES),
+ SIENA_OTHER_STAT(rx_good_bytes),
+ SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
+ SIENA_DMA_STAT(rx_packets, RX_PKTS),
+ SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ SIENA_DMA_STAT(rx_64, RX_64_PKTS),
+ SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+ SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+};
+static const unsigned long siena_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
+}
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
__le64 *dma_stats;
- struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
- mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
- if (generation_end == STATS_GENERATION_INVALID)
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
-
-#define MAC_STAT(M, D) \
- mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
-
- MAC_STAT(tx_bytes, TX_BYTES);
- MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
- MAC_STAT(tx_packets, TX_PKTS);
- MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
- MAC_STAT(tx_pause, TX_PAUSE_PKTS);
- MAC_STAT(tx_control, TX_CONTROL_PKTS);
- MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
- MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
- MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
- MAC_STAT(tx_lt64, TX_LT64_PKTS);
- MAC_STAT(tx_64, TX_64_PKTS);
- MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
- MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
- MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
- MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
- MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
- MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
- mac_stats->tx_collision = 0;
- MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
- MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
- MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
- MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
- MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
- mac_stats->tx_collision = (mac_stats->tx_single_collision +
- mac_stats->tx_multiple_collision +
- mac_stats->tx_excessive_collision +
- mac_stats->tx_late_collision);
- MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
- MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
- MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
- MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
- MAC_STAT(rx_bytes, RX_BYTES);
- MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->rx_good_bytes,
- mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
- MAC_STAT(rx_packets, RX_PKTS);
- MAC_STAT(rx_good, RX_GOOD_PKTS);
- MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
- MAC_STAT(rx_pause, RX_PAUSE_PKTS);
- MAC_STAT(rx_control, RX_CONTROL_PKTS);
- MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
- MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
- MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
- MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
- MAC_STAT(rx_64, RX_64_PKTS);
- MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
- MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
- MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
- MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
- MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
- MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
- mac_stats->rx_bad_lt64 = 0;
- mac_stats->rx_bad_64_to_15xx = 0;
- mac_stats->rx_bad_15xx_to_jumbo = 0;
- MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
- MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
- mac_stats->rx_missed = 0;
- MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
- MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
- MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
- MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
- MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
- mac_stats->rx_good_lt64 = 0;
-
- efx->n_rx_nodesc_drop_cnt =
- le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
-
-#undef MAC_STAT
-
+ efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
+ /* Update derived statistics */
+ efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+ stats[SIENA_STAT_tx_bytes] -
+ stats[SIENA_STAT_tx_bad_bytes]);
+ stats[SIENA_STAT_tx_collision] =
+ stats[SIENA_STAT_tx_single_collision] +
+ stats[SIENA_STAT_tx_multiple_collision] +
+ stats[SIENA_STAT_tx_excessive_collision] +
+ stats[SIENA_STAT_tx_late_collision];
+ efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+ stats[SIENA_STAT_rx_bytes] -
+ stats[SIENA_STAT_rx_bad_bytes]);
return 0;
}
-static void siena_update_nic_stats(struct efx_nic *efx)
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
int retry;
	/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
- return;
+ break;
udelay(100);
}
- /* Use the old values instead */
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+ core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+ core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+ core_stats->collisions = stats[SIENA_STAT_tx_collision];
+ core_stats->rx_length_errors =
+ stats[SIENA_STAT_rx_gtjumbo] +
+ stats[SIENA_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
+ core_stats->tx_window_errors =
+ stats[SIENA_STAT_tx_late_collision];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[SIENA_STAT_rx_symbol_error]);
+ core_stats->tx_errors = (core_stats->tx_window_errors +
+ stats[SIENA_STAT_tx_bad]);
+ }
+
+ return SIENA_STAT_COUNT;
}
-static void siena_start_nic_stats(struct efx_nic *efx)
+static int siena_mac_reconfigure(struct efx_nic *efx)
{
- __le64 *dma_stats = efx->stats_buffer.addr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
+ int rc;
- dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+ BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
+ MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
+ sizeof(efx->multicast_hash));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
-}
+ efx_farch_filter_sync_rx_mode(efx);
-static void siena_stop_nic_stats(struct efx_nic *efx)
-{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = efx_mcdi_set_mac(efx);
+ if (rc != 0)
+ return rc;
+
+ memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash));
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
}
/**************************************************************************
@@ -669,6 +612,241 @@ static void siena_init_wol(struct efx_nic *efx)
}
}
+/**************************************************************************
+ *
+ * MCDI
+ *
+ **************************************************************************
+ */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
+#define MCDI_STATUS(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+static void siena_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
+
+ EFX_BUG_ON_PARANOID(hdr_len != 4);
+
+ efx_writed(efx, hdr, pdu);
+
+ for (i = 0; i < inlen_dw; i++)
+ efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
+
+ /* Ensure the request is written out before the doorbell */
+ wmb();
+
+ /* ring the doorbell with a distinctive value */
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static bool siena_mcdi_poll_response(struct efx_nic *efx)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ efx_dword_t hdr;
+
+ efx_readd(efx, &hdr, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+	 * not a valid hdr). Wait for it to come out of reset before
+ * completing the command
+ */
+ return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
+ int i;
+
+ for (i = 0; i < outlen_dw; i++)
+ efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
+}
+
+static int siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
+ efx_dword_t reg;
+ u32 value;
+
+ efx_readd(efx, &reg, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
+ efx_writed(efx, &reg, addr);
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * copies that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
+ nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * MTD
+ *
+ **************************************************************************
+ */
+
+#ifdef CONFIG_SFC_MTD
+
+struct siena_nvram_type_info {
+ int port;
+ const char *name;
+};
+
+static const struct siena_nvram_type_info siena_nvram_types[] = {
+ [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
+ [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
+ [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ const struct siena_nvram_type_info *info;
+ size_t size, erase_size;
+ bool protected;
+ int rc;
+
+ if (type >= ARRAY_SIZE(siena_nvram_types) ||
+ siena_nvram_types[type].name == NULL)
+ return -ENODEV;
+
+ info = &siena_nvram_types[type];
+
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+ part->common.dev_type_name = "Siena NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *parts,
+ size_t n_parts)
+{
+ uint16_t fw_subtype_list[
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
+ size_t i;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < n_parts; i++)
+ parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
+
+ return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_mtd_partition *parts;
+ u32 nvram_types;
+ unsigned int type;
+ size_t n_parts;
+ int rc;
+
+ ASSERT_RTNL();
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ type = 0;
+ n_parts = 0;
+
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = siena_mtd_probe_partition(efx, &parts[n_parts],
+ type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
+ if (rc)
+ goto fail;
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
/**************************************************************************
*
@@ -678,6 +856,7 @@ static void siena_init_wol(struct efx_nic *efx)
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
@@ -688,44 +867,94 @@ const struct efx_nic_type siena_a0_nic_type = {
#else
.monitor = NULL,
#endif
- .map_reset_reason = siena_map_reset_reason,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
- .reset = siena_reset_hw,
- .probe_port = siena_probe_port,
- .remove_port = siena_remove_port,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
+ .describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
- .start_stats = siena_start_nic_stats,
- .stop_stats = siena_stop_nic_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
- .reconfigure_mac = efx_mcdi_mac_reconfigure,
+ .reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
- .reconfigure_port = efx_mcdi_phy_reconfigure,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = siena_mcdi_request,
+ .mcdi_poll_response = siena_mcdi_poll_response,
+ .mcdi_read_response = siena_mcdi_read_response,
+ .mcdi_poll_reboot = siena_mcdi_poll_reboot,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = siena_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = siena_ptp_write_host_time,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = (FR_CZ_MC_TREG_SMEM +
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 90f8d1604f5..0c38f926871 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2010-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,7 @@
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
unsigned *vi_scale_out, unsigned *vf_total_out)
{
- u8 inbuf[MC_CMD_SRIOV_IN_LEN];
- u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
unsigned vi_scale, vf_total;
size_t outlen;
int rc;
@@ -240,64 +240,55 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
unsigned int count)
{
- u8 *inbuf, *record;
- unsigned int used;
- u32 from_rid, from_hi, from_lo;
+ MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
+ MCDI_DECLARE_STRUCT_PTR(record);
+ unsigned int index, used;
+ u64 from_addr;
+ u32 from_rid;
int rc;
mb(); /* Finish writing source/reading dest before DMA starts */
- used = MC_CMD_MEMCPY_IN_LEN(count);
- if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
+ if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
return -ENOBUFS;
+ used = MC_CMD_MEMCPY_IN_LEN(count);
- /* Allocate room for the largest request */
- inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
- if (inbuf == NULL)
- return -ENOMEM;
-
- record = inbuf;
- MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
- while (count-- > 0) {
+ for (index = 0; index < count; index++) {
+ record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
+ count);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
req->to_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
- (u32)req->to_addr);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
- (u32)(req->to_addr >> 32));
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
+ req->to_addr);
if (req->from_buf == NULL) {
from_rid = req->from_rid;
- from_lo = (u32)req->from_addr;
- from_hi = (u32)(req->from_addr >> 32);
+ from_addr = req->from_addr;
} else {
- if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
+ if (WARN_ON(used + req->length >
+ MCDI_CTL_SDU_LEN_MAX_V1)) {
rc = -ENOBUFS;
goto out;
}
from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
- from_lo = used;
- from_hi = 0;
- memcpy(inbuf + used, req->from_buf, req->length);
+ from_addr = used;
+ memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
+ req->length);
used += req->length;
}
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
- from_lo);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
- from_hi);
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
+ from_addr);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
req->length);
++req;
- record += MC_CMD_MEMCPY_IN_RECORD_LEN;
}
rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
- kfree(inbuf);
-
mb(); /* Don't write source/read dest before DMA is complete */
return rc;
@@ -473,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
++vf->msg_seqno;
- efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
- &event);
+ efx_farch_generate_event(efx,
+ EFX_VI_BASE + vf->index * efx_vf_size(efx),
+ &event);
}
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -684,16 +676,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
unsigned timeout = HZ;
unsigned index, rxqs_count;
- __le32 *rxqs;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
int rc;
BUILD_BUG_ON(VF_MAX_RX_QUEUES >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
- if (rxqs == NULL)
- return VFDI_RC_ENOMEM;
-
rtnl_lock();
siena_prepare_flush(efx);
rtnl_unlock();
@@ -708,14 +696,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
vf_offset + index);
efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
}
- if (test_bit(index, vf->rxq_mask))
- rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
+ if (test_bit(index, vf->rxq_mask)) {
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
+ }
}
atomic_set(&vf->rxq_retry_count, 0);
while (timeout && (vf->rxq_count || vf->txq_count)) {
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
- rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+ NULL, 0, NULL);
WARN_ON(rc < 0);
timeout = wait_event_timeout(vf->flush_waitq,
@@ -725,8 +718,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
for (index = 0; index < count; ++index) {
if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
atomic_dec(&vf->rxq_retry_count);
- rxqs[rxqs_count++] =
- cpu_to_le32(vf_offset + index);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
}
}
}
@@ -749,7 +744,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
}
efx_sriov_bufs(efx, vf->buftbl_base, NULL,
EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
- kfree(rxqs);
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
@@ -1004,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
- if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
+ if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
efx_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
@@ -1248,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn));
- rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
+ rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+ GFP_KERNEL);
if (rc)
goto fail;
@@ -1280,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
+ rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
+ GFP_KERNEL);
if (rc)
goto fail_status;
vfdi_status = efx->vfdi_status.addr;
@@ -1535,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
efx_sriov_usrev(efx, true);
(void)efx_sriov_cmd(efx, true, NULL, NULL);
- if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
+ if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
deleted file mode 100644
index 5431a1bbff5..00000000000
--- a/drivers/net/ethernet/sfc/spi.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_SPI_H
-#define EFX_SPI_H
-
-#include "net_driver.h"
-
-/**************************************************************************
- *
- * Basic SPI command set and bit definitions
- *
- *************************************************************************/
-
-#define SPI_WRSR 0x01 /* Write status register */
-#define SPI_WRITE 0x02 /* Write data to memory array */
-#define SPI_READ 0x03 /* Read data from memory array */
-#define SPI_WRDI 0x04 /* Reset write enable latch */
-#define SPI_RDSR 0x05 /* Read status register */
-#define SPI_WREN 0x06 /* Set write enable latch */
-#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
-
-#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
-#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
-#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
-#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
-#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
-#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
-
-/**
- * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
- * @device_id: Controller's id for the device
- * @size: Size (in bytes)
- * @addr_len: Number of address bytes in read/write commands
- * @munge_address: Flag whether addresses should be munged.
- * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
- * use bit 3 of the command byte as address bit A8, rather
- * than having a two-byte address. If this flag is set, then
- * commands should be munged in this way.
- * @erase_command: Erase command (or 0 if sector erase not needed).
- * @erase_size: Erase sector size (in bytes)
- * Erase commands affect sectors with this size and alignment.
- * This must be a power of two.
- * @block_size: Write block size (in bytes).
- * Write commands are limited to blocks with this size and alignment.
- */
-struct efx_spi_device {
- int device_id;
- unsigned int size;
- unsigned int addr_len;
- unsigned int munge_address:1;
- u8 erase_command;
- unsigned int erase_size;
- unsigned int block_size;
-};
-
-static inline bool efx_spi_present(const struct efx_spi_device *spi)
-{
- return spi->size != 0;
-}
-
-int falcon_spi_cmd(struct efx_nic *efx,
- const struct efx_spi_device *spi, unsigned int command,
- int address, const void *in, void *out, size_t len);
-int falcon_spi_wait_write(struct efx_nic *efx,
- const struct efx_spi_device *spi);
-int falcon_spi_read(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-int falcon_spi_write(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-
-/*
- * SFC4000 flash is partitioned into:
- * 0-0x400 chip and board config (see falcon_hwdefs.h)
- * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
- * 0x8000-end boot code (mapped to PCI expansion ROM)
- * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
- * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
- * 0-0x400 chip and board config
- * configurable VPD
- * 0x800-0x1800 boot config
- * Aside from the chip and board config, all of these are optional and may
- * be absent or truncated depending on the devices used.
- */
-#define FALCON_NVCONFIG_END 0x400U
-#define FALCON_FLASH_BOOTCODE_START 0x8000U
-#define EFX_EEPROM_BOOTCONFIG_START 0x800U
-#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
-
-#endif /* EFX_SPI_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb501712..2c90e6b3157 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5e090e54298..2ac91c5b5ee 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (unlikely(buffer->len == 0)) {
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
@@ -437,6 +439,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
@@ -543,10 +548,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->initialised = true;
}
-void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
if (!tx_queue->buffer)
return;
@@ -561,22 +569,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
netdev_tx_reset_queue(tx_queue->core_txq);
}
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- if (!tx_queue->initialised)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- tx_queue->initialised = false;
-
- /* Flush TX queue, remove descriptor ring */
- efx_nic_fini_tx(tx_queue);
-
- efx_release_tx_buffers(tx_queue);
-}
-
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
int i;
@@ -708,7 +700,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
if (unlikely(!page_buf->addr) &&
- efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
return NULL;
result = (u8 *)page_buf->addr + offset;
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c..3d5ee325988 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5..ae044f44936 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304..2310b75d4ec 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,27 +15,15 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
-#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
-/* XAUI resets if link not detected */
-#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* RX PCIe double split performance issue */
-#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
- * or a PCIe error (bug 11028) */
-#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
-/* Transmit flow control may get disabled */
-#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
-/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
@@ -56,4 +44,10 @@
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx) \
+ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 9f5f35e041a..770036bc2d8 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -212,9 +212,8 @@ static void meth_check_link(struct net_device *dev)
static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
- priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma,
- GFP_ATOMIC | __GFP_ZERO);
+ priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
if (!priv->tx_ring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02df0894690..ee18e6f7b4f 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
struct sis190_private *tp = netdev_priv(dev);
unsigned long flags;
- if (regs->len > SIS190_REGS_SIZE)
- regs->len = SIS190_REGS_SIZE;
-
spin_lock_irqsave(&tp->lock, flags);
memcpy_fromio(p, tp->mmio_addr, regs->len);
spin_unlock_irqrestore(&tp->lock, flags);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index f5d7ad75e47..975dc2d8e54 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data)
struct sis900_private *sis_priv = netdev_priv(net_dev);
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
+ int speed = 0, duplex = 0;
u16 status;
- if (!sis_priv->autong_complete){
- int uninitialized_var(speed), duplex = 0;
-
- sis900_read_mode(net_dev, &speed, &duplex);
- if (duplex){
- sis900_set_mode(sis_priv, speed, duplex);
- sis630_set_eq(net_dev, sis_priv->chipset_rev);
- netif_carrier_on(net_dev);
- }
-
- sis_priv->timer.expires = jiffies + HZ;
- add_timer(&sis_priv->timer);
- return;
- }
-
status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
@@ -1336,8 +1322,16 @@ static void sis900_timer(unsigned long data)
status = sis900_default_phy(net_dev);
mii_phy = sis_priv->mii;
- if (status & MII_STAT_LINK)
- sis900_check_mode(net_dev, mii_phy);
+ if (status & MII_STAT_LINK) {
+ WARN_ON(!(status & MII_STAT_AUTO_DONE));
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if (duplex) {
+ sis900_set_mode(sis_priv, speed, duplex);
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+ netif_carrier_on(net_dev);
+ }
+ }
} else {
/* Link ON -> OFF */
if (!(status & MII_STAT_LINK)){
@@ -1715,7 +1709,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
if(netif_msg_intr(sis_priv))
printk(KERN_DEBUG "%s: exiting interrupt, "
- "interrupt status = 0x%#8.8x.\n",
+ "interrupt status = %#8.8x\n",
net_dev->name, sr32(isr));
spin_unlock (&sis_priv->lock);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 345558fe736..afe01c4088a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2067,7 +2067,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
{
- struct smc911x_platdata *pd = pdev->dev.platform_data;
+ struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
if (!pd) {
ret = -EINVAL;
goto release_both;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index cde13be7c7d..73be7f3982e 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2202,7 +2202,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
*/
static int smc_drv_probe(struct platform_device *pdev)
{
- struct smc91x_platdata *pd = pdev->dev.platform_data;
+ struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
struct smc_local *lp;
struct net_device *ndev;
struct resource *res, *ires;
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 370e13dde11..5730fe2445a 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -271,7 +271,7 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
-#define SMC_IRQ_FLAGS (IRQF_DISABLED)
+#define SMC_IRQ_FLAGS 0
#else
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a1419211585..5fdbc2686eb 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2374,7 +2374,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
struct smsc911x_data *pdata;
- struct smsc911x_platform_config *config = pdev->dev.platform_data;
+ struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res, *irq_res;
unsigned int intcfg = 0;
int res_size, irq_flags;
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index ffa5c4ad121..5f9e79f7f2d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1356,8 +1356,7 @@ static int smsc9420_open(struct net_device *dev)
smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
smsc9420_pci_flush_write(pd);
- result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
- DRV_NAME, pd);
+ result = request_irq(irq, smsc9420_isr, IRQF_SHARED, DRV_NAME, pd);
if (result) {
smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
result = -ENODEV;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c922fde929a..f16a9bdf45b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -70,7 +70,6 @@ struct stmmac_priv {
struct net_device *dev;
struct device *device;
struct mac_device_info *hw;
- int no_csum_insertion;
spinlock_t lock;
struct phy_device *phydev ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index def7e75e1d5..76ad214b403 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -45,8 +45,8 @@ static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
data = (1000000000ULL / 50000000);
/* 0.465ns accuracy */
- if (value & PTP_TCR_TSCTRLSSR)
- data = (data * 100) / 465;
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
writel(data, ioaddr + PTP_SSIR);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0a9bb9d30c3..8d4ccd35a01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1224,8 +1224,9 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely(priv->plat->force_sf_dma_mode ||
- ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ if (priv->plat->force_thresh_dma_mode)
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+ else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1c83a44c547..51c9069ef40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -83,6 +83,11 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
dma_cfg->mixed_burst =
of_property_read_bool(np, "snps,mixed-burst");
}
+ plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+ if (plat->force_thresh_dma_mode) {
+ plat->force_sf_dma_mode = 0;
+ pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
+ }
return 0;
}
@@ -113,14 +118,11 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
const char *mac = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr))
return PTR_ERR(addr);
- plat_dat = pdev->dev.platform_data;
+ plat_dat = dev_get_platdata(&pdev->dev);
if (pdev->dev.of_node) {
if (!plat_dat)
plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index fa322409bff..f28460ce24a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9360,7 +9360,7 @@ static ssize_t show_port_phy(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
u32 port_phy = p->port_phy;
char *orig_buf = buf;
int i;
@@ -9390,7 +9390,7 @@ static ssize_t show_plat_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
const char *type_str;
switch (p->plat_type) {
@@ -9419,7 +9419,7 @@ static ssize_t __show_chan_per_port(struct device *dev,
int rx)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
char *orig_buf = buf;
u8 *arr;
int i;
@@ -9452,7 +9452,7 @@ static ssize_t show_num_ports(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
return sprintf(buf, "%d\n", p->num_ports);
}
@@ -9478,7 +9478,7 @@ static struct niu_parent *niu_new_parent(struct niu *np,
if (IS_ERR(plat_dev))
return NULL;
- for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
+ for (i = 0; niu_parent_attributes[i].attr.name; i++) {
int err = device_create_file(&plat_dev->dev,
&niu_parent_attributes[i]);
if (err)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d43fa9ff98..7217ee5d627 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
static int bigmac_sbus_remove(struct platform_device *op)
{
- struct bigmac *bp = dev_get_drvdata(&op->dev);
+ struct bigmac *bp = platform_get_drvdata(op);
struct device *parent = op->dev.parent;
struct net_device *net_dev = bp->dev;
struct platform_device *qec_op;
@@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 171f5b0809c..e37b587b386 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2798,7 +2798,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out_free_coherent;
}
- dev_set_drvdata(&op->dev, hp);
+ platform_set_drvdata(op, hp);
if (qfe_slot != -1)
printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
@@ -3111,7 +3111,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
goto err_out_iounmap;
}
- dev_set_drvdata(&pdev->dev, hp);
+ pci_set_drvdata(pdev, hp);
if (!qfe_slot) {
struct pci_dev *qpdev = qp->quattro_dev;
@@ -3159,7 +3159,7 @@ err_out:
static void happy_meal_pci_remove(struct pci_dev *pdev)
{
- struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
+ struct happy_meal *hp = pci_get_drvdata(pdev);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
@@ -3171,7 +3171,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
free_netdev(net_dev);
- dev_set_drvdata(&pdev->dev, NULL);
+ pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
@@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op)
static int hme_sbus_remove(struct platform_device *op)
{
- struct happy_meal *hp = dev_get_drvdata(&op->dev);
+ struct happy_meal *hp = platform_get_drvdata(op);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
@@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op)
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 31bbbca341a..2dc16b6efaf 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -636,7 +636,7 @@ static void cpmac_hw_stop(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
ar7_device_reset(pdata->reset_bit);
cpmac_write(priv->regs, CPMAC_RX_CONTROL,
@@ -659,7 +659,7 @@ static void cpmac_hw_start(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
ar7_device_reset(pdata->reset_bit);
for (i = 0; i < 8; i++) {
@@ -1118,7 +1118,7 @@ static int cpmac_probe(struct platform_device *pdev)
struct net_device *dev;
struct plat_cpmac_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (external_switch || dumb_switch) {
strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 22a7a433621..79974e31187 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,9 +34,9 @@
#include <linux/of_device.h>
#include <linux/if_vlan.h>
-#include <linux/platform_data/cpsw.h>
#include <linux/pinctrl/consumer.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
@@ -82,6 +82,8 @@ do { \
#define CPSW_VERSION_1 0x19010a
#define CPSW_VERSION_2 0x19010c
+#define CPSW_VERSION_3 0x19010f
+#define CPSW_VERSION_4 0x190112
#define HOST_PORT_NUM 0
#define SLIVER_SIZE 0x40
@@ -91,6 +93,7 @@ do { \
#define CPSW1_SLAVE_SIZE 0x040
#define CPSW1_CPDMA_OFFSET 0x100
#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_HW_STATS 0x400
#define CPSW1_CPTS_OFFSET 0x500
#define CPSW1_ALE_OFFSET 0x600
#define CPSW1_SLIVER_OFFSET 0x700
@@ -99,6 +102,7 @@ do { \
#define CPSW2_SLAVE_OFFSET 0x200
#define CPSW2_SLAVE_SIZE 0x100
#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_HW_STATS 0x900
#define CPSW2_STATERAM_OFFSET 0xa00
#define CPSW2_CPTS_OFFSET 0xc00
#define CPSW2_ALE_OFFSET 0xd00
@@ -299,6 +303,44 @@ struct cpsw_sliver_regs {
u32 rx_pri_map;
};
+struct cpsw_hw_stats {
+ u32 rxgoodframes;
+ u32 rxbroadcastframes;
+ u32 rxmulticastframes;
+ u32 rxpauseframes;
+ u32 rxcrcerrors;
+ u32 rxaligncodeerrors;
+ u32 rxoversizedframes;
+ u32 rxjabberframes;
+ u32 rxundersizedframes;
+ u32 rxfragments;
+ u32 __pad_0[2];
+ u32 rxoctets;
+ u32 txgoodframes;
+ u32 txbroadcastframes;
+ u32 txmulticastframes;
+ u32 txpauseframes;
+ u32 txdeferredframes;
+ u32 txcollisionframes;
+ u32 txsinglecollframes;
+ u32 txmultcollframes;
+ u32 txexcessivecollisions;
+ u32 txlatecollisions;
+ u32 txunderrun;
+ u32 txcarriersenseerrors;
+ u32 txoctets;
+ u32 octetframes64;
+ u32 octetframes65t127;
+ u32 octetframes128t255;
+ u32 octetframes256t511;
+ u32 octetframes512t1023;
+ u32 octetframes1024tup;
+ u32 netoctets;
+ u32 rxsofoverruns;
+ u32 rxmofoverruns;
+ u32 rxdmaoverruns;
+};
+
struct cpsw_slave {
void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
@@ -332,6 +374,7 @@ struct cpsw_priv {
struct cpsw_platform_data data;
struct cpsw_ss_regs __iomem *regs;
struct cpsw_wr_regs __iomem *wr_regs;
+ u8 __iomem *hw_stats;
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
u32 version;
@@ -354,6 +397,94 @@ struct cpsw_priv {
u32 emac_port;
};
+struct cpsw_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+enum {
+ CPSW_STATS,
+ CPDMA_RX_STATS,
+ CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m) CPSW_STATS, \
+ sizeof(((struct cpsw_hw_stats *)0)->m), \
+ offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
+ sizeof(((struct cpdma_chan_stats *)0)->m), \
+ offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
+ sizeof(((struct cpdma_chan_stats *)0)->m), \
+ offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+ { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+ { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+ { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+ { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+ { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+ { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+ { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+ { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+ { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+ { "Rx Fragments", CPSW_STAT(rxfragments) },
+ { "Rx Octets", CPSW_STAT(rxoctets) },
+ { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+ { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+ { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+ { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+ { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+ { "Collisions", CPSW_STAT(txcollisionframes) },
+ { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+ { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+ { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+ { "Late Collisions", CPSW_STAT(txlatecollisions) },
+ { "Tx Underrun", CPSW_STAT(txunderrun) },
+ { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+ { "Tx Octets", CPSW_STAT(txoctets) },
+ { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+ { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+ { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+ { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+ { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+ { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+ { "Net Octets", CPSW_STAT(netoctets) },
+ { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+ { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+ { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+ { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
+ { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
+ { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+ { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
+ { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
+ { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
+ { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
+ { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
+ { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
+ { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
+ { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
+ { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
+ { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
+ { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
+ { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
+ { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...) \
do { \
@@ -723,6 +854,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
return 0;
}
+static int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CPSW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < CPSW_STATS_LEN; i++) {
+ memcpy(p, cpsw_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpdma_chan_stats rx_stats;
+ struct cpdma_chan_stats tx_stats;
+ u32 val;
+ u8 *p;
+ int i;
+
+ /* Collect Davinci CPDMA stats for Rx and Tx Channel */
+ cpdma_chan_get_stats(priv->rxch, &rx_stats);
+ cpdma_chan_get_stats(priv->txch, &tx_stats);
+
+ for (i = 0; i < CPSW_STATS_LEN; i++) {
+ switch (cpsw_gstrings_stats[i].type) {
+ case CPSW_STATS:
+ val = readl(priv->hw_stats +
+ cpsw_gstrings_stats[i].stat_offset);
+ data[i] = val;
+ break;
+
+ case CPDMA_RX_STATS:
+ p = (u8 *)&rx_stats +
+ cpsw_gstrings_stats[i].stat_offset;
+ data[i] = *(u32 *)p;
+ break;
+
+ case CPDMA_TX_STATS:
+ p = (u8 *)&tx_stats +
+ cpsw_gstrings_stats[i].stat_offset;
+ data[i] = *(u32 *)p;
+ break;
+ }
+ }
+}
+
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
static char *leader = "........................................";
@@ -799,6 +993,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
break;
}
@@ -1232,6 +1428,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
}
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int flags = 0;
+ u16 vid = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (priv->data.dual_emac) {
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ flags = ALE_VLAN;
+ }
+
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ flags, vid);
+ cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+ flags, vid);
+
+ memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ for_each_slave(priv, cpsw_set_slave_mac, priv);
+
+ return 0;
+}
+
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1326,6 +1549,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
@@ -1416,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
}
+static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->slaves[slave_no].phy)
+ phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+}
+
+static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1426,6 +1673,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_settings = cpsw_set_settings,
.get_coalesce = cpsw_get_coalesce,
.set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1623,6 +1875,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->hw_stats = priv->hw_stats;
priv_sl2->dma = priv->dma;
priv_sl2->txch = priv->txch;
priv_sl2->rxch = priv->rxch;
@@ -1780,7 +2033,8 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1790,8 +2044,11 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.desc_mem_phys = 0;
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
new file mode 100644
index 00000000000..eb3e101ec04
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -0,0 +1,42 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __CPSW_H__
+#define __CPSW_H__
+
+#include <linux/if_ether.h>
+
+struct cpsw_slave_data {
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
+ u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
+ u32 ale_entries; /* ale table size */
+ u32 bd_ram_size; /* buffer descriptor ram size */
+ u32 rx_descs; /* Number of Rx Descriptors */
+ u32 mac_control; /* Mac control register */
+ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
+#endif /* __CPSW_H__ */
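As a usage illustration only (not part of this patch), a board support file could hand this platform data to the driver roughly as follows; every value below is a placeholder rather than data from a real board:

/* Hypothetical board-level sketch.  Needs <linux/kernel.h> for ARRAY_SIZE
 * and <linux/phy.h> for the PHY interface mode; the PHY ids, counts and
 * register values are placeholders.
 */
static struct cpsw_slave_data example_slaves[] = {
	{ .phy_id = "davinci_mdio.0:00", .phy_if = PHY_INTERFACE_MODE_RGMII },
	{ .phy_id = "davinci_mdio.0:01", .phy_if = PHY_INTERFACE_MODE_RGMII },
};

static struct cpsw_platform_data example_cpsw_pdata = {
	.slave_data	= example_slaves,
	.slaves		= ARRAY_SIZE(example_slaves),
	.channels	= 8,
	.active_slave	= 0,
	.ale_entries	= 1024,
	.bd_ram_size	= 0x2000,	/* placeholder */
	.rx_descs	= 64,
	.mac_control	= 0x20,		/* placeholder */
	.dual_emac	= false,
};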
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 031ebc81b50..90a79462c86 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -591,6 +591,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
int cpdma_chan_dump(struct cpdma_chan *chan)
{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 1a222bce4bd..67df09ea9d0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1761,7 +1761,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
const u8 *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 16ddfc34806..4ec92659a10 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -314,7 +314,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
static int davinci_mdio_probe(struct platform_device *pdev)
{
- struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data;
struct resource *res;
@@ -421,8 +421,7 @@ bail_out:
static int davinci_mdio_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ struct davinci_mdio_data *data = platform_get_drvdata(pdev);
if (data->bus) {
mdiobus_unregister(data->bus);
@@ -434,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- dev_set_drvdata(dev, NULL);
-
kfree(data);
return 0;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 098b1c42b39..4083ba8839e 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -15,3 +15,14 @@ config TILE_NET
To compile this driver as a module, choose M here: the module
will be called tile_net.
+
+config PTP_1588_CLOCK_TILEGX
+ tristate "Tilera TILE-Gx mPIPE as PTP clock"
+ select PTP_1588_CLOCK
+ depends on TILE_NET
+ depends on TILEGX
+ ---help---
+ This driver adds support for using the mPIPE as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index f3c2d034b32..13e6fff8ca2 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -36,7 +36,10 @@
#include <linux/io.h>
#include <linux/ctype.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -76,6 +79,9 @@
#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+/* The "kinds" of buffer stacks (small/large/jumbo). */
+#define MAX_KINDS 3
+
/* Size of completions data to allocate.
* ISSUE: Probably more than needed since we don't use all the channels.
*/
@@ -130,29 +136,31 @@ struct tile_net_tx_wake {
/* Info for a specific cpu. */
struct tile_net_info {
- /* The NAPI struct. */
- struct napi_struct napi;
- /* Packet queue. */
- gxio_mpipe_iqueue_t iqueue;
/* Our cpu. */
int my_cpu;
- /* True if iqueue is valid. */
- bool has_iqueue;
- /* NAPI flags. */
- bool napi_added;
- bool napi_enabled;
- /* Number of small sk_buffs which must still be provided. */
- unsigned int num_needed_small_buffers;
- /* Number of large sk_buffs which must still be provided. */
- unsigned int num_needed_large_buffers;
/* A timer for handling egress completions. */
struct hrtimer egress_timer;
/* True if "egress_timer" is scheduled. */
bool egress_timer_scheduled;
- /* Comps for each egress channel. */
- struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
- /* Transmit wake timer for each egress channel. */
- struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+ struct info_mpipe {
+ /* Packet queue. */
+ gxio_mpipe_iqueue_t iqueue;
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Number of buffers (by kind) which must still be provided. */
+ unsigned int num_needed_buffers[MAX_KINDS];
+ /* instance id. */
+ int instance;
+ /* True if iqueue is valid. */
+ bool has_iqueue;
+ /* NAPI flags. */
+ bool napi_added;
+ bool napi_enabled;
+ /* Comps for each egress channel. */
+ struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+ /* Transmit wake timer for each egress channel. */
+ struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+ } mpipe[NR_MPIPE_MAX];
};
/* Info for egress on a particular egress channel. */
@@ -177,19 +185,67 @@ struct tile_net_priv {
int loopify_channel;
/* The egress channel (channel or loopify_channel). */
int echannel;
- /* Total stats. */
- struct net_device_stats stats;
+ /* mPIPE instance, 0 or 1. */
+ int instance;
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ /* The timestamp config. */
+ struct hwtstamp_config stamp_cfg;
+#endif
};
-/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
-static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+static struct mpipe_data {
+ /* The ingress irq. */
+ int ingress_irq;
-/* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
-static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+ /* The "context" for all devices. */
+ gxio_mpipe_context_t context;
+
+ /* Egress info, indexed by "priv->echannel"
+ * (lazily created as needed).
+ */
+ struct tile_net_egress
+ egress_for_echannel[TILE_NET_CHANNELS];
+
+ /* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+ struct net_device
+ *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+ /* The actual memory allocated for the buffer stacks. */
+ void *buffer_stack_vas[MAX_KINDS];
+
+ /* The amount of memory allocated for each buffer stack. */
+ size_t buffer_stack_bytes[MAX_KINDS];
+
+ /* The first buffer stack index
+ * (small = +0, large = +1, jumbo = +2).
+ */
+ int first_buffer_stack;
+
+ /* The buckets. */
+ int first_bucket;
+ int num_buckets;
+
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ /* PTP-specific data. */
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+
+ /* Lock for ptp accessors. */
+ struct mutex ptp_lock;
+#endif
+
+} mpipe_data[NR_MPIPE_MAX] = {
+ [0 ... (NR_MPIPE_MAX - 1)] {
+ .ingress_irq = -1,
+ .first_buffer_stack = -1,
+ .first_bucket = -1,
+ .num_buckets = 1
+ }
+};
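The [0 ... (NR_MPIPE_MAX - 1)] designator above is GCC's range-initializer extension, used so that every per-instance slot starts from the same non-zero defaults. A stand-alone sketch of the idiom (names made up for illustration):

/* All four elements start with irq = -1 and bucket = -1 at compile time. */
struct example_state {
	int irq;
	int bucket;
};

static struct example_state example_table[4] = {
	[0 ... 3] = { .irq = -1, .bucket = -1 },
};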
/* A mutex for "tile_net_devs_for_channel". */
static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
/* The per-cpu info. */
static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
-/* The "context" for all devices. */
-static gxio_mpipe_context_t context;
-/* Buffer sizes and mpipe enum codes for buffer stacks.
+/* The buffer size enums for each buffer stack.
* See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ * We avoid the "10384" size because it can induce "false chaining"
+ * on "cut-through" jumbo packets.
*/
-#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
-#define BUFFER_SIZE_SMALL 128
-#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
-#define BUFFER_SIZE_LARGE 1664
-
-/* The small/large "buffer stacks". */
-static int small_buffer_stack = -1;
-static int large_buffer_stack = -1;
-
-/* Amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_size;
-
-/* The actual memory allocated for the buffer stacks. */
-static void *small_buffer_stack_va;
-static void *large_buffer_stack_va;
-
-/* The buckets. */
-static int first_bucket = -1;
-static int num_buckets = 1;
-
-/* The ingress irq. */
-static int ingress_irq = -1;
+static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
+ GXIO_MPIPE_BUFFER_SIZE_128,
+ GXIO_MPIPE_BUFFER_SIZE_1664,
+ GXIO_MPIPE_BUFFER_SIZE_16384
+};
/* Text value of tile_net.cpus if passed as a module parameter. */
static char *network_cpus_string;
@@ -232,11 +271,21 @@ static char *network_cpus_string;
/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;
-/* If "loopify=LINK" was specified, this is "LINK". */
+/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
static char *loopify_link_name;
-/* If "tile_net.custom" was specified, this is non-NULL. */
-static char *custom_str;
+/* If "tile_net.custom" was specified, this is true. */
+static bool custom_flag;
+
+/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
+static uint jumbo_num;
+
+/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
+static inline int mpipe_instance(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ return priv->instance;
+}
/* The "tile_net.cpus" argument specifies the cpus that are dedicated
* to handle ingress packets.
@@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
/* The "tile_net.custom" argument causes us to ignore the "conventional"
* classifier metadata, in particular, the "l2_offset".
*/
-module_param_named(custom, custom_str, charp, 0444);
+module_param_named(custom, custom_flag, bool, 0444);
MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
+ * and to allocate the given number of "jumbo" buffers.
+ */
+module_param_named(jumbo, jumbo_num, uint, 0444);
+MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
+
/* Atomically update a statistics field.
* Note that on TILE-Gx, this operation is fire-and-forget on the
* issuing core (single-cycle dispatch) and takes only a few cycles
@@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
}
/* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(bool small)
+static bool tile_net_provide_buffer(int instance, int kind)
{
- int stack = small ? small_buffer_stack : large_buffer_stack;
+ struct mpipe_data *md = &mpipe_data[instance];
+ gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
+ size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
const unsigned long buffer_alignment = 128;
struct sk_buff *skb;
int len;
- len = sizeof(struct sk_buff **) + buffer_alignment;
- len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+ len = sizeof(struct sk_buff **) + buffer_alignment + bs;
skb = dev_alloc_skb(len);
if (skb == NULL)
return false;
@@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small)
/* Make sure "skb" and the back-pointer have been flushed. */
wmb();
- gxio_mpipe_push_buffer(&context, stack,
+ gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
(void *)va_to_tile_io_addr(skb->data));
return true;
@@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
return skb;
}
-static void tile_net_pop_all_buffers(int stack)
+static void tile_net_pop_all_buffers(int instance, int stack)
{
+ struct mpipe_data *md = &mpipe_data[instance];
+
for (;;) {
tile_io_addr_t addr =
- (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+ (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
+ stack);
if (addr == 0)
break;
dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack)
static void tile_net_provide_needed_buffers(void)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ int instance, kind;
+ for (instance = 0; instance < NR_MPIPE_MAX &&
+ info->mpipe[instance].has_iqueue; instance++) {
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ while (info->mpipe[instance].num_needed_buffers[kind]
+ != 0) {
+ if (!tile_net_provide_buffer(instance, kind)) {
+ pr_notice("Tile %d still needs"
+ " some buffers\n",
+ info->my_cpu);
+ return;
+ }
+ info->mpipe[instance].
+ num_needed_buffers[kind]--;
+ }
+ }
+ }
+}
- while (info->num_needed_small_buffers != 0) {
- if (!tile_net_provide_buffer(true))
- goto oops;
- info->num_needed_small_buffers--;
+/* Get RX timestamp, and store it in the skb. */
+static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
+ gxio_mpipe_idesc_t *idesc)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
+ idesc->time_stamp_ns);
}
+#endif
+}
- while (info->num_needed_large_buffers != 0) {
- if (!tile_net_provide_buffer(false))
- goto oops;
- info->num_needed_large_buffers--;
+/* Get TX timestamp, and store it in the skb. */
+static void tile_tx_timestamp(struct sk_buff *skb, int instance)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct skb_shared_info *shtx = skb_shinfo(skb);
+ if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec ts;
+
+ shtx->tx_flags |= SKBTX_IN_PROGRESS;
+ gxio_mpipe_get_timestamp(&md->context, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
+#endif
+}
- return;
+/* Use ioctl() to enable or disable TX or RX timestamping. */
+static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct hwtstamp_config config;
+ struct tile_net_priv *priv = netdev_priv(dev);
-oops:
- /* Add a description to the page allocation failure dump. */
- pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ priv->stamp_cfg = config;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
}
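From user space, the SIOCSHWTSTAMP path handled above is normally driven with the standard hwtstamp_config ioctl. A minimal sketch, assuming a plain IPv4 UDP socket and the placeholder interface name "eth0" (error handling trimmed):

/* Ask the driver for RX timestamps on all packets and no TX timestamps. */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int request_rx_timestamps(int sock)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_OFF;
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}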
static inline bool filter_packet(struct net_device *dev, void *buf)
@@ -398,7 +544,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf)
/* Filter out packets that aren't for us. */
if (!(dev->flags & IFF_PROMISC) &&
!is_multicast_ether_addr(buf) &&
- compare_ether_addr(dev->dev_addr, buf) != 0)
+ !ether_addr_equal(dev->dev_addr, buf))
return true;
return false;
@@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
+ int instance = priv->instance;
/* Encode the actual packet length. */
skb_put(skb, len);
@@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_receive_skb(skb);
+ /* Get RX timestamp from idesc. */
+ tile_rx_timestamp(priv, skb, idesc);
+
+ napi_gro_receive(&info->mpipe[instance].napi, skb);
/* Update stats. */
- tile_net_stats_add(1, &priv->stats.rx_packets);
- tile_net_stats_add(len, &priv->stats.rx_bytes);
+ tile_net_stats_add(1, &dev->stats.rx_packets);
+ tile_net_stats_add(len, &dev->stats.rx_bytes);
/* Need a new buffer. */
- if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
- info->num_needed_small_buffers++;
+ if (idesc->size == buffer_size_enums[0])
+ info->mpipe[instance].num_needed_buffers[0]++;
+ else if (idesc->size == buffer_size_enums[1])
+ info->mpipe[instance].num_needed_buffers[1]++;
else
- info->num_needed_large_buffers++;
+ info->mpipe[instance].num_needed_buffers[2]++;
}
/* Handle a packet. Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
uint8_t l2_offset;
void *va;
void *buf;
unsigned long len;
bool filter;
- /* Drop packets for which no buffer was available.
- * NOTE: This happens under heavy load.
+ /* Drop packets for which no buffer was available (which can
+ * happen under heavy load), or for which the me/tr/ce flags
+ * are set (which can happen for jumbo cut-through packets,
+ * or with a customized classifier).
*/
- if (idesc->be) {
- struct tile_net_priv *priv = netdev_priv(dev);
- tile_net_stats_add(1, &priv->stats.rx_dropped);
- gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
- if (net_ratelimit())
- pr_info("Dropping packet (insufficient buffers).\n");
- return false;
+ if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
+ if (dev)
+ tile_net_stats_add(1, &dev->stats.rx_errors);
+ goto drop;
}
/* Get the "l2_offset", if allowed. */
- l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+ l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
- /* Get the raw buffer VA (includes "headroom"). */
- va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+ /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
+ va = tile_io_addr_to_va((unsigned long)idesc->va);
/* Get the actual packet start/length. */
buf = va + l2_offset;
@@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
filter = filter_packet(dev, buf);
if (filter) {
- gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+ if (dev)
+ tile_net_stats_add(1, &dev->stats.rx_dropped);
+drop:
+ gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
} else {
struct sk_buff *skb = mpipe_buf_to_skb(va);
@@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
tile_net_receive_skb(dev, skb, idesc, len);
}
- gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
return !filter;
}
@@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
unsigned int work = 0;
gxio_mpipe_idesc_t *idesc;
- int i, n;
-
- /* Process packets. */
- while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+ int instance, i, n;
+ struct mpipe_data *md;
+ struct info_mpipe *info_mpipe =
+ container_of(napi, struct info_mpipe, napi);
+
+ instance = info_mpipe->instance;
+ while ((n = gxio_mpipe_iqueue_try_peek(
+ &info_mpipe->iqueue,
+ &idesc)) > 0) {
for (i = 0; i < n; i++) {
if (i == TILE_NET_BATCH)
goto done;
- if (tile_net_handle_packet(idesc + i)) {
+ if (tile_net_handle_packet(instance,
+ idesc + i)) {
if (++work >= budget)
goto done;
}
@@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
/* There are no packets left. */
- napi_complete(&info->napi);
+ napi_complete(&info_mpipe->napi);
+ md = &mpipe_data[instance];
/* Re-enable hypervisor interrupts. */
- gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+ gxio_mpipe_enable_notif_ring_interrupt(
+ &md->context, info->mpipe[instance].iqueue.ring);
/* HACK: Avoid the "rotting packet" problem. */
- if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
- napi_schedule(&info->napi);
+ if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+ napi_schedule(&info_mpipe->napi);
/* ISSUE: Handle completions? */
@@ -533,11 +696,11 @@ done:
return work;
}
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- napi_schedule(&info->napi);
+ napi_schedule(&info->mpipe[(uint64_t)id].napi);
return IRQ_HANDLED;
}
@@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
{
struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
struct tile_net_priv *priv = netdev_priv(dev);
- struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+ int instance = priv->instance;
+ struct tile_net_tx_wake *tx_wake =
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_start(&tx_wake->timer,
ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
unsigned long irqflags;
bool pending = false;
- int i;
+ int i, instance;
local_irq_save(irqflags);
@@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
info->egress_timer_scheduled = false;
/* Free all possible comps for this tile. */
- for (i = 0; i < TILE_NET_CHANNELS; i++) {
- struct tile_net_egress *egress = &egress_for_echannel[i];
- struct tile_net_comps *comps = info->comps_for_echannel[i];
- if (comps->comp_last >= comps->comp_next)
- continue;
- tile_net_free_comps(egress->equeue, comps, -1, true);
- pending = pending || (comps->comp_last < comps->comp_next);
+ for (instance = 0; instance < NR_MPIPE_MAX &&
+ info->mpipe[instance].has_iqueue; instance++) {
+ for (i = 0; i < TILE_NET_CHANNELS; i++) {
+ struct tile_net_egress *egress =
+ &mpipe_data[instance].egress_for_echannel[i];
+ struct tile_net_comps *comps =
+ info->mpipe[instance].comps_for_echannel[i];
+ if (!egress || comps->comp_last >= comps->comp_next)
+ continue;
+ tile_net_free_comps(egress->equeue, comps, -1, true);
+ pending = pending ||
+ (comps->comp_last < comps->comp_next);
+ }
}
/* Reschedule timer if needed. */
@@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-/* Helper function for "tile_net_update()".
- * "dev" (i.e. arg) is the device being brought up or down,
- * or NULL if all devices are now down.
- */
-static void tile_net_update_cpu(void *arg)
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+
+/* PTP clock operations. */
+
+static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- struct net_device *dev = arg;
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
+ ret = -EINVAL;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
- if (!info->has_iqueue)
- return;
+static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_adjust_timestamp(&md->context, delta))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
- if (dev != NULL) {
- if (!info->napi_added) {
- netif_napi_add(dev, &info->napi,
- tile_net_poll, TILE_NET_WEIGHT);
- info->napi_added = true;
- }
- if (!info->napi_enabled) {
- napi_enable(&info->napi);
- info->napi_enabled = true;
- }
- enable_percpu_irq(ingress_irq, 0);
- } else {
- disable_percpu_irq(ingress_irq);
- if (info->napi_enabled) {
- napi_disable(&info->napi);
- info->napi_enabled = false;
- }
- /* FIXME: Drain the iqueue. */
- }
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_get_timestamp(&md->context, ts))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
+
+static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_set_timestamp(&md->context, ts))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
+
+static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_mpipe_caps = {
+ .owner = THIS_MODULE,
+ .name = "mPIPE clock",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfreq = ptp_mpipe_adjfreq,
+ .adjtime = ptp_mpipe_adjtime,
+ .gettime = ptp_mpipe_gettime,
+ .settime = ptp_mpipe_settime,
+ .enable = ptp_mpipe_enable,
+};
+
+#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
+
+/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
+static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ gxio_mpipe_set_timestamp(&md->context, &ts);
+
+ mutex_init(&md->ptp_lock);
+ md->caps = ptp_mpipe_caps;
+ md->ptp_clock = ptp_clock_register(&md->caps, NULL);
+ if (IS_ERR(md->ptp_clock))
+ netdev_err(dev, "ptp_clock_register failed %ld\n",
+ PTR_ERR(md->ptp_clock));
+#endif
+}
+
+/* Initialize PTP fields in a new device. */
+static void init_ptp_dev(struct tile_net_priv *priv)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+#endif
+}
+
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
+{
+ enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+ disable_percpu_irq((long)irq);
}
/* Helper function for tile_net_open() and tile_net_stop().
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev)
{
static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
bool saw_channel = false;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
int channel;
int rc;
int cpu;
- gxio_mpipe_rules_init(&rules, &context);
+ saw_channel = false;
+ gxio_mpipe_rules_init(&rules, &md->context);
for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
- if (tile_net_devs_for_channel[channel] == NULL)
+ if (md->tile_net_devs_for_channel[channel] == NULL)
continue;
if (!saw_channel) {
saw_channel = true;
- gxio_mpipe_rules_begin(&rules, first_bucket,
- num_buckets, NULL);
+ gxio_mpipe_rules_begin(&rules, md->first_bucket,
+ md->num_buckets, NULL);
gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
}
gxio_mpipe_rules_add_channel(&rules, channel);
@@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev)
*/
rc = gxio_mpipe_rules_commit(&rules);
if (rc != 0) {
- netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+ netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+ instance, rc);
return -EIO;
}
- /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, tile_net_update_cpu,
- (saw_channel ? dev : NULL), 1);
+ /* Update all cpus, sequentially (to protect "netif_napi_add()").
+ * We use on_each_cpu to handle the IPI mask or unmask.
+ */
+ if (!saw_channel)
+ on_each_cpu(disable_ingress_irq,
+ (void *)(long)(md->ingress_irq), 1);
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+
+ if (!info->mpipe[instance].has_iqueue)
+ continue;
+ if (saw_channel) {
+ if (!info->mpipe[instance].napi_added) {
+ netif_napi_add(dev, &info->mpipe[instance].napi,
+ tile_net_poll, TILE_NET_WEIGHT);
+ info->mpipe[instance].napi_added = true;
+ }
+ if (!info->mpipe[instance].napi_enabled) {
+ napi_enable(&info->mpipe[instance].napi);
+ info->mpipe[instance].napi_enabled = true;
+ }
+ } else {
+ if (info->mpipe[instance].napi_enabled) {
+ napi_disable(&info->mpipe[instance].napi);
+ info->mpipe[instance].napi_enabled = false;
+ }
+ /* FIXME: Drain the iqueue. */
+ }
+ }
+ if (saw_channel)
+ on_each_cpu(enable_ingress_irq,
+ (void *)(long)(md->ingress_irq), 1);
/* HACK: Allow packets to flow in the simulator. */
if (saw_channel)
- sim_enable_mpipe_links(0, -1);
+ sim_enable_mpipe_links(instance, -1);
return 0;
}
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for both small and large packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+/* Initialize a buffer stack. */
+static int create_buffer_stack(struct net_device *dev,
+ int kind, size_t num_buffers)
{
pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
- int rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+ size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
+ int stack_idx = md->first_buffer_stack + kind;
+ void *va;
+ int i, rc;
- /* Compute stack bytes; we round up to 64KB and then use
- * alloc_pages() so we get the required 64KB alignment as well.
+ /* Round up to 64KB and then use alloc_pages() so we get the
+ * required 64KB alignment.
*/
- buffer_stack_size =
- ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
- 64 * 1024);
+ md->buffer_stack_bytes[kind] =
+ ALIGN(needed, 64 * 1024);
- /* Allocate two buffer stack indices. */
- rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
- if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
- rc);
- return rc;
- }
- small_buffer_stack = rc;
- large_buffer_stack = rc + 1;
-
- /* Allocate the small memory stack. */
- small_buffer_stack_va =
- alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
- if (small_buffer_stack_va == NULL) {
+ va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
+ if (va == NULL) {
netdev_err(dev,
- "Could not alloc %zd bytes for buffer stacks\n",
- buffer_stack_size);
+ "Could not alloc %zd bytes for buffer stack %d\n",
+ md->buffer_stack_bytes[kind], kind);
return -ENOMEM;
}
- rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
- BUFFER_SIZE_SMALL_ENUM,
- small_buffer_stack_va,
- buffer_stack_size, 0);
+
+ /* Initialize the buffer stack. */
+ rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+ buffer_size_enums[kind], va,
+ md->buffer_stack_bytes[kind], 0);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+ instance, rc);
+ free_pages_exact(va, md->buffer_stack_bytes[kind]);
return rc;
}
- rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+
+ md->buffer_stack_vas[kind] = va;
+
+ rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
hash_pte, 0);
if (rc != 0) {
netdev_err(dev,
- "gxio_mpipe_register_buffer_memory failed: %d\n",
- rc);
+ "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
- /* Allocate the large buffer stack. */
- large_buffer_stack_va =
- alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
- if (large_buffer_stack_va == NULL) {
- netdev_err(dev,
- "Could not alloc %zd bytes for buffer stacks\n",
- buffer_stack_size);
- return -ENOMEM;
- }
- rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
- BUFFER_SIZE_LARGE_ENUM,
- large_buffer_stack_va,
- buffer_stack_size, 0);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
- rc);
- return rc;
+ /* Provide initial buffers. */
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(instance, kind)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ return -ENOMEM;
+ }
}
- rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
- hash_pte, 0);
- if (rc != 0) {
+
+ return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev,
+ int network_cpus_count)
+{
+ int num_kinds = MAX_KINDS - (jumbo_num == 0);
+ size_t num_buffers;
+ int rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ /* Allocate the buffer stacks. */
+ rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
+ if (rc < 0) {
netdev_err(dev,
- "gxio_mpipe_register_buffer_memory failed: %d\n",
- rc);
+ "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
+ md->first_buffer_stack = rc;
- return 0;
+ /* Enough small/large buffers to (normally) avoid buffer errors. */
+ num_buffers =
+ network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+
+ /* Allocate the small memory stack. */
+ if (rc >= 0)
+ rc = create_buffer_stack(dev, 0, num_buffers);
+
+ /* Allocate the large buffer stack. */
+ if (rc >= 0)
+ rc = create_buffer_stack(dev, 1, num_buffers);
+
+ /* Allocate the jumbo buffer stack if needed. */
+ if (rc >= 0 && jumbo_num != 0)
+ rc = create_buffer_stack(dev, 2, jumbo_num);
+
+ return rc;
}
/* Allocate per-cpu resources (memory for completions and idescs).
@@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
{
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
int order, i, rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
struct page *page;
void *addr;
@@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
addr = pfn_to_kaddr(page_to_pfn(page));
memset(addr, 0, COMPS_SIZE);
for (i = 0; i < TILE_NET_CHANNELS; i++)
- info->comps_for_echannel[i] =
+ info->mpipe[instance].comps_for_echannel[i] =
addr + i * sizeof(struct tile_net_comps);
/* If this is a network cpu, create an iqueue. */
@@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
return -ENOMEM;
}
addr = pfn_to_kaddr(page_to_pfn(page));
- rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
- addr, NOTIF_RING_SIZE, 0);
+ rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+ &md->context, ring++, addr,
+ NOTIF_RING_SIZE, 0);
if (rc < 0) {
netdev_err(dev,
"gxio_mpipe_iqueue_init failed: %d\n", rc);
return rc;
}
- info->has_iqueue = true;
+ info->mpipe[instance].has_iqueue = true;
}
return ring;
@@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
int ring, int network_cpus_count)
{
int group, rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
/* Allocate one NotifGroup. */
- rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+ rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
- rc);
+ netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
group = rc;
/* Initialize global num_buckets value. */
if (network_cpus_count > 4)
- num_buckets = 256;
+ md->num_buckets = 256;
else if (network_cpus_count > 1)
- num_buckets = 16;
+ md->num_buckets = 16;
/* Allocate some buckets, and set global first_bucket value. */
- rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+ rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
- first_bucket = rc;
+ md->first_bucket = rc;
/* Init group and buckets. */
rc = gxio_mpipe_init_notif_group_and_buckets(
- &context, group, ring, network_cpus_count,
- first_bucket, num_buckets,
+ &md->context, group, ring, network_cpus_count,
+ md->first_bucket, md->num_buckets,
GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
if (rc != 0) {
- netdev_err(
- dev,
- "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
- rc);
+ netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
+ "mpipe[%d] %d\n", instance, rc);
return rc;
}
@@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
*/
static int tile_net_setup_interrupts(struct net_device *dev)
{
- int cpu, rc;
+ int cpu, rc, irq;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ irq = md->ingress_irq;
+ if (irq < 0) {
+ irq = create_irq();
+ if (irq < 0) {
+ netdev_err(dev,
+ "create_irq failed: mpipe[%d] %d\n",
+ instance, irq);
+ return irq;
+ }
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
- rc = create_irq();
- if (rc < 0) {
- netdev_err(dev, "create_irq failed: %d\n", rc);
- return rc;
- }
- ingress_irq = rc;
- tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
- rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
- 0, "tile_net", NULL);
- if (rc != 0) {
- netdev_err(dev, "request_irq failed: %d\n", rc);
- destroy_irq(ingress_irq);
- ingress_irq = -1;
- return rc;
+ rc = request_irq(irq, tile_net_handle_ingress_irq,
+ 0, "tile_net", (void *)((uint64_t)instance));
+
+ if (rc != 0) {
+ netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+ instance, rc);
+ destroy_irq(irq);
+ return rc;
+ }
+ md->ingress_irq = irq;
}
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- if (info->has_iqueue) {
- gxio_mpipe_request_notif_ring_interrupt(
- &context, cpu_x(cpu), cpu_y(cpu),
- KERNEL_PL, ingress_irq, info->iqueue.ring);
+ if (info->mpipe[instance].has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(&md->context,
+ cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
+ info->mpipe[instance].iqueue.ring);
}
}
@@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
}
/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
{
- int cpu;
+ int kind, cpu;
+ struct mpipe_data *md = &mpipe_data[instance];
/* Do cleanups that require the mpipe context first. */
- if (small_buffer_stack >= 0)
- tile_net_pop_all_buffers(small_buffer_stack);
- if (large_buffer_stack >= 0)
- tile_net_pop_all_buffers(large_buffer_stack);
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ if (md->buffer_stack_vas[kind] != NULL) {
+ tile_net_pop_all_buffers(instance,
+ md->first_buffer_stack +
+ kind);
+ }
+ }
/* Destroy mpipe context so the hardware no longer owns any memory. */
- gxio_mpipe_destroy(&context);
+ gxio_mpipe_destroy(&md->context);
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- free_pages((unsigned long)(info->comps_for_echannel[0]),
- get_order(COMPS_SIZE));
- info->comps_for_echannel[0] = NULL;
- free_pages((unsigned long)(info->iqueue.idescs),
+ free_pages(
+ (unsigned long)(
+ info->mpipe[instance].comps_for_echannel[0]),
+ get_order(COMPS_SIZE));
+ info->mpipe[instance].comps_for_echannel[0] = NULL;
+ free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
get_order(NOTIF_RING_SIZE));
- info->iqueue.idescs = NULL;
+ info->mpipe[instance].iqueue.idescs = NULL;
}
- if (small_buffer_stack_va)
- free_pages_exact(small_buffer_stack_va, buffer_stack_size);
- if (large_buffer_stack_va)
- free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ if (md->buffer_stack_vas[kind] != NULL) {
+ free_pages_exact(md->buffer_stack_vas[kind],
+ md->buffer_stack_bytes[kind]);
+ md->buffer_stack_vas[kind] = NULL;
+ }
+ }
- small_buffer_stack_va = NULL;
- large_buffer_stack_va = NULL;
- large_buffer_stack = -1;
- small_buffer_stack = -1;
- first_bucket = -1;
+ md->first_buffer_stack = -1;
+ md->first_bucket = -1;
}
/* The first time any tilegx network device is opened, we initialize
@@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void)
*/
static int tile_net_init_mpipe(struct net_device *dev)
{
- int i, num_buffers, rc;
+ int rc;
int cpu;
int first_ring, ring;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
int network_cpus_count = cpus_weight(network_cpus_map);
if (!hash_default) {
@@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev)
return -EIO;
}
- rc = gxio_mpipe_init(&context, 0);
+ rc = gxio_mpipe_init(&md->context, instance);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+ instance, rc);
return -EIO;
}
/* Set up the buffer stacks. */
- num_buffers =
- network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
- rc = init_buffer_stacks(dev, num_buffers);
+ rc = init_buffer_stacks(dev, network_cpus_count);
if (rc != 0)
goto fail;
- /* Provide initial buffers. */
- rc = -ENOMEM;
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(true)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- goto fail;
- }
- }
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(false)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- goto fail;
- }
- }
-
/* Allocate one NotifRing for each network cpu. */
- rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+ rc = gxio_mpipe_alloc_notif_rings(&md->context,
+ network_cpus_count, 0, 0);
if (rc < 0) {
netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
rc);
@@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev)
if (rc != 0)
goto fail;
+ /* Register PTP clock and set mPIPE timestamp, if configured. */
+ register_ptp_clock(dev, md);
+
return 0;
fail:
- tile_net_init_mpipe_fail();
+ tile_net_init_mpipe_fail(instance);
return rc;
}
@@ -1063,17 +1369,19 @@ fail:
*/
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
+ static int ering = -1;
struct page *headers_page, *edescs_page, *equeue_page;
gxio_mpipe_edesc_t *edescs;
gxio_mpipe_equeue_t *equeue;
unsigned char *headers;
int headers_order, edescs_order, equeue_order;
size_t edescs_size;
- int edma;
int rc = -ENOMEM;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
/* Only initialize once. */
- if (egress_for_echannel[echannel].equeue != NULL)
+ if (md->egress_for_echannel[echannel].equeue != NULL)
return 0;
/* Allocate memory for the "headers". */
@@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
}
equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
- /* Allocate an edma ring. Note that in practice this can't
- * fail, which is good, because we will leak an edma ring if so.
- */
- rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
- if (rc < 0) {
- netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
- rc);
- goto fail_equeue;
+ /* Allocate an edma ring (using a one entry "free list"). */
+ if (ering < 0) {
+ rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+ "mpipe[%d] %d\n", instance, rc);
+ goto fail_equeue;
+ }
+ ering = rc;
}
- edma = rc;
/* Initialize the equeue. */
- rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+ rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
edescs, edescs_size, 0);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+ instance, rc);
goto fail_equeue;
}
+ /* Don't reuse the ering later. */
+ ering = -1;
+
+ if (jumbo_num != 0) {
+ /* Make sure "jumbo" packets can be egressed safely. */
+ if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
+ /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
+ netdev_warn(dev, "Jumbo packets may not be egressed"
+ " properly on channel %d\n", echannel);
+ }
+ }
+
/* Done. */
- egress_for_echannel[echannel].equeue = equeue;
- egress_for_echannel[echannel].headers = headers;
+ md->egress_for_echannel[echannel].equeue = equeue;
+ md->egress_for_echannel[echannel].headers = headers;
return 0;
fail_equeue:
@@ -1151,11 +1472,25 @@ fail:
static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
const char *link_name)
{
- int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+ int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
if (rc < 0) {
- netdev_err(dev, "Failed to open '%s'\n", link_name);
+ netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
+ link_name, instance, rc);
return rc;
}
+ if (jumbo_num != 0) {
+ u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
+ rc = gxio_mpipe_link_set_attr(link, attr, 1);
+ if (rc != 0) {
+ netdev_err(dev,
+ "Cannot receive jumbo packets on '%s'\n",
+ link_name);
+ gxio_mpipe_link_close(link);
+ return rc;
+ }
+ }
rc = gxio_mpipe_link_channel(link);
if (rc < 0 || rc >= TILE_NET_CHANNELS) {
netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
@@ -1169,12 +1504,23 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
static int tile_net_open(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
- int cpu, rc;
+ int cpu, rc, instance;
mutex_lock(&tile_net_devs_for_channel_mutex);
- /* Do one-time initialization the first time any device is opened. */
- if (ingress_irq < 0) {
+ /* Get the instance info. */
+ rc = gxio_mpipe_link_instance(dev->name);
+ if (rc < 0 || rc >= NR_MPIPE_MAX) {
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+ return -EIO;
+ }
+
+ priv->instance = rc;
+ instance = rc;
+ if (!mpipe_data[rc].context.mmio_fast_base) {
+ /* Do one-time initialization per instance the first time
+ * any device is opened.
+ */
rc = tile_net_init_mpipe(dev);
if (rc != 0)
goto fail;
@@ -1205,7 +1551,7 @@ static int tile_net_open(struct net_device *dev)
if (rc != 0)
goto fail;
- tile_net_devs_for_channel[priv->channel] = dev;
+ mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
rc = tile_net_update(dev);
if (rc != 0)
@@ -1217,7 +1563,7 @@ static int tile_net_open(struct net_device *dev)
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
struct tile_net_tx_wake *tx_wake =
- &info->tx_wake[priv->echannel];
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1243,7 +1589,7 @@ fail:
priv->channel = -1;
}
priv->echannel = -1;
- tile_net_devs_for_channel[priv->channel] = NULL;
+ mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
mutex_unlock(&tile_net_devs_for_channel_mutex);
/* Don't return raw gxio error codes to generic Linux. */
@@ -1255,18 +1601,20 @@ static int tile_net_stop(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
int cpu;
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
struct tile_net_tx_wake *tx_wake =
- &info->tx_wake[priv->echannel];
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_cancel(&tx_wake->timer);
netif_stop_subqueue(dev, cpu);
}
mutex_lock(&tile_net_devs_for_channel_mutex);
- tile_net_devs_for_channel[priv->channel] = NULL;
+ md->tile_net_devs_for_channel[priv->channel] = NULL;
(void)tile_net_update(dev);
if (priv->loopify_channel >= 0) {
if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1374,20 +1722,21 @@ static int tso_count_edescs(struct sk_buff *skb)
return num_edescs;
}
-/* Prepare modified copies of the skbuff headers.
- * FIXME: add support for IPv6.
- */
+/* Prepare modified copies of the skbuff headers. */
static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
s64 slot)
{
struct skb_shared_info *sh = skb_shinfo(skb);
struct iphdr *ih;
+ struct ipv6hdr *ih6;
struct tcphdr *th;
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
- unsigned int isum_seed, tsum_seed, id, seq;
+ unsigned int isum_seed, tsum_seed, seq;
+ unsigned int uninitialized_var(id);
+ int is_ipv6;
long f_id = -1; /* id of the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
long f_used = 0; /* bytes used from the current fragment */
@@ -1395,18 +1744,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
int segment;
/* Locate original headers and compute various lengths. */
- ih = ip_hdr(skb);
+ is_ipv6 = skb_is_gso_v6(skb);
+ if (is_ipv6) {
+ ih6 = ipv6_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ } else {
+ ih = ip_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ isum_seed = ((0xFFFF - ih->check) +
+ (0xFFFF - ih->tot_len) +
+ (0xFFFF - ih->id));
+ id = ntohs(ih->id);
+ }
+
th = tcp_hdr(skb);
- ih_off = skb_network_offset(skb);
th_off = skb_transport_offset(skb);
p_len = sh->gso_size;
- /* Set up seed values for IP and TCP csum and initialize id and seq. */
- isum_seed = ((0xFFFF - ih->check) +
- (0xFFFF - ih->tot_len) +
- (0xFFFF - ih->id));
tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
- id = ntohs(ih->id);
seq = ntohl(th->seq);
/* Prepare all the headers. */
@@ -1420,11 +1775,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
memcpy(buf, data, sh_len);
/* Update copied ip header. */
- ih = (struct iphdr *)(buf + ih_off);
- ih->tot_len = htons(sh_len + p_len - ih_off);
- ih->id = htons(id);
- ih->check = csum_long(isum_seed + ih->tot_len +
- ih->id) ^ 0xffff;
+ if (is_ipv6) {
+ ih6 = (struct ipv6hdr *)(buf + ih_off);
+ ih6->payload_len = htons(sh_len + p_len - ih_off -
+ sizeof(*ih6));
+ } else {
+ ih = (struct iphdr *)(buf + ih_off);
+ ih->tot_len = htons(sh_len + p_len - ih_off);
+ ih->id = htons(id++);
+ ih->check = csum_long(isum_seed + ih->tot_len +
+ ih->id) ^ 0xffff;
+ }
/* Update copied tcp header. */
th = (struct tcphdr *)(buf + th_off);
@@ -1458,7 +1819,6 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
slot++;
}
- id++;
seq += p_len;
/* The last segment may be less than gso_size. */
@@ -1475,8 +1835,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
struct sk_buff *skb, unsigned char *headers, s64 slot)
{
- struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
@@ -1499,8 +1860,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
edesc_head.xfer_size = sh_len;
/* This is only used to specify the TLB. */
- edesc_head.stack_idx = large_buffer_stack;
- edesc_body.stack_idx = large_buffer_stack;
+ edesc_head.stack_idx = md->first_buffer_stack;
+ edesc_body.stack_idx = md->first_buffer_stack;
/* Egress all the edescs. */
for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1553,8 +1914,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
}
/* Update stats. */
- tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
- tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+ tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
+ tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
}
/* Do "TSO" handling for egress.
@@ -1575,8 +1936,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
int channel = priv->echannel;
- struct tile_net_egress *egress = &egress_for_echannel[channel];
- struct tile_net_comps *comps = info->comps_for_echannel[channel];
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+ struct tile_net_comps *comps =
+ info->mpipe[instance].comps_for_echannel[channel];
gxio_mpipe_equeue_t *equeue = egress->equeue;
unsigned long irqflags;
int num_edescs;
@@ -1640,10 +2004,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
- struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct tile_net_egress *egress =
+ &md->egress_for_echannel[priv->echannel];
gxio_mpipe_equeue_t *equeue = egress->equeue;
struct tile_net_comps *comps =
- info->comps_for_echannel[priv->echannel];
+ info->mpipe[instance].comps_for_echannel[priv->echannel];
unsigned int len = skb->len;
unsigned char *data = skb->data;
unsigned int num_edescs;
@@ -1660,7 +2027,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
/* This is only used to specify the TLB. */
- edesc.stack_idx = large_buffer_stack;
+ edesc.stack_idx = md->first_buffer_stack;
/* Prepare the edescs. */
for (i = 0; i < num_edescs; i++) {
@@ -1693,13 +2060,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < num_edescs; i++)
gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+ /* Store TX timestamp if needed. */
+ tile_tx_timestamp(skb, instance);
+
/* Add a completion record. */
add_comp(equeue, comps, slot - 1, skb);
/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
- tile_net_stats_add(1, &priv->stats.tx_packets);
+ tile_net_stats_add(1, &dev->stats.tx_packets);
tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
- &priv->stats.tx_bytes);
+ &dev->stats.tx_bytes);
local_irq_restore(irqflags);
@@ -1727,20 +2097,18 @@ static void tile_net_tx_timeout(struct net_device *dev)
/* Ioctl commands. */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- return -EOPNOTSUPP;
-}
+ if (cmd == SIOCSHWTSTAMP)
+ return tile_hwtstamp_ioctl(dev, rq, cmd);
-/* Get system network statistics for device. */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- return &priv->stats;
+ return -EOPNOTSUPP;
}
/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
- if ((new_mtu < 68) || (new_mtu > 1500))
+ if (new_mtu < 68)
+ return -EINVAL;
+ if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@@ -1772,9 +2140,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
*/
static void tile_net_netpoll(struct net_device *dev)
{
- disable_percpu_irq(ingress_irq);
- tile_net_handle_ingress_irq(ingress_irq, NULL);
- enable_percpu_irq(ingress_irq, 0);
+ int instance = mpipe_instance(dev);
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ disable_percpu_irq(md->ingress_irq);
+ napi_schedule(&info->mpipe[instance].napi);
+ enable_percpu_irq(md->ingress_irq, 0);
}
#endif
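The netpoll path above now masks the ingress IRQ and lets the NAPI poll loop drain the queue, instead of calling the interrupt handler directly. A driver-agnostic sketch of that pattern (names are illustrative, not this driver's):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

#ifdef CONFIG_NET_POLL_CONTROLLER
static void example_netpoll(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	disable_irq(dev->irq);		/* keep the hard IRQ from racing the poll */
	napi_schedule(&priv->napi);	/* the NAPI handler does the real work */
	enable_irq(dev->irq);
}
#endif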
@@ -1784,7 +2156,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_select_queue = tile_net_select_queue,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats = tile_net_get_stats,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
@@ -1800,14 +2171,21 @@ static const struct net_device_ops tile_net_ops = {
*/
static void tile_net_setup(struct net_device *dev)
{
+ netdev_features_t features = 0;
+
ether_setup(dev);
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
- dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_HW_CSUM;
- dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_TSO;
dev->mtu = 1500;
+
+ features |= NETIF_F_HW_CSUM;
+ features |= NETIF_F_SG;
+ features |= NETIF_F_TSO;
+ features |= NETIF_F_TSO6;
+
+ dev->hw_features |= features;
+ dev->vlan_features |= features;
+ dev->features |= features;
}
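Collecting the flags once and mirroring them into hw_features and vlan_features (rather than setting dev->features alone, as before) is what makes the offloads user-toggleable via ethtool and inherited by stacked VLAN devices. Illustrative only:

netdev_features_t features = NETIF_F_HW_CSUM | NETIF_F_SG |
			     NETIF_F_TSO | NETIF_F_TSO6;

dev->hw_features   |= features;	/* set that ethtool may toggle at runtime */
dev->vlan_features |= features;	/* inherited by VLAN devices on top of us */
dev->features      |= features;	/* currently enabled set */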
/* Allocate the device structure, register the device, and obtain the
@@ -1842,6 +2220,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
priv->channel = -1;
priv->loopify_channel = -1;
priv->echannel = -1;
+ init_ptp_dev(priv);
/* Get the MAC address and set it in the device struct; this must
* be done before the device is opened. If the MAC is all zeroes,
@@ -1871,9 +2250,12 @@ static void tile_net_init_module_percpu(void *unused)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
int my_cpu = smp_processor_id();
+ int instance;
- info->has_iqueue = false;
-
+ for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+ info->mpipe[instance].has_iqueue = false;
+ info->mpipe[instance].instance = instance;
+ }
info->my_cpu = my_cpu;
/* Initialize the egress timer. */
@@ -1890,6 +2272,8 @@ static int __init tile_net_init_module(void)
pr_info("Tilera Network Driver\n");
+ BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
mutex_init(&tile_net_devs_for_channel_mutex);
/* Initialize each CPU. */
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 36435499814..106be47716e 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -31,6 +31,7 @@
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -88,13 +89,6 @@
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500
-/* HACK: Define to support GSO. */
-/* ISSUE: This may actually hurt performance of the TCP blaster. */
-/* #define TILE_NET_GSO */
-
-/* Define this to collapse "duplicate" acks. */
-/* #define IGNORE_DUP_ACKS */
-
/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */
@@ -156,10 +150,13 @@ struct tile_netio_queue {
* Statistics counters for a specific cpu and device.
*/
struct tile_net_stats_t {
- u32 rx_packets;
- u32 rx_bytes;
- u32 tx_packets;
- u32 tx_bytes;
+ struct u64_stats_sync syncp;
+ u64 rx_packets; /* total packets received */
+ u64 tx_packets; /* total packets transmitted */
+ u64 rx_bytes; /* total bytes received */
+ u64 tx_bytes; /* total bytes transmitted */
+ u64 rx_errors; /* packets truncated or marked bad by hw */
+ u64 rx_dropped; /* packets not for us or intf not up */
};
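The new u64_stats_sync member is what lets these 64-bit per-cpu counters be read without tearing on 32-bit configurations: writers bracket their updates, and readers retry a snapshot if a writer raced them. A minimal sketch of both sides (not this driver's exact code):

#include <linux/u64_stats_sync.h>

struct example_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
};

/* Writer side, e.g. from the NAPI/softirq receive path. */
static void example_rx_account(struct example_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: take a consistent snapshot, retrying on concurrent updates. */
static void example_rx_snapshot(struct example_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}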
@@ -218,8 +215,6 @@ struct tile_net_priv {
int network_cpus_count;
/* Credits per network cpu. */
int network_cpus_credits;
- /* Network stats. */
- struct net_device_stats stats;
/* For NetIO bringup retries. */
struct delayed_work retry_work;
/* Quick access to per cpu data. */
@@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg)
}
-#ifdef IGNORE_DUP_ACKS
-
-/*
- * Help detect "duplicate" ACKs. These are sequential packets (for a
- * given flow) which are exactly 66 bytes long, sharing everything but
- * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
- * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
- * +N, and the Tstamps are usually identical.
- *
- * NOTE: Apparently truly duplicate acks (with identical "ack" values),
- * should not be collapsed, as they are used for some kind of flow control.
- */
-static bool is_dup_ack(char *s1, char *s2, unsigned int len)
-{
- int i;
-
- unsigned long long ignorable = 0;
-
- /* Identification. */
- ignorable |= (1ULL << 0x12);
- ignorable |= (1ULL << 0x13);
-
- /* Header checksum. */
- ignorable |= (1ULL << 0x18);
- ignorable |= (1ULL << 0x19);
-
- /* ACK. */
- ignorable |= (1ULL << 0x2a);
- ignorable |= (1ULL << 0x2b);
- ignorable |= (1ULL << 0x2c);
- ignorable |= (1ULL << 0x2d);
-
- /* WinSize. */
- ignorable |= (1ULL << 0x30);
- ignorable |= (1ULL << 0x31);
-
- /* Checksum. */
- ignorable |= (1ULL << 0x32);
- ignorable |= (1ULL << 0x33);
-
- for (i = 0; i < len; i++, ignorable >>= 1) {
-
- if ((ignorable & 1) || (s1[i] == s2[i]))
- continue;
-
-#ifdef TILE_NET_DEBUG
- /* HACK: Mention non-timestamp diffs. */
- if (i < 0x38 && i != 0x2f &&
- net_ratelimit())
- pr_info("Diff at 0x%x\n", i);
-#endif
-
- return false;
- }
-
-#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
- /* HACK: Do not suppress truly duplicate ACKs. */
- /* ISSUE: Is this actually necessary or helpful? */
- if (s1[0x2a] == s2[0x2a] &&
- s1[0x2b] == s2[0x2b] &&
- s1[0x2c] == s2[0x2c] &&
- s1[0x2d] == s2[0x2d]) {
- return false;
- }
-#endif
-
- return true;
-}
-
-#endif
-
-
-
static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
struct tile_netio_queue *queue = &info->queue;
@@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+ netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
/* Extract the packet size. FIXME: Shouldn't the second line */
/* get subtracted? Mostly moot, since it should be "zero". */
@@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
#endif /* TILE_NET_DUMP_PACKETS */
#ifdef TILE_NET_VERIFY_INGRESS
- if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
- NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
- /* Bug 6624: Includes UDP packets with a "zero" checksum. */
- pr_warning("Bad L4 checksum on %d byte packet.\n", len);
- }
- if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
- NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+ if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
dump_packet(buf, len, "rx");
- panic("Bad L3 checksum.");
- }
- switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
- case NETIO_PKT_STATUS_OVERSIZE:
- if (len >= 64) {
- dump_packet(buf, len, "rx");
- panic("Unexpected OVERSIZE.");
- }
- break;
- case NETIO_PKT_STATUS_BAD:
- pr_warning("Unexpected BAD %ld byte packet.\n", len);
+ panic("Unexpected OVERSIZE.");
}
#endif
filter = 0;
- /* ISSUE: Filter TCP packets with "bad" checksums? */
-
- if (!(dev->flags & IFF_UP)) {
+ if (pkt_status == NETIO_PKT_STATUS_BAD) {
+ /* Handle CRC error and hardware truncation. */
+ filter = 2;
+ } else if (!(dev->flags & IFF_UP)) {
/* Filter packets received before we're up. */
filter = 1;
- } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+ } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
+ pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
/* Filter "truncated" packets. */
- filter = 1;
+ filter = 2;
} else if (!(dev->flags & IFF_PROMISC)) {
- /* FIXME: Implement HW multicast filter. */
if (!is_multicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
@@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
}
}
- if (filter) {
+ u64_stats_update_begin(&stats->syncp);
- /* ISSUE: Update "drop" statistics? */
+ if (filter != 0) {
+
+ if (filter == 1)
+ stats->rx_dropped++;
+ else
+ stats->rx_errors++;
tile_net_provide_linux_buffer(info, va, small);
@@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
stats->rx_bytes += len;
}
+ u64_stats_update_end(&stats->syncp);
+
/* ISSUE: It would be nice to defer this until the packet has */
/* actually been processed. */
tile_net_return_credit(info);
@@ -1907,8 +1822,10 @@ busy:
kfree_skb(olds[i]);
/* Update stats. */
+ u64_stats_update_begin(&stats->syncp);
stats->tx_packets += num_segs;
stats->tx_bytes += (num_segs * sh_len) + d_len;
+ u64_stats_update_end(&stats->syncp);
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int csum_start = skb_checksum_start_offset(skb);
- lepp_frag_t frags[LEPP_MAX_FRAGS];
+ lepp_frag_t frags[1 + MAX_SKB_FRAGS];
unsigned int num_frags;
@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int cmd_head, cmd_tail, cmd_next;
unsigned int comp_tail;
- lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+ lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
/*
@@ -2089,8 +2006,10 @@ busy:
kfree_skb(olds[i]);
/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+ u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+ u64_stats_update_end(&stats->syncp);
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
* Returns the address of the device statistics structure.
*/
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct tile_net_priv *priv = netdev_priv(dev);
- u32 rx_packets = 0;
- u32 tx_packets = 0;
- u32 rx_bytes = 0;
- u32 tx_bytes = 0;
+ u64 rx_packets = 0, tx_packets = 0;
+ u64 rx_bytes = 0, tx_bytes = 0;
+ u64 rx_errors = 0, rx_dropped = 0;
int i;
for_each_online_cpu(i) {
- if (priv->cpu[i]) {
- rx_packets += priv->cpu[i]->stats.rx_packets;
- rx_bytes += priv->cpu[i]->stats.rx_bytes;
- tx_packets += priv->cpu[i]->stats.tx_packets;
- tx_bytes += priv->cpu[i]->stats.tx_bytes;
- }
+ struct tile_net_stats_t *cpu_stats;
+ u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
+ u64 trx_errors, trx_dropped;
+ unsigned int start;
+
+ if (priv->cpu[i] == NULL)
+ continue;
+ cpu_stats = &priv->cpu[i]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ trx_packets = cpu_stats->rx_packets;
+ ttx_packets = cpu_stats->tx_packets;
+ trx_bytes = cpu_stats->rx_bytes;
+ ttx_bytes = cpu_stats->tx_bytes;
+ trx_errors = cpu_stats->rx_errors;
+ trx_dropped = cpu_stats->rx_dropped;
+ } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+
+ rx_packets += trx_packets;
+ tx_packets += ttx_packets;
+ rx_bytes += trx_bytes;
+ tx_bytes += ttx_bytes;
+ rx_errors += trx_errors;
+ rx_dropped += trx_dropped;
}
- priv->stats.rx_packets = rx_packets;
- priv->stats.rx_bytes = rx_bytes;
- priv->stats.tx_packets = tx_packets;
- priv->stats.tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->tx_packets = tx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_errors = rx_errors;
+ stats->rx_dropped = rx_dropped;
- return &priv->stats;
+ return stats;
}
@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
.ndo_stop = tile_net_stop,
.ndo_start_xmit = tile_net_tx,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats = tile_net_get_stats,
+ .ndo_get_stats64 = tile_net_get_stats64,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
*/
static void tile_net_setup(struct net_device *dev)
{
- PDEBUG("tile_net_setup()\n");
+ netdev_features_t features = 0;
ether_setup(dev);
-
dev->netdev_ops = &tile_net_ops;
-
dev->watchdog_timeo = TILE_NET_TIMEOUT;
+ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+ dev->mtu = TILE_NET_MTU;
- /* We want lockless xmit. */
- dev->features |= NETIF_F_LLTX;
-
- /* We support hardware tx checksums. */
- dev->features |= NETIF_F_HW_CSUM;
-
- /* We support scatter/gather. */
- dev->features |= NETIF_F_SG;
-
- /* We support TSO. */
- dev->features |= NETIF_F_TSO;
+ features |= NETIF_F_HW_CSUM;
+ features |= NETIF_F_SG;
-#ifdef TILE_NET_GSO
- /* We support GSO. */
- dev->features |= NETIF_F_GSO;
-#endif
+ /* We support TSO iff the HV supports sufficient frags. */
+ if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
+ features |= NETIF_F_TSO;
+ /* We can't support HIGHDMA without hash_default, since we need
+ * to be able to finv() with a VA if we don't have hash_default.
+ */
if (hash_default)
- dev->features |= NETIF_F_HIGHDMA;
-
- /* ISSUE: We should support NETIF_F_UFO. */
+ features |= NETIF_F_HIGHDMA;
- dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
-
- dev->mtu = TILE_NET_MTU;
+ dev->hw_features |= features;
+ dev->vlan_features |= features;
+ dev->features |= features;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 9c805e0c0ca..f7f2ef49c0c 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1726,7 +1726,7 @@ static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
goto fail_alloc_irq;
}
result = request_irq(card->irq, gelic_card_interrupt,
- IRQF_DISABLED, netdev->name, card);
+ 0, netdev->name, card);
if (result) {
dev_info(ctodev(card), "%s:request_irq failed (%d)\n",
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01bdc6ca075..c4dbf981804 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,13 +1308,13 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
- GFP_KERNEL | __GFP_ZERO);
+ data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
+ GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;
- data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
- GFP_KERNEL | __GFP_ZERO);
+ data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
+ GFP_KERNEL);
if (!data->txring) {
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
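dma_zalloc_coherent() is simply the zeroing wrapper around dma_alloc_coherent(), so the explicit __GFP_ZERO becomes redundant. A hypothetical helper showing the equivalence:

#include <linux/dma-mapping.h>

static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *handle)
{
	/* Equivalent to dma_alloc_coherent(dev, size, handle,
	 * GFP_KERNEL | __GFP_ZERO); the wrapper zeroes the buffer itself.
	 */
	return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}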
@@ -1558,7 +1558,7 @@ tsi108_init_one(struct platform_device *pdev)
hw_info *einfo;
int err = 0;
- einfo = pdev->dev.platform_data;
+ einfo = dev_get_platdata(&pdev->dev);
if (NULL == einfo) {
printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index b75eb9e0e86..bdf697b184a 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -32,7 +32,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "via-rhine"
-#define DRV_VERSION "1.5.0"
+#define DRV_VERSION "1.5.1"
#define DRV_RELDATE "2010-10-09"
#include <linux/types.h>
@@ -1704,7 +1704,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
if (unlikely(vlan_tx_tag_present(skb))) {
- rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+ u16 vid_pcp = vlan_tx_tag_get(skb);
+
+ /* drop CFI/DEI bit, register needs VID and PCP */
+ vid_pcp = (vid_pcp & VLAN_VID_MASK) |
+ ((vid_pcp & VLAN_PRIO_MASK) >> 1);
+ rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
/* request tagging */
rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
}
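The 16-bit tag handed over by the stack is laid out PCP(15:13) | CFI/DEI(12) | VID(11:0), while the Rhine's TX status word wants the priority packed directly above the VID; masking and shifting the priority right by one squeezes the CFI bit out. A worked example (helper name and value are illustrative):

#include <linux/if_vlan.h>	/* VLAN_VID_MASK 0x0fff, VLAN_PRIO_MASK 0xe000 */

static u16 example_vid_pcp(u16 tag)
{
	return (tag & VLAN_VID_MASK) | ((tag & VLAN_PRIO_MASK) >> 1);
}

/* e.g. tag 0xa005 (PCP=5, CFI=0, VID=5) -> 0x5005, PCP now in bits 14:12. */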
@@ -2407,7 +2412,7 @@ static struct pci_driver rhine_driver = {
.driver.pm = RHINE_PM_OPS,
};
-static struct dmi_system_id __initdata rhine_dmi_table[] = {
+static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
.matches = {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index d01cacf8a7c..d022bf93657 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2376,6 +2376,23 @@ out_0:
return ret;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * velocity_poll_controller - Velocity Poll controller function
+ * @dev: network device
+ *
+ *
+ * Used by NETCONSOLE and other diagnostic tools to allow network I/O
+ * with interrupts disabled.
+ */
+static void velocity_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ velocity_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* velocity_mii_ioctl - MII ioctl handler
* @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_do_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = velocity_poll_controller,
+#endif
};
/**
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 30fed08d167..0df36c6ec7f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -622,7 +622,7 @@ static const struct net_device_ops w5100_netdev_ops = {
static int w5100_hw_probe(struct platform_device *pdev)
{
- struct wiznet_platform_data *data = pdev->dev.platform_data;
+ struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5100_priv *priv = netdev_priv(ndev);
const char *name = netdev_name(ndev);
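dev_get_platdata() just returns dev->platform_data through an accessor, so this and the similar conversions below are purely mechanical. Sketch:

#include <linux/platform_device.h>

static void *example_get_pdata(struct platform_device *pdev)
{
	/* dev_get_platdata(&pdev->dev) returns pdev->dev.platform_data */
	return dev_get_platdata(&pdev->dev);
}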
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index e92884564e1..71c27b3292f 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -542,7 +542,7 @@ static const struct net_device_ops w5300_netdev_ops = {
static int w5300_hw_probe(struct platform_device *pdev)
{
- struct wiznet_platform_data *data = pdev->dev.platform_data;
+ struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5300_priv *priv = netdev_priv(ndev);
const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 58eb4488bef..0029148077a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
@@ -297,6 +297,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ /* Init descriptor indexes */
+ lp->tx_bd_ci = 0;
+ lp->tx_bd_next = 0;
+ lp->tx_bd_tail = 0;
+ lp->rx_bd_ci = 0;
+
return 0;
out:
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fb7d1c28a2e..b2ff038d6d2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -201,17 +201,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/*
* Allocate the Tx and Rx buffer descriptors.
*/
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p,
- GFP_KERNEL | __GFP_ZERO);
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p,
- GFP_KERNEL | __GFP_ZERO);
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fd4dbdae533..4c619ea5189 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1230,8 +1230,7 @@ error:
*/
static int xemaclite_of_remove(struct platform_device *of_dev)
{
- struct device *dev = &of_dev->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(of_dev);
struct net_local *lp = netdev_priv(ndev);
@@ -1250,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev, of_dev);
- dev_set_drvdata(dev, NULL);
return 0;
}
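Two cleanups here: platform_get_drvdata(of_dev) is shorthand for dev_get_drvdata(&of_dev->dev), and the explicit dev_set_drvdata(dev, NULL) on remove can be dropped because the driver core clears drvdata once the driver is unbound (an assumption about the kernels this series targets; older trees needed the manual reset). A hedged sketch of the resulting shape:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int example_of_remove(struct platform_device *of_dev)
{
	/* platform_get_drvdata(of_dev) == dev_get_drvdata(&of_dev->dev) */
	struct net_device *ndev = platform_get_drvdata(of_dev);

	unregister_netdev(ndev);
	free_netdev(ndev);

	/* No dev_set_drvdata(..., NULL) needed: the core resets drvdata
	 * after ->remove() when no driver is bound.
	 */
	return 0;
}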
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3d689fcb791..e78802e75ea 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1384,7 +1384,7 @@ static int eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
- struct eth_plat_info *plat = pdev->dev.platform_data;
+ struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;