Diffstat (limited to 'drivers')
-rw-r--r--drivers/bcma/core.c8
-rw-r--r--drivers/bcma/main.c5
-rw-r--r--drivers/connector/cn_proc.c25
-rw-r--r--drivers/connector/connector.c12
-rw-r--r--drivers/dma/ioat/dca.c11
-rw-r--r--drivers/firewire/Kconfig6
-rw-r--r--drivers/firewire/net.c462
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c68
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c34
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h14
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c155
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c113
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h11
-rw-r--r--drivers/isdn/capi/capidrv.c3
-rw-r--r--drivers/isdn/hisax/fsm.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/media/dvb-core/dvb_net.c10
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/Kconfig18
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/caif/Kconfig7
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_shm_u5500.c128
-rw-r--r--drivers/net/caif/caif_shmcore.c747
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c76
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/mcp251x.c35
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c24
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c1
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c1
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c7
-rw-r--r--drivers/net/ethernet/amd/sunlance.c9
-rw-r--r--drivers/net/ethernet/apple/macmace.c16
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c73
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c84
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h47
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c327
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c127
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h87
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h252
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c226
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c278
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c79
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c351
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c77
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c107
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h9
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c389
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h18
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c33
-rw-r--r--drivers/net/ethernet/cadence/macb.c37
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c778
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c89
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c256
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h98
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c14
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c74
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c13
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c9
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c60
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c22
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c8
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c (renamed from drivers/net/ethernet/freescale/fec.c)34
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c159
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h8
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c3
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c131
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h27
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c240
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c408
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c37
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c306
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c134
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c104
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c7
-rw-r--r--drivers/net/ethernet/marvell/Kconfig5
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c239
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c131
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c14
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c5
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c32
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c3
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c12
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/netx-eth.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c20
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c1
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c21
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c220
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h81
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c379
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h181
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c55
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c75
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c47
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c96
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h214
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1297
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c1176
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c255
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c6
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c397
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h220
-rw-r--r--drivers/net/ethernet/s6gmac.c16
-rw-r--r--drivers/net/ethernet/seeq/ether3.c22
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c267
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/enum.h12
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon.c17
-rw-r--r--drivers/net/ethernet/sfc/filter.c249
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h1
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h97
-rw-r--r--drivers/net/ethernet/sfc/nic.c94
-rw-r--r--drivers/net/ethernet/sfc/ptp.c116
-rw-r--r--drivers/net/ethernet/sfc/rx.c793
-rw-r--r--drivers/net/ethernet/sfc/siena.c25
-rw-r--r--drivers/net/ethernet/sgi/meth.c5
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c90
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h122
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c151
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c85
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c156
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c998
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c215
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h74
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c13
-rw-r--r--drivers/net/ethernet/sun/sunqe.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c159
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c48
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c240
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c21
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c27
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c25
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1
-rw-r--r--drivers/net/fddi/defxx.c9
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c51
-rw-r--r--drivers/net/ieee802154/mrf24j40.c15
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c18
-rw-r--r--drivers/net/irda/bfin_sir.c3
-rw-r--r--drivers/net/irda/nsc-ircc.c6
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c17
-rw-r--r--drivers/net/irda/via-ircc.c6
-rw-r--r--drivers/net/irda/w83977af_ir.c7
-rw-r--r--drivers/net/macvlan.c19
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c127
-rw-r--r--drivers/net/phy/mdio-gpio.c12
-rw-r--r--drivers/net/phy/mdio-octeon.c13
-rw-r--r--drivers/net/phy/micrel.c41
-rw-r--r--drivers/net/phy/phy.c66
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c53
-rw-r--r--drivers/net/team/Kconfig12
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c25
-rw-r--r--drivers/net/team/team_mode_broadcast.c14
-rw-r--r--drivers/net/team/team_mode_random.c71
-rw-r--r--drivers/net/team/team_mode_roundrobin.c36
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vxlan.c399
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c72
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c116
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c41
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h332
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c38
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h49
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c188
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c58
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c154
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h363
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/b43.h6
-rw-r--r--drivers/net/wireless/b43/dma.c9
-rw-r--r--drivers/net/wireless/b43/main.c7
-rw-r--r--drivers/net/wireless/b43/phy_ht.c610
-rw-r--r--drivers/net/wireless/b43/phy_ht.h77
-rw-r--r--drivers/net/wireless/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c382
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h87
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c37
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c25
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c33
-rw-r--r--drivers/net/wireless/iwlegacy/common.c21
-rw-r--r--drivers/net/wireless/iwlegacy/common.h9
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig11
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c (renamed from drivers/net/wireless/iwlwifi/pcie/1000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c (renamed from drivers/net/wireless/iwlwifi/pcie/2000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c (renamed from drivers/net/wireless/iwlwifi/pcie/5000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c (renamed from drivers/net/wireless/iwlwifi/pcie/6000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c (renamed from drivers/net/wireless/iwlwifi/pcie/7000.c)63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c347
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c260
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c138
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h53
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c57
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c91
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h115
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c218
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c70
-rw-r--r--drivers/net/wireless/mwifiex/fw.h72
-rw-r--r--drivers/net/wireless/mwifiex/init.c6
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h23
-rw-r--r--drivers/net/wireless/mwifiex/main.c15
-rw-r--r--drivers/net/wireless/mwifiex/main.h9
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c156
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c79
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c10
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwl8k.c111
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c857
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c116
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c63
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h3
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c3
-rw-r--r--drivers/net/xen-netback/netback.c9
-rw-r--r--drivers/net/xen-netfront.c26
-rw-r--r--drivers/nfc/microread/mei.c2
-rw-r--r--drivers/ptp/ptp_pch.c29
-rw-r--r--drivers/s390/kvm/virtio_ccw.c6
-rw-r--r--drivers/scsi/csiostor/Makefile3
-rw-r--r--drivers/scsi/csiostor/csio_hw.c559
-rw-r--r--drivers/scsi/csiostor/csio_hw.h47
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h175
-rw-r--r--drivers/scsi/csiostor/csio_hw_t4.c403
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c397
-rw-r--r--drivers/scsi/csiostor/csio_init.c48
-rw-r--r--drivers/scsi/csiostor/csio_init.h29
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c10
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_wr.c60
-rw-r--r--drivers/scsi/scsi_netlink.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c21
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c70
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c41
-rw-r--r--drivers/ssb/driver_mipscore.c25
-rw-r--r--drivers/ssb/driver_pcicore.c15
-rw-r--r--drivers/ssb/embedded.c5
-rw-r--r--drivers/ssb/main.c51
-rw-r--r--drivers/ssb/pci.c97
-rw-r--r--drivers/ssb/pcmcia.c46
-rw-r--r--drivers/ssb/scan.c31
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/ssb/ssb_private.h19
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c12
515 files changed, 21215 insertions, 8307 deletions
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 03bbe104338..17b26ce7e05 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -104,7 +104,13 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
if (i)
bcma_err(core->bus, "PLL enable timeout\n");
} else {
- bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
+ /*
+ * Mask the PLL but don't wait for it to be disabled. PLL may be
+ * shared between cores and will be still up if there is another
+ * core using it.
+ */
+ bcma_mask32(core, BCMA_CLKCTLST, ~req);
+ bcma_read32(core, BCMA_CLKCTLST);
}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9a6188add59..f72f52b4b1d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -120,6 +120,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
continue;
}
+ /* Only first GMAC core on BCM4706 is connected and working */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ core->core_unit > 0)
+ continue;
+
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 1110478dd0f..08ae128cce9 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -232,6 +232,31 @@ void proc_comm_connector(struct task_struct *task)
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
+void proc_coredump_connector(struct task_struct *task)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ __u8 buffer[CN_PROC_MSG_SIZE];
+ struct timespec ts;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_COREDUMP;
+ ev->event_data.coredump.process_pid = task->pid;
+ ev->event_data.coredump.process_tgid = task->tgid;
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
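
The new PROC_EVENT_COREDUMP event is delivered to user space over the proc connector, like the existing fork/exec/exit events. Below is a minimal listener sketch; it is not part of the patch, error handling is omitted, and it assumes the matching PROC_EVENT_COREDUMP value is present in the uapi header (that hunk falls outside this drivers-only diffstat). The subscribe step is the standard PROC_CN_MCAST_LISTEN request from linux/cn_proc.h.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

int main(void)
{
	char req[NLMSG_SPACE(sizeof(struct cn_msg) + sizeof(enum proc_cn_mcast_op))];
	char buf[4096];
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK, .nl_groups = CN_IDX_PROC };
	struct nlmsghdr *nlh;
	struct cn_msg *cn;
	int sk;

	sk = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
	sa.nl_pid = getpid();
	bind(sk, (struct sockaddr *)&sa, sizeof(sa));

	/* Subscribe: one cn_msg whose payload is PROC_CN_MCAST_LISTEN. */
	memset(req, 0, sizeof(req));
	nlh = (struct nlmsghdr *)req;
	cn = NLMSG_DATA(nlh);
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*cn) + sizeof(enum proc_cn_mcast_op));
	nlh->nlmsg_type = NLMSG_DONE;
	nlh->nlmsg_pid = getpid();
	cn->id.idx = CN_IDX_PROC;
	cn->id.val = CN_VAL_PROC;
	cn->len = sizeof(enum proc_cn_mcast_op);
	*(enum proc_cn_mcast_op *)cn->data = PROC_CN_MCAST_LISTEN;
	send(sk, req, nlh->nlmsg_len, 0);

	for (;;) {
		ssize_t len = recv(sk, buf, sizeof(buf), 0);
		struct proc_event *ev;

		if (len <= 0)
			break;
		cn = NLMSG_DATA((struct nlmsghdr *)buf);
		ev = (struct proc_event *)cn->data;
		if (ev->what == PROC_EVENT_COREDUMP)
			printf("coredump: pid %d tgid %d\n",
			       ev->event_data.coredump.process_pid,
			       ev->event_data.coredump.process_tgid);
	}
	return 0;
}
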
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f1b7e244bfc..6ecfa758942 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
-#include <linux/netlink.h>
+#include <net/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/slab.h>
@@ -95,13 +95,13 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!netlink_has_listeners(dev->nls, group))
return -ESRCH;
- size = NLMSG_SPACE(sizeof(*msg) + msg->len);
+ size = sizeof(*msg) + msg->len;
- skb = alloc_skb(size, gfp_mask);
+ skb = nlmsg_new(size, gfp_mask);
if (!skb)
return -ENOMEM;
- nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
+ nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
if (!nlh) {
kfree_skb(skb);
return -EMSGSIZE;
@@ -124,7 +124,7 @@ static int cn_call_callback(struct sk_buff *skb)
{
struct cn_callback_entry *i, *cbq = NULL;
struct cn_dev *dev = &cdev;
- struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+ struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
int err = -ENODEV;
@@ -162,7 +162,7 @@ static void cn_rx_skb(struct sk_buff *__skb)
skb = skb_get(__skb);
- if (skb->len >= NLMSG_SPACE(0)) {
+ if (skb->len >= NLMSG_HDRLEN) {
nlh = nlmsg_hdr(skb);
if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
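
For context on the connector.c conversion above: nlmsg_new() takes the payload size and reserves NLMSG_HDRLEN plus alignment itself, and nlmsg_put() likewise takes the payload length, which is why the NLMSG_SPACE() inflation and the "size - sizeof(*nlh)" adjustment disappear. A compact sketch of the resulting pattern, reusing the locals of cn_netlink_send() (illustrative only, not additional patch content):

	size = sizeof(*msg) + msg->len;		/* connector payload only */

	skb = nlmsg_new(size, gfp_mask);	/* header + alignment added internally */
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	memcpy(nlmsg_data(nlh), msg, size);	/* copy cn_msg plus its data */
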
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9b041858d10..9e84d5bc930 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -470,8 +470,10 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
}
if (!dca2_tag_map_valid(ioatdca->tag_map)) {
- dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
- "disabling DCA\n");
+ WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+ "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+ dev_driver_string(&pdev->dev),
+ dev_name(&pdev->dev));
free_dca_provider(dca);
return NULL;
}
@@ -689,7 +691,10 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
}
if (dca3_tag_map_invalid(ioatdca->tag_map)) {
- dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+ WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+ "%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+ dev_driver_string(&pdev->dev),
+ dev_name(&pdev->dev));
free_dca_provider(dca);
return NULL;
}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 7224533e8ca..7a701a58bbf 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -47,9 +47,9 @@ config FIREWIRE_NET
tristate "IP networking over 1394"
depends on FIREWIRE && INET
help
- This enables IPv4 over IEEE 1394, providing IP connectivity with
- other implementations of RFC 2734 as found on several operating
- systems. Multicast support is currently limited.
+ This enables IPv4/IPv6 over IEEE 1394, providing IP connectivity
+ with other implementations of RFC 2734/3146 as found on several
+ operating systems. Multicast support is currently limited.
To compile this driver as a module, say M here: The module will be
called firewire-net.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 2b27bff2591..4d565365e47 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1,5 +1,6 @@
/*
* IPv4 over IEEE 1394, per RFC 2734
+ * IPv6 over IEEE 1394, per RFC 3146
*
* Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
*
@@ -28,6 +29,7 @@
#include <asm/unaligned.h>
#include <net/arp.h>
+#include <net/firewire.h>
/* rx limits */
#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
@@ -45,6 +47,7 @@
#define IANA_SPECIFIER_ID 0x00005eU
#define RFC2734_SW_VERSION 0x000001U
+#define RFC3146_SW_VERSION 0x000002U
#define IEEE1394_GASP_HDR_SIZE 8
@@ -57,32 +60,10 @@
#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
-#define RFC2734_HW_ADDR_LEN 16
-
-struct rfc2734_arp {
- __be16 hw_type; /* 0x0018 */
- __be16 proto_type; /* 0x0806 */
- u8 hw_addr_len; /* 16 */
- u8 ip_addr_len; /* 4 */
- __be16 opcode; /* ARP Opcode */
- /* Above is exactly the same format as struct arphdr */
-
- __be64 s_uniq_id; /* Sender's 64bit EUI */
- u8 max_rec; /* Sender's max packet size */
- u8 sspd; /* Sender's max speed */
- __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
- __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
- __be32 sip; /* Sender's IP Address */
- __be32 tip; /* IP Address of requested hw addr */
-} __packed;
-
-/* This header format is specific to this driver implementation. */
-#define FWNET_ALEN 8
-#define FWNET_HLEN 10
-struct fwnet_header {
- u8 h_dest[FWNET_ALEN]; /* destination address */
- __be16 h_proto; /* packet type ID field */
-} __packed;
+static bool fwnet_hwaddr_is_multicast(u8 *ha)
+{
+ return !!(*ha & 1);
+}
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
@@ -191,8 +172,6 @@ struct fwnet_peer {
struct list_head peer_link;
struct fwnet_device *dev;
u64 guid;
- u64 fifo;
- __be32 ip;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
@@ -222,6 +201,15 @@ struct fwnet_packet_task {
};
/*
+ * Get fifo address embedded in hwaddr
+ */
+static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
+{
+ return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
+ | get_unaligned_be32(&ha->uc.fifo_lo);
+}
+
+/*
* saddr == NULL means use device source address.
* daddr == NULL means leave destination address (eg unresolved arp).
*/
@@ -513,10 +501,20 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
bool is_broadcast, u16 ether_type)
{
struct fwnet_device *dev;
- static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
int status;
__be64 guid;
+ switch (ether_type) {
+ case ETH_P_ARP:
+ case ETH_P_IP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case ETH_P_IPV6:
+#endif
+ break;
+ default:
+ goto err;
+ }
+
dev = netdev_priv(net);
/* Write metadata, and then pass to the receive level */
skb->dev = net;
@@ -524,92 +522,11 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
/*
* Parse the encapsulation header. This actually does the job of
- * converting to an ethernet frame header, as well as arp
- * conversion if needed. ARP conversion is easier in this
- * direction, since we are using ethernet as our backend.
+ * converting to an ethernet-like pseudo frame header.
*/
- /*
- * If this is an ARP packet, convert it. First, we want to make
- * use of some of the fields, since they tell us a little bit
- * about the sending machine.
- */
- if (ether_type == ETH_P_ARP) {
- struct rfc2734_arp *arp1394;
- struct arphdr *arp;
- unsigned char *arp_ptr;
- u64 fifo_addr;
- u64 peer_guid;
- unsigned sspd;
- u16 max_payload;
- struct fwnet_peer *peer;
- unsigned long flags;
-
- arp1394 = (struct rfc2734_arp *)skb->data;
- arp = (struct arphdr *)skb->data;
- arp_ptr = (unsigned char *)(arp + 1);
- peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
- fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
- | get_unaligned_be32(&arp1394->fifo_lo);
-
- sspd = arp1394->sspd;
- /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
- if (sspd > SCODE_3200) {
- dev_notice(&net->dev, "sspd %x out of range\n", sspd);
- sspd = SCODE_3200;
- }
- max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
-
- spin_lock_irqsave(&dev->lock, flags);
- peer = fwnet_peer_find_by_guid(dev, peer_guid);
- if (peer) {
- peer->fifo = fifo_addr;
-
- if (peer->speed > sspd)
- peer->speed = sspd;
- if (peer->max_payload > max_payload)
- peer->max_payload = max_payload;
-
- peer->ip = arp1394->sip;
- }
- spin_unlock_irqrestore(&dev->lock, flags);
-
- if (!peer) {
- dev_notice(&net->dev,
- "no peer for ARP packet from %016llx\n",
- (unsigned long long)peer_guid);
- goto no_peer;
- }
-
- /*
- * Now that we're done with the 1394 specific stuff, we'll
- * need to alter some of the data. Believe it or not, all
- * that needs to be done is sender_IP_address needs to be
- * moved, the destination hardware address get stuffed
- * in and the hardware address length set to 8.
- *
- * IMPORTANT: The code below overwrites 1394 specific data
- * needed above so keep the munging of the data for the
- * higher level IP stack last.
- */
-
- arp->ar_hln = 8;
- /* skip over sender unique id */
- arp_ptr += arp->ar_hln;
- /* move sender IP addr */
- put_unaligned(arp1394->sip, (u32 *)arp_ptr);
- /* skip over sender IP addr */
- arp_ptr += arp->ar_pln;
-
- if (arp->ar_op == htons(ARPOP_REQUEST))
- memset(arp_ptr, 0, sizeof(u64));
- else
- memcpy(arp_ptr, net->dev_addr, sizeof(u64));
- }
-
- /* Now add the ethernet header. */
guid = cpu_to_be64(dev->card->guid);
if (dev_hard_header(skb, net, ether_type,
- is_broadcast ? &broadcast_hw : &guid,
+ is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
struct fwnet_header *eth;
u16 *rawp;
@@ -618,7 +535,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = (struct fwnet_header *)skb_mac_header(skb);
- if (*eth->h_dest & 1) {
+ if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
if (memcmp(eth->h_dest, net->broadcast,
net->addr_len) == 0)
skb->pkt_type = PACKET_BROADCAST;
@@ -630,7 +547,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
}
- if (ntohs(eth->h_proto) >= 1536) {
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
protocol = eth->h_proto;
} else {
rawp = (u16 *)skb->data;
@@ -652,7 +569,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
return 0;
- no_peer:
+ err:
net->stats.rx_errors++;
net->stats.rx_dropped++;
@@ -856,7 +773,12 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
- if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ if (specifier_id == IANA_SPECIFIER_ID &&
+ (ver == RFC2734_SW_VERSION
+#if IS_ENABLED(CONFIG_IPV6)
+ || ver == RFC3146_SW_VERSION
+#endif
+ )) {
buf_ptr += 2;
length -= IEEE1394_GASP_HDR_SIZE;
fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
@@ -1059,16 +981,27 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
u8 *p;
int generation;
int node_id;
+ unsigned int sw_version;
/* ptask->generation may not have been set yet */
generation = dev->card->generation;
smp_rmb();
node_id = dev->card->node_id;
+ switch (ptask->skb->protocol) {
+ default:
+ sw_version = RFC2734_SW_VERSION;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ sw_version = RFC3146_SW_VERSION;
+#endif
+ }
+
p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
- | RFC2734_SW_VERSION, &p[4]);
+ | sw_version, &p[4]);
/* We should not transmit if broadcast_channel.valid == 0. */
fw_send_request(dev->card, &ptask->transaction,
@@ -1116,6 +1049,62 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
return 0;
}
+static void fwnet_fifo_stop(struct fwnet_device *dev)
+{
+ if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
+ return;
+
+ fw_core_remove_address_handler(&dev->handler);
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+}
+
+static int fwnet_fifo_start(struct fwnet_device *dev)
+{
+ int retval;
+
+ if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ return 0;
+
+ dev->handler.length = 4096;
+ dev->handler.address_callback = fwnet_receive_packet;
+ dev->handler.callback_data = dev;
+
+ retval = fw_core_add_address_handler(&dev->handler,
+ &fw_high_memory_region);
+ if (retval < 0)
+ return retval;
+
+ dev->local_fifo = dev->handler.offset;
+
+ return 0;
+}
+
+static void __fwnet_broadcast_stop(struct fwnet_device *dev)
+{
+ unsigned u;
+
+ if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
+ kunmap(dev->broadcast_rcv_buffer.pages[u]);
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ }
+ if (dev->broadcast_rcv_context) {
+ fw_iso_context_destroy(dev->broadcast_rcv_context);
+ dev->broadcast_rcv_context = NULL;
+ }
+ kfree(dev->broadcast_rcv_buffer_ptrs);
+ dev->broadcast_rcv_buffer_ptrs = NULL;
+ dev->broadcast_state = FWNET_BROADCAST_ERROR;
+}
+
+static void fwnet_broadcast_stop(struct fwnet_device *dev)
+{
+ if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
+ return;
+ fw_iso_context_stop(dev->broadcast_rcv_context);
+ __fwnet_broadcast_stop(dev);
+}
+
static int fwnet_broadcast_start(struct fwnet_device *dev)
{
struct fw_iso_context *context;
@@ -1124,60 +1113,47 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
unsigned max_receive;
struct fw_iso_packet packet;
unsigned long offset;
+ void **ptrptr;
unsigned u;
- if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
- dev->handler.length = 4096;
- dev->handler.address_callback = fwnet_receive_packet;
- dev->handler.callback_data = dev;
-
- retval = fw_core_add_address_handler(&dev->handler,
- &fw_high_memory_region);
- if (retval < 0)
- goto failed_initial;
-
- dev->local_fifo = dev->handler.offset;
- }
+ if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
+ return 0;
max_receive = 1U << (dev->card->max_receive + 1);
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
- if (!dev->broadcast_rcv_context) {
- void **ptrptr;
-
- context = fw_iso_context_create(dev->card,
- FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
- dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
- if (IS_ERR(context)) {
- retval = PTR_ERR(context);
- goto failed_context_create;
- }
+ ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ if (!ptrptr) {
+ retval = -ENOMEM;
+ goto failed;
+ }
+ dev->broadcast_rcv_buffer_ptrs = ptrptr;
+
+ context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
+ IEEE1394_BROADCAST_CHANNEL,
+ dev->card->link_speed, 8,
+ fwnet_receive_broadcast, dev);
+ if (IS_ERR(context)) {
+ retval = PTR_ERR(context);
+ goto failed;
+ }
- retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
- dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
- if (retval < 0)
- goto failed_buffer_init;
+ retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
+ FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ if (retval < 0)
+ goto failed;
- ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
- if (!ptrptr) {
- retval = -ENOMEM;
- goto failed_ptrs_alloc;
- }
+ dev->broadcast_state = FWNET_BROADCAST_STOPPED;
- dev->broadcast_rcv_buffer_ptrs = ptrptr;
- for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
- void *ptr;
- unsigned v;
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ void *ptr;
+ unsigned v;
- ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
- for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
- *ptrptr++ = (void *)
- ((char *)ptr + v * max_receive);
- }
- dev->broadcast_rcv_context = context;
- } else {
- context = dev->broadcast_rcv_context;
+ ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ *ptrptr++ = (void *) ((char *)ptr + v * max_receive);
}
+ dev->broadcast_rcv_context = context;
packet.payload_length = max_receive;
packet.interrupt = 1;
@@ -1191,7 +1167,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
retval = fw_iso_context_queue(context, &packet,
&dev->broadcast_rcv_buffer, offset);
if (retval < 0)
- goto failed_rcv_queue;
+ goto failed;
offset += max_receive;
}
@@ -1201,7 +1177,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
retval = fw_iso_context_start(context, -1, 0,
FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
if (retval < 0)
- goto failed_rcv_queue;
+ goto failed;
/* FIXME: adjust it according to the min. speed of all known peers? */
dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
@@ -1210,19 +1186,8 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
return 0;
- failed_rcv_queue:
- kfree(dev->broadcast_rcv_buffer_ptrs);
- dev->broadcast_rcv_buffer_ptrs = NULL;
- failed_ptrs_alloc:
- fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
- failed_buffer_init:
- fw_iso_context_destroy(context);
- dev->broadcast_rcv_context = NULL;
- failed_context_create:
- fw_core_remove_address_handler(&dev->handler);
- failed_initial:
- dev->local_fifo = FWNET_NO_FIFO_ADDR;
-
+ failed:
+ __fwnet_broadcast_stop(dev);
return retval;
}
@@ -1240,11 +1205,10 @@ static int fwnet_open(struct net_device *net)
struct fwnet_device *dev = netdev_priv(net);
int ret;
- if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
- ret = fwnet_broadcast_start(dev);
- if (ret)
- return ret;
- }
+ ret = fwnet_broadcast_start(dev);
+ if (ret)
+ return ret;
+
netif_start_queue(net);
spin_lock_irq(&dev->lock);
@@ -1257,9 +1221,10 @@ static int fwnet_open(struct net_device *net)
/* ifdown */
static int fwnet_stop(struct net_device *net)
{
- netif_stop_queue(net);
+ struct fwnet_device *dev = netdev_priv(net);
- /* Deallocate iso context for use by other applications? */
+ netif_stop_queue(net);
+ fwnet_broadcast_stop(dev);
return 0;
}
@@ -1299,19 +1264,27 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
* We might need to rebuild the header on tx failure.
*/
memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
- skb_pull(skb, sizeof(hdr_buf));
-
proto = hdr_buf.h_proto;
+
+ switch (proto) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+#endif
+ break;
+ default:
+ goto fail;
+ }
+
+ skb_pull(skb, sizeof(hdr_buf));
dg_size = skb->len;
/*
* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP.
*/
- if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
- || proto == htons(ETH_P_ARP)
- || (proto == htons(ETH_P_IP)
- && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
max_payload = dev->broadcast_xmt_max_payload;
datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
@@ -1320,11 +1293,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
ptask->dest_node = IEEE1394_ALL_NODES;
ptask->speed = SCODE_100;
} else {
- __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
+ __be64 guid = get_unaligned(&ha->uc.uniq_id);
u8 generation;
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
- if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ if (!peer)
goto fail;
generation = peer->generation;
@@ -1332,32 +1306,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
- ptask->fifo_addr = peer->fifo;
+ ptask->fifo_addr = fwnet_hwaddr_fifo(ha);
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
}
- /* If this is an ARP packet, convert it */
- if (proto == htons(ETH_P_ARP)) {
- struct arphdr *arp = (struct arphdr *)skb->data;
- unsigned char *arp_ptr = (unsigned char *)(arp + 1);
- struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
- __be32 ipaddr;
-
- ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
-
- arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
- arp1394->max_rec = dev->card->max_receive;
- arp1394->sspd = dev->card->link_speed;
-
- put_unaligned_be16(dev->local_fifo >> 32,
- &arp1394->fifo_hi);
- put_unaligned_be32(dev->local_fifo & 0xffffffff,
- &arp1394->fifo_lo);
- put_unaligned(ipaddr, &arp1394->sip);
- }
-
ptask->hdr.w0 = 0;
ptask->hdr.w1 = 0;
ptask->skb = skb;
@@ -1472,8 +1426,6 @@ static int fwnet_add_peer(struct fwnet_device *dev,
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
- peer->fifo = FWNET_NO_FIFO_ADDR;
- peer->ip = 0;
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
@@ -1503,6 +1455,7 @@ static int fwnet_probe(struct device *_dev)
struct fwnet_device *dev;
unsigned max_mtu;
int ret;
+ union fwnet_hwaddr *ha;
mutex_lock(&fwnet_device_mutex);
@@ -1533,6 +1486,11 @@ static int fwnet_probe(struct device *_dev)
dev->card = card;
dev->netdev = net;
+ ret = fwnet_fifo_start(dev);
+ if (ret < 0)
+ goto out;
+ dev->local_fifo = dev->handler.offset;
+
/*
* Use the RFC 2734 default 1500 octets or the maximum payload
* as initial MTU
@@ -1542,24 +1500,31 @@ static int fwnet_probe(struct device *_dev)
net->mtu = min(1500U, max_mtu);
/* Set our hardware address while we're at it */
- put_unaligned_be64(card->guid, net->dev_addr);
- put_unaligned_be64(~0ULL, net->broadcast);
+ ha = (union fwnet_hwaddr *)net->dev_addr;
+ put_unaligned_be64(card->guid, &ha->uc.uniq_id);
+ ha->uc.max_rec = dev->card->max_receive;
+ ha->uc.sspd = dev->card->link_speed;
+ put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
+ put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
+
+ memset(net->broadcast, -1, net->addr_len);
+
ret = register_netdev(net);
if (ret)
goto out;
list_add_tail(&dev->dev_link, &fwnet_device_list);
- dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n",
+ dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
dev_name(card->device));
have_dev:
ret = fwnet_add_peer(dev, unit, device);
if (ret && allocated_netdev) {
unregister_netdev(net);
list_del(&dev->dev_link);
- }
out:
- if (ret && allocated_netdev)
+ fwnet_fifo_stop(dev);
free_netdev(net);
+ }
mutex_unlock(&fwnet_device_mutex);
@@ -1592,22 +1557,14 @@ static int fwnet_remove(struct device *_dev)
mutex_lock(&fwnet_device_mutex);
net = dev->netdev;
- if (net && peer->ip)
- arp_invalidate(net, peer->ip);
fwnet_remove_peer(peer, dev);
if (list_empty(&dev->peer_list)) {
unregister_netdev(net);
- if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
- fw_core_remove_address_handler(&dev->handler);
- if (dev->broadcast_rcv_context) {
- fw_iso_context_stop(dev->broadcast_rcv_context);
- fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
- dev->card);
- fw_iso_context_destroy(dev->broadcast_rcv_context);
- }
+ fwnet_fifo_stop(dev);
+
for (i = 0; dev->queued_datagrams && i < 5; i++)
ssleep(1);
WARN_ON(dev->queued_datagrams);
@@ -1646,6 +1603,14 @@ static const struct ieee1394_device_id fwnet_id_table[] = {
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC2734_SW_VERSION,
},
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .specifier_id = IANA_SPECIFIER_ID,
+ .version = RFC3146_SW_VERSION,
+ },
+#endif
{ }
};
@@ -1683,6 +1648,30 @@ static struct fw_descriptor rfc2374_unit_directory = {
.data = rfc2374_unit_directory_data
};
+#if IS_ENABLED(CONFIG_IPV6)
+static const u32 rfc3146_unit_directory_data[] = {
+ 0x00040000, /* directory_length */
+ 0x1200005e, /* unit_specifier_id: IANA */
+ 0x81000003, /* textual descriptor offset */
+ 0x13000002, /* unit_sw_version: RFC 3146 */
+ 0x81000005, /* textual descriptor offset */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49414e41, /* I A N A */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49507636, /* I P v 6 */
+};
+
+static struct fw_descriptor rfc3146_unit_directory = {
+ .length = ARRAY_SIZE(rfc3146_unit_directory_data),
+ .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ .data = rfc3146_unit_directory_data
+};
+#endif
+
static int __init fwnet_init(void)
{
int err;
@@ -1691,11 +1680,17 @@ static int __init fwnet_init(void)
if (err)
return err;
+#if IS_ENABLED(CONFIG_IPV6)
+ err = fw_core_add_descriptor(&rfc3146_unit_directory);
+ if (err)
+ goto out;
+#endif
+
fwnet_packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct fwnet_packet_task), 0, 0, NULL);
if (!fwnet_packet_task_cache) {
err = -ENOMEM;
- goto out;
+ goto out2;
}
err = driver_register(&fwnet_driver.driver);
@@ -1703,7 +1698,11 @@ static int __init fwnet_init(void)
return 0;
kmem_cache_destroy(fwnet_packet_task_cache);
+out2:
+#if IS_ENABLED(CONFIG_IPV6)
+ fw_core_remove_descriptor(&rfc3146_unit_directory);
out:
+#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
return err;
@@ -1714,11 +1713,14 @@ static void __exit fwnet_cleanup(void)
{
driver_unregister(&fwnet_driver.driver);
kmem_cache_destroy(fwnet_packet_task_cache);
+#if IS_ENABLED(CONFIG_IPV6)
+ fw_core_remove_descriptor(&rfc3146_unit_directory);
+#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);
MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
-MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
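
The hardware-address handling above (fwnet_hwaddr_fifo(), the ha->uc.* stores in fwnet_probe()) depends on union fwnet_hwaddr, which the patch series moves to <net/firewire.h>; that header sits outside the drivers/ diffstat. A sketch of the expected 16-byte layout, inferred from the fields used in this file (the definition in the header is authoritative):

/* Sketch of the RFC 3146 style link-layer address (uc = unicast).
 * Shown only to make the ha->uc.* accesses above easier to follow. */
union fwnet_hwaddr {
	u8 u[16];
	struct {
		__be64 uniq_id;		/* EUI-64 of the node */
		u8 max_rec;		/* max_rec from the config ROM */
		u8 sspd;		/* link speed code */
		__be16 fifo_hi;		/* upper 16 bits of the FIFO address */
		__be32 fifo_lo;		/* lower 32 bits of the FIFO address */
	} __packed uc;
};
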
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a3fde52840c..65c30ea8c1a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
+ struct cpl_t5_act_open_req *t5_req;
struct sk_buff *skb;
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
int wscale;
- int wrlen = roundup(sizeof *req, 16);
+ int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+ sizeof(struct cpl_act_open_req) :
+ sizeof(struct cpl_t5_act_open_req);
+ int wrlen = roundup(size, 16);
PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= WND_SCALE_EN(1);
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
- req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
- req->opt2 = cpu_to_be32(opt2);
+ if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ req->local_port = ep->com.local_addr.sin_port;
+ req->peer_port = ep->com.remote_addr.sin_port;
+ req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+ ep->dst, ep->l2t));
+ req->opt2 = cpu_to_be32(opt2);
+ } else {
+ t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(t5_req, 0);
+ OPCODE_TID(t5_req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ t5_req->local_port = ep->com.local_addr.sin_port;
+ t5_req->peer_port = ep->com.remote_addr.sin_port;
+ t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
+ t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
+ t5_req->opt0 = cpu_to_be64(opt0);
+ t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
+ t5_req->opt2 = cpu_to_be32(opt2);
+ }
+
set_bit(ACT_OPEN_REQ, &ep->com.history);
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
@@ -1676,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_TIMEDOUT:
break;
case CPL_ERR_TCAM_FULL:
+ dev->rdev.stats.tcam_full++;
if (dev->rdev.lldi.enable_fw_ofld_conn) {
mutex_lock(&dev->rdev.stats.lock);
- dev->rdev.stats.tcam_full++;
mutex_unlock(&dev->rdev.stats.lock);
send_fw_act_open_req(ep,
GET_TID_TID(GET_AOPEN_ATID(
@@ -2875,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
{
u32 l2info;
- u16 vlantag, len, hdr_len;
+ u16 vlantag, len, hdr_len, eth_hdr_len;
u8 intf;
struct cpl_rx_pkt *cpl = cplhdr(skb);
struct cpl_pass_accept_req *req;
struct tcp_options_received tmp_opt;
+ struct c4iw_dev *dev;
+ dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
/* Store values from cpl_rx_pkt in temporary location. */
vlantag = (__force u16) cpl->vlan;
len = (__force u16) cpl->len;
@@ -2896,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
*/
memset(&tmp_opt, 0, sizeof(tmp_opt));
tcp_clear_options(&tmp_opt);
- tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+ tcp_parse_options(skb, &tmp_opt, 0, NULL);
req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
@@ -2904,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
V_SYN_MAC_IDX(G_RX_MACIDX(
(__force int) htonl(l2info))) |
F_SYN_XACT_MATCH);
+ eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+ G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
+ G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
(__force int) htonl(l2info))) |
V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
(__force int) htons(hdr_len))) |
V_IP_HDR_LEN(G_RX_IPHDR_LEN(
(__force int) htons(hdr_len))) |
- V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
- (__force int) htonl(l2info))));
+ V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
req->vlan = (__force __be16) vlantag;
req->len = (__force __be16) len;
req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2999,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
u16 window;
struct port_info *pi;
struct net_device *pdev;
- u16 rss_qid;
+ u16 rss_qid, eth_hdr_len;
int step;
u32 tx_chan;
struct neighbour *neigh;
@@ -3028,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+ eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
+ G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
+ G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
+ if (eth_hdr_len == ETH_HLEN) {
eh = (struct ethhdr *)(req + 1);
iph = (struct iphdr *)(eh + 1);
} else {
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c..ae656016e1a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
#define DRV_VERSION "0.1"
MODULE_AUTHOR("Steve Wise");
-MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
+MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
+static int allow_db_fc_on_t5;
+module_param(allow_db_fc_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_fc_on_t5,
+ "Allow DB Flow Control on T5 (default = 0)");
+
+static int allow_db_coalescing_on_t5;
+module_param(allow_db_coalescing_on_t5, int, 0644);
+MODULE_PARM_DESC(allow_db_coalescing_on_t5,
+ "Allow DB Coalescing on T5 (default = 0)");
+
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
{
return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
- infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
+ infop->vr->cq.size > 0;
}
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
pci_name(infop->pdev));
return ERR_PTR(-ENOSYS);
}
+ if (!ocqp_supported(infop))
+ pr_info("%s: On-Chip Queues not supported on this device.\n",
+ pci_name(infop->pdev));
+
+ if (!is_t4(infop->adapter_type)) {
+ if (!allow_db_fc_on_t5) {
+ db_fc_threshold = 100000;
+ pr_info("DB Flow Control Disabled.\n");
+ }
+
+ if (!allow_db_coalescing_on_t5) {
+ db_coalescing_threshold = -1;
+ pr_info("DB Coalescing Disabled.\n");
+ }
+ }
+
devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
if (!devp) {
printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
int i;
if (!vers_printed++)
- printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
- DRV_VERSION);
+ pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
+ DRV_VERSION);
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx) {
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8..485183ad34c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}
-#define C4IW_WR_TO (10*HZ)
+#define C4IW_WR_TO (30*HZ)
struct c4iw_wr_wait {
struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
DEFINE_DMA_UNMAP_ADDR(mapping);
dma_addr_t dma_addr;
struct c4iw_dev *dev;
- int size;
};
static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
return wscale;
}
+static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
+ return infop->vr->ocq.size > 0;
+#else
+ return 0;
+#endif
+}
+
u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;
+extern int db_coalescing_threshold;
+extern int use_dsgl;
#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91..4cb8eb24497 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
* SOFTWARE.
*/
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include "iw_cxgb4.h"
+int use_dsgl = 1;
+module_param(use_dsgl, int, 0644);
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+
#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+#define C4IW_INLINE_THRESHOLD 128
-static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data)
+static int inline_threshold = C4IW_INLINE_THRESHOLD;
+module_param(inline_threshold, int, 0644);
+MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
+
+static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
+ u32 len, dma_addr_t data, int wait)
+{
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_sgl *sgl;
+ u8 wr_len;
+ int ret = 0;
+ struct c4iw_wr_wait wr_wait;
+
+ addr &= 0x7FFFFFF;
+
+ if (wait)
+ c4iw_init_wr_wait(&wr_wait);
+ wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
+
+ skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+ if (!skb)
+ return -ENOMEM;
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ memset(req, 0, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, 0);
+ req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
+ (wait ? FW_WR_COMPL(1) : 0));
+ req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
+ req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+ req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+ req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
+ req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+ req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
+ req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(req + 1);
+ sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_NSGE(1));
+ sgl->len0 = cpu_to_be32(len);
+ sgl->addr0 = cpu_to_be64(data);
+
+ ret = c4iw_ofld_send(rdev, skb);
+ if (ret)
+ return ret;
+ if (wait)
+ ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+ return ret;
+}
+
+static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
{
struct sk_buff *skb;
struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
u8 wr_len, *to_dp, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
+ __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+
+ if (is_t4(rdev->lldi.adapter_type))
+ cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+ else
+ cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
addr &= 0x7FFFFFF;
PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
req->wr.wr_mid = cpu_to_be32(
FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23));
+ req->cmd = cmd;
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
+int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+ u32 remain = len;
+ u32 dmalen;
+ int ret = 0;
+ dma_addr_t daddr;
+ dma_addr_t save;
+
+ daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
+ return -1;
+ save = daddr;
+
+ while (remain > inline_threshold) {
+ if (remain < T4_ULPTX_MAX_DMA) {
+ if (remain & ~T4_ULPTX_MIN_IO)
+ dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
+ else
+ dmalen = remain;
+ } else
+ dmalen = T4_ULPTX_MAX_DMA;
+ remain -= dmalen;
+ ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
+ !remain);
+ if (ret)
+ goto out;
+ addr += dmalen >> 5;
+ data += dmalen;
+ daddr += dmalen;
+ }
+ if (remain)
+ ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
+out:
+ dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
+ return ret;
+}
+
+/*
+ * write len bytes of data into addr (32B aligned address)
+ * If data is NULL, clear len bytes of memory to zero.
+ */
+static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
+ void *data)
+{
+ if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
+ if (len > inline_threshold) {
+ if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
+ printk_ratelimited(KERN_WARNING
+ "%s: dma map"
+ " failure (non fatal)\n",
+ pci_name(rdev->lldi.pdev));
+ return _c4iw_write_mem_inline(rdev, addr, len,
+ data);
+ } else
+ return 0;
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+ } else
+ return _c4iw_write_mem_inline(rdev, addr, len, data);
+}
+
/*
* Build and write a TPT entry.
* IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
struct c4iw_fr_page_list *c4pl;
struct c4iw_dev *dev = to_c4iw_dev(device);
dma_addr_t dma_addr;
- int size = sizeof *c4pl + page_list_len * sizeof(u64);
+ int pll_len = roundup(page_list_len * sizeof(u64), 32);
- c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size,
- &dma_addr, GFP_KERNEL);
+ c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
if (!c4pl)
return ERR_PTR(-ENOMEM);
+ c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
+ pll_len, &dma_addr,
+ GFP_KERNEL);
+ if (!c4pl->ibpl.page_list) {
+ kfree(c4pl);
+ return ERR_PTR(-ENOMEM);
+ }
dma_unmap_addr_set(c4pl, mapping, dma_addr);
c4pl->dma_addr = dma_addr;
c4pl->dev = dev;
- c4pl->size = size;
- c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
- c4pl->ibpl.max_page_list_len = page_list_len;
+ c4pl->ibpl.max_page_list_len = pll_len;
return &c4pl->ibpl;
}
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
{
struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
- dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size,
- c4pl, dma_unmap_addr(c4pl, mapping));
+ dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
+ c4pl->ibpl.max_page_list_len,
+ c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
+ kfree(c4pl);
}
int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7..7e94c9a656a 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
*/
if (addr >= rdev->oc_mw_pa)
vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
- else
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ else {
+ if (is_t5(rdev->lldi.adapter_type))
+ vma->vm_page_prot =
+ t4_pgprot_wc(vma->vm_page_prot);
+ else
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ }
ret = io_remap_pfn_range(vma, vma->vm_start,
addr >> PAGE_SHIFT,
len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
dev = to_c4iw_dev(ibdev);
memset(props, 0, sizeof *props);
memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
- props->hw_ver = dev->rdev.lldi.adapter_type;
+ props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
PDBG("%s dev 0x%p\n", __func__, dev);
- return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type);
+ return sprintf(buf, "%d\n",
+ CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70b1808a08f..5b059e2d80c 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
-int db_fc_threshold = 2000;
+int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
-MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
- "db flow control mode (default = 2000)");
+MODULE_PARM_DESC(db_fc_threshold,
+ "QP count/threshold that triggers"
+ " automatic db flow control mode (default = 1000)");
+
+int db_coalescing_threshold;
+module_param(db_coalescing_threshold, int, 0644);
+MODULE_PARM_DESC(db_coalescing_threshold,
+ "QP count/threshold that triggers"
+ " disabling db coalescing (default = 0)");
+
+static int max_fr_immd = T4_MAX_FR_IMMD;
+module_param(max_fr_immd, int, 0644);
+MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
- if (!ocqp_support || !t4_ocqp_supported())
+ if (!ocqp_support || !ocqp_supported(&rdev->lldi))
return -ENOSYS;
sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
if (!sq->dma_addr)
@@ -129,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
int wr_len;
struct c4iw_wr_wait wr_wait;
struct sk_buff *skb;
- int ret;
+ int ret = 0;
int eqsize;
wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -169,17 +180,14 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
}
if (user) {
- ret = alloc_oc_sq(rdev, &wq->sq);
- if (ret)
+ if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
goto free_hwaddr;
-
- ret = alloc_host_sq(rdev, &wq->sq);
- if (ret)
- goto free_sq;
- } else
+ } else {
ret = alloc_host_sq(rdev, &wq->sq);
if (ret)
goto free_hwaddr;
+ }
+
memset(wq->sq.queue, 0, wq->sq.memsize);
dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
@@ -534,7 +542,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
}
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
- struct ib_send_wr *wr, u8 *len16)
+ struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
struct fw_ri_immd *imdp;
@@ -556,28 +564,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
0xffffffff);
- WARN_ON(pbllen > T4_MAX_FR_IMMD);
- imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
- imdp->op = FW_RI_DATA_IMMD;
- imdp->r1 = 0;
- imdp->r2 = 0;
- imdp->immdlen = cpu_to_be32(pbllen);
- p = (__be64 *)(imdp + 1);
- rem = pbllen;
- for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
- *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
- rem -= sizeof *p;
- if (++p == (__be64 *)&sq->queue[sq->size])
- p = (__be64 *)sq->queue;
- }
- BUG_ON(rem < 0);
- while (rem) {
- *p = 0;
- rem -= sizeof *p;
- if (++p == (__be64 *)&sq->queue[sq->size])
- p = (__be64 *)sq->queue;
+
+ if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+ struct c4iw_fr_page_list *c4pl =
+ to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
+ struct fw_ri_dsgl *sglp;
+
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
+ cpu_to_be64((u64)
+ wr->wr.fast_reg.page_list->page_list[i]);
+ }
+
+ sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+ sglp->op = FW_RI_DATA_DSGL;
+ sglp->r1 = 0;
+ sglp->nsge = cpu_to_be16(1);
+ sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
+ sglp->len0 = cpu_to_be32(pbllen);
+
+ *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+ } else {
+ imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+ imdp->op = FW_RI_DATA_IMMD;
+ imdp->r1 = 0;
+ imdp->r2 = 0;
+ imdp->immdlen = cpu_to_be32(pbllen);
+ p = (__be64 *)(imdp + 1);
+ rem = pbllen;
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ *p = cpu_to_be64(
+ (u64)wr->wr.fast_reg.page_list->page_list[i]);
+ rem -= sizeof(*p);
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ BUG_ON(rem < 0);
+ while (rem) {
+ *p = 0;
+ rem -= sizeof(*p);
+ if (++p == (__be64 *)&sq->queue[sq->size])
+ p = (__be64 *)sq->queue;
+ }
+ *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
+ + pbllen, 16);
}
- *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
return 0;
}
@@ -678,7 +709,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_FAST_REG_MR:
fw_opcode = FW_RI_FR_NSMR_WR;
swsqe->opcode = FW_RI_FAST_REGISTER;
- err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+ err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
+ is_t5(
+ qhp->rhp->rdev.lldi.adapter_type) ?
+ 1 : 0);
break;
case IB_WR_LOCAL_INV:
if (wr->send_flags & IB_SEND_FENCE)
@@ -1450,6 +1484,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
rhp->db_state = NORMAL;
idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
}
+ if (db_coalescing_threshold >= 0)
+ if (rhp->qpcnt <= db_coalescing_threshold)
+ cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
spin_unlock_irq(&rhp->lock);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1561,11 +1598,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
spin_lock_irq(&rhp->lock);
if (rhp->db_state != NORMAL)
t4_disable_wq_db(&qhp->wq);
- if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+ rhp->qpcnt++;
+ if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
rhp->rdev.stats.db_state_transitions++;
rhp->db_state = FLOW_CONTROL;
idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
}
+ if (db_coalescing_threshold >= 0)
+ if (rhp->qpcnt > db_coalescing_threshold)
+ cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
spin_unlock_irq(&rhp->lock);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab2930..ebcb03bd1b7 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
sizeof(struct fw_ri_immd)) & ~31UL)
-#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
#endif
}
-static inline int t4_ocqp_supported(void)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
- return 1;
-#else
- return 0;
-#endif
-}
-
enum {
T4_SQ_ONCHIP = (1<<0),
};
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 832bc807ed2..cc9f1927a32 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -469,8 +469,7 @@ static int capidrv_add_ack(struct capidrv_ncci *nccip,
{
struct ncci_datahandle_queue *n, **pp;
- n = (struct ncci_datahandle_queue *)
- kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
+ n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
if (!n) {
printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
return -1;
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
index 1bb291021fd..c7a94713e9e 100644
--- a/drivers/isdn/hisax/fsm.c
+++ b/drivers/isdn/hisax/fsm.c
@@ -26,7 +26,7 @@ FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
{
int i;
- fsm->jumpmatrix = (FSMFNPTR *)
+ fsm->jumpmatrix =
kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
if (!fsm->jumpmatrix)
return -ENOMEM;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 90f34ae2b80..dc4574f735e 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1479,7 +1479,7 @@ int setup_hfcsx(struct IsdnCard *card)
release_region(cs->hw.hfcsx.base, 2);
return (0);
}
- if (!(cs->hw.hfcsx.extra = (void *)
+ if (!(cs->hw.hfcsx.extra =
kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) {
release_region(cs->hw.hfcsx.base, 2);
printk(KERN_WARNING "HFC-SX: unable to allocate memory\n");
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index babc621a07f..88d657dff47 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1385,7 +1385,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
skb->pkt_type = PACKET_OTHERHOST;
}
- if (ntohs(eth->h_proto) >= 1536)
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
return eth->h_proto;
rawp = skb->data;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 44225b186f6..83a23afb13a 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -185,7 +185,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
skb->pkt_type=PACKET_MULTICAST;
}
- if (ntohs(eth->h_proto) >= 1536)
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
return eth->h_proto;
rawp = skb->data;
@@ -228,9 +228,9 @@ static int ule_test_sndu( struct dvb_net_priv *p )
static int ule_bridged_sndu( struct dvb_net_priv *p )
{
struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
- if(ntohs(hdr->h_proto) < 1536) {
+ if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
- /* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */
+ /* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */
if(framelen != ntohs(hdr->h_proto)) {
return -1;
}
@@ -320,7 +320,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
(int) p->ule_sndu_type, l, total_ext_len);
#endif
- } while (p->ule_sndu_type < 1536);
+ } while (p->ule_sndu_type < ETH_P_802_3_MIN);
return total_ext_len;
}
@@ -712,7 +712,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
}
/* Handle ULE Extension Headers. */
- if (priv->ule_sndu_type < 1536) {
+ if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
/* There is an extension header. Handle it accordingly. */
int l = handle_ule_extensions(priv);
if (l < 0) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 87f1d39ca55..3835321b8cf 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -151,6 +151,7 @@ config MACVTAP
config VXLAN
tristate "Virtual eXtensible Local Area Network (VXLAN)"
depends on INET
+ select NET_IP_TUNNEL
---help---
This allows one to create vxlan virtual interfaces that provide
Layer 2 Networks over Layer 3 Networks. VXLAN is often used
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index f5a89164e77..4ce6ca5f3d3 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -106,20 +106,4 @@ config IPDDP_ENCAP
IP packets inside AppleTalk frames; this is useful if your Linux box
is stuck on an AppleTalk network (which hopefully contains a
decapsulator somewhere). Please see
- <file:Documentation/networking/ipddp.txt> for more information. If
- you said Y to "AppleTalk-IP driver support" above and you say Y
- here, then you cannot say Y to "AppleTalk-IP to IP Decapsulation
- support", below.
-
-config IPDDP_DECAP
- bool "Appletalk-IP to IP Decapsulation support"
- depends on IPDDP
- help
- If you say Y here, the AppleTalk-IP code will be able to decapsulate
- AppleTalk-IP frames to IP packets; this is useful if you want your
- Linux box to act as an Internet gateway for an AppleTalk network.
- Please see <file:Documentation/networking/ipddp.txt> for more
- information. If you said Y to "AppleTalk-IP driver support" above
- and you say Y here, then you cannot say Y to "IP to AppleTalk-IP
- Encapsulation support", above.
-
+ <file:Documentation/networking/ipddp.txt> for more information.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6bbd90e1123..11a8cb381b8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -796,9 +796,8 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mcast_work.work);
- rcu_read_lock();
+
bond_resend_igmp_join_requests(bond);
- rcu_read_unlock();
}
/*
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c..a966128c2a7 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -32,13 +32,6 @@ config CAIF_SPI_SYNC
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
-config CAIF_SHM
- tristate "CAIF shared memory protocol driver"
- depends on CAIF && U5500_MBOX
- default n
- ---help---
- The CAIF shared memory protocol driver for the STE UX5500 platform.
-
config CAIF_HSI
tristate "CAIF HSI transport driver"
depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560..15a9d2fc753 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -7,9 +7,5 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
-# Shared memory
-caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
-obj-$(CONFIG_CAIF_SHM) += caif_shm.o
-
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
deleted file mode 100644
index 89d76b7b325..00000000000
--- a/drivers/net/caif/caif_shm_u5500.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <mach/mbox-db5500.h>
-#include <net/caif/caif_shm.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
-
-#define MAX_SHM_INSTANCES 1
-
-enum {
- MBX_ACC0,
- MBX_ACC1,
- MBX_DSP
-};
-
-static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
-
-static unsigned int shm_start;
-static unsigned int shm_size;
-
-module_param(shm_size, uint , 0440);
-MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory");
-
-module_param(shm_start, uint , 0440);
-MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory");
-
-static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
-{
- /* Always block until msg is written successfully */
- mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
- return 0;
-}
-
-static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
- void *pshm_drv)
-{
- /*
- * For UX5500, we have only 1 SHM instance which uses MBX0
- * for communication with the peer modem
- */
- pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
-
- if (!pshm_dev->hmbx)
- return -ENODEV;
- else
- return 0;
-}
-
-static int __init caif_shmdev_init(void)
-{
- int i, result;
-
- /* Loop is currently overkill, there is only one instance */
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
-
- shmdev_lyr[i].shm_base_addr = shm_start;
- shmdev_lyr[i].shm_total_sz = shm_size;
-
- if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
- || (shmdev_lyr[i].shm_total_sz <= 0)) {
- pr_warn("ERROR,"
- "Shared memory Address and/or Size incorrect"
- ", Bailing out ...\n");
- result = -EINVAL;
- goto clean;
- }
-
- pr_info("SHM AREA (instance %d) STARTS"
- " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
-
- shmdev_lyr[i].shm_id = i;
- shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
- shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
-
- /*
- * Finally, CAIF core module is called with details in place:
- * 1. SHM base address
- * 2. SHM size
- * 3. MBX handle
- */
- result = caif_shmcore_probe(&shmdev_lyr[i]);
- if (result) {
- pr_warn("ERROR[%d],"
- "Could not probe SHM core (instance %d)"
- " Bailing out ...\n", result, i);
- goto clean;
- }
- }
-
- return 0;
-
-clean:
- /*
- * For now, we assume that even if one instance of SHM fails, we bail
- * out of the driver support completely. For this, we need to release
- * any memory allocated and unregister any instance of SHM net device.
- */
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
- if (shmdev_lyr[i].pshm_netdev)
- unregister_netdev(shmdev_lyr[i].pshm_netdev);
- }
- return result;
-}
-
-static void __exit caif_shmdev_exit(void)
-{
- int i;
-
- for (i = 0; i < MAX_SHM_INSTANCES; i++) {
- caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
- kfree((void *)shmdev_lyr[i].shm_base_addr);
- }
-
-}
-
-module_init(caif_shmdev_init);
-module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
deleted file mode 100644
index bce8bac311c..00000000000
--- a/drivers/net/caif/caif_shmcore.c
+++ /dev/null
@@ -1,747 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
- * Daniel Martensson / daniel.martensson@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/io.h>
-
-#include <net/caif/caif_device.h>
-#include <net/caif/caif_shm.h>
-
-#define NR_TX_BUF 6
-#define NR_RX_BUF 6
-#define TX_BUF_SZ 0x2000
-#define RX_BUF_SZ 0x2000
-
-#define CAIF_NEEDED_HEADROOM 32
-
-#define CAIF_FLOW_ON 1
-#define CAIF_FLOW_OFF 0
-
-#define LOW_WATERMARK 3
-#define HIGH_WATERMARK 4
-
-/* Maximum number of CAIF buffers per shared memory buffer. */
-#define SHM_MAX_FRMS_PER_BUF 10
-
-/*
- * Size in bytes of the descriptor area
- * (With end of descriptor signalling)
- */
-#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
- sizeof(struct shm_pck_desc))
-
-/*
- * Offset to the first CAIF frame within a shared memory buffer.
- * Aligned on 32 bytes.
- */
-#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
-
-/* Number of bytes for CAIF shared memory header. */
-#define SHM_HDR_LEN 1
-
-/* Number of padding bytes for the complete CAIF frame. */
-#define SHM_FRM_PAD_LEN 4
-
-#define CAIF_MAX_MTU 4096
-
-#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
-#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
-
-#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
-#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
-
-#define SHM_FULL_MASK (0x0F << 0)
-#define SHM_EMPTY_MASK (0x0F << 4)
-
-struct shm_pck_desc {
- /*
- * Offset from start of shared memory area to start of
- * shared memory CAIF frame.
- */
- u32 frm_ofs;
- u32 frm_len;
-};
-
-struct buf_list {
- unsigned char *desc_vptr;
- u32 phy_addr;
- u32 index;
- u32 len;
- u32 frames;
- u32 frm_ofs;
- struct list_head list;
-};
-
-struct shm_caif_frm {
- /* Number of bytes of padding before the CAIF frame. */
- u8 hdr_ofs;
-};
-
-struct shmdrv_layer {
- /* caif_dev_common must always be first in the structure*/
- struct caif_dev_common cfdev;
-
- u32 shm_tx_addr;
- u32 shm_rx_addr;
- u32 shm_base_addr;
- u32 tx_empty_available;
- spinlock_t lock;
-
- struct list_head tx_empty_list;
- struct list_head tx_pend_list;
- struct list_head tx_full_list;
- struct list_head rx_empty_list;
- struct list_head rx_pend_list;
- struct list_head rx_full_list;
-
- struct workqueue_struct *pshm_tx_workqueue;
- struct workqueue_struct *pshm_rx_workqueue;
-
- struct work_struct shm_tx_work;
- struct work_struct shm_rx_work;
-
- struct sk_buff_head sk_qhead;
- struct shmdev_layer *pshm_dev;
-};
-
-static int shm_netdev_open(struct net_device *shm_netdev)
-{
- netif_wake_queue(shm_netdev);
- return 0;
-}
-
-static int shm_netdev_close(struct net_device *shm_netdev)
-{
- netif_stop_queue(shm_netdev);
- return 0;
-}
-
-int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
-{
- struct buf_list *pbuf;
- struct shmdrv_layer *pshm_drv;
- struct list_head *pos;
- u32 avail_emptybuff = 0;
- unsigned long flags = 0;
-
- pshm_drv = priv;
-
- /* Check for received buffers. */
- if (mbx_msg & SHM_FULL_MASK) {
- int idx;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check whether we have any outstanding buffers. */
- if (list_empty(&pshm_drv->rx_empty_list)) {
-
- /* Release spin lock. */
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* We print even in IRQ context... */
- pr_warn("No empty Rx buffers to fill: "
- "mbx_msg:%x\n", mbx_msg);
-
- /* Bail out. */
- goto err_sync;
- }
-
- pbuf =
- list_entry(pshm_drv->rx_empty_list.next,
- struct buf_list, list);
- idx = pbuf->index;
-
- /* Check buffer synchronization. */
- if (idx != SHM_GET_FULL(mbx_msg)) {
-
- /* We print even in IRQ context... */
- pr_warn(
- "phyif_shm_mbx_msg_cb: RX full out of sync:"
- " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
- idx, mbx_msg, SHM_GET_FULL(mbx_msg));
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Bail out. */
- goto err_sync;
- }
-
- list_del_init(&pbuf->list);
- list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Schedule RX work queue. */
- if (!work_pending(&pshm_drv->shm_rx_work))
- queue_work(pshm_drv->pshm_rx_workqueue,
- &pshm_drv->shm_rx_work);
- }
-
- /* Check for emptied buffers. */
- if (mbx_msg & SHM_EMPTY_MASK) {
- int idx;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check whether we have any outstanding buffers. */
- if (list_empty(&pshm_drv->tx_full_list)) {
-
- /* We print even in IRQ context... */
- pr_warn("No TX to empty: msg:%x\n", mbx_msg);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Bail out. */
- goto err_sync;
- }
-
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- idx = pbuf->index;
-
- /* Check buffer synchronization. */
- if (idx != SHM_GET_EMPTY(mbx_msg)) {
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* We print even in IRQ context... */
- pr_warn("TX empty "
- "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
-
- /* Bail out. */
- goto err_sync;
- }
- list_del_init(&pbuf->list);
-
- /* Reset buffer parameters. */
- pbuf->frames = 0;
- pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
-
- list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
-
- /* Check the available no. of buffers in the empty list */
- list_for_each(pos, &pshm_drv->tx_empty_list)
- avail_emptybuff++;
-
- /* Check whether we have to wake up the transmitter. */
- if ((avail_emptybuff > HIGH_WATERMARK) &&
- (!pshm_drv->tx_empty_available)) {
- pshm_drv->tx_empty_available = 1;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_ON);
-
-
- /* Schedule the work queue. if required */
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue,
- &pshm_drv->shm_tx_work);
- } else
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- }
-
- return 0;
-
-err_sync:
- return -EIO;
-}
-
-static void shm_rx_work_func(struct work_struct *rx_work)
-{
- struct shmdrv_layer *pshm_drv;
- struct buf_list *pbuf;
- unsigned long flags = 0;
- struct sk_buff *skb;
- char *p;
- int ret;
-
- pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
-
- while (1) {
-
- struct shm_pck_desc *pck_desc;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check for received buffers. */
- if (list_empty(&pshm_drv->rx_full_list)) {
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- break;
- }
-
- pbuf =
- list_entry(pshm_drv->rx_full_list.next, struct buf_list,
- list);
- list_del_init(&pbuf->list);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- /* Retrieve pointer to start of the packet descriptor area. */
- pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
-
- /*
- * Check whether descriptor contains a CAIF shared memory
- * frame.
- */
- while (pck_desc->frm_ofs) {
- unsigned int frm_buf_ofs;
- unsigned int frm_pck_ofs;
- unsigned int frm_pck_len;
- /*
- * Check whether offset is within buffer limits
- * (lower).
- */
- if (pck_desc->frm_ofs <
- (pbuf->phy_addr - pshm_drv->shm_base_addr))
- break;
- /*
- * Check whether offset is within buffer limits
- * (higher).
- */
- if (pck_desc->frm_ofs >
- ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
- pbuf->len))
- break;
-
- /* Calculate offset from start of buffer. */
- frm_buf_ofs =
- pck_desc->frm_ofs - (pbuf->phy_addr -
- pshm_drv->shm_base_addr);
-
- /*
- * Calculate offset and length of CAIF packet while
- * taking care of the shared memory header.
- */
- frm_pck_ofs =
- frm_buf_ofs + SHM_HDR_LEN +
- (*(pbuf->desc_vptr + frm_buf_ofs));
- frm_pck_len =
- (pck_desc->frm_len - SHM_HDR_LEN -
- (*(pbuf->desc_vptr + frm_buf_ofs)));
-
- /* Check whether CAIF packet is within buffer limits */
- if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
- break;
-
- /* Get a suitable CAIF packet and copy in data. */
- skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
- frm_pck_len + 1);
-
- if (skb == NULL) {
- pr_info("OOM: Try next frame in descriptor\n");
- break;
- }
-
- p = skb_put(skb, frm_pck_len);
- memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = pshm_drv->pshm_dev->pshm_netdev;
-
- /* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
-
- if (!ret) {
- pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_packets++;
- pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_bytes += pck_desc->frm_len;
- } else
- ++pshm_drv->pshm_dev->pshm_netdev->stats.
- rx_dropped;
- /* Move to next packet descriptor. */
- pck_desc++;
- }
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
- list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
-
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- }
-
- /* Schedule the work queue. if required */
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
-}
-
-static void shm_tx_work_func(struct work_struct *tx_work)
-{
- u32 mbox_msg;
- unsigned int frmlen, avail_emptybuff, append = 0;
- unsigned long flags = 0;
- struct buf_list *pbuf = NULL;
- struct shmdrv_layer *pshm_drv;
- struct shm_caif_frm *frm;
- struct sk_buff *skb;
- struct shm_pck_desc *pck_desc;
- struct list_head *pos;
-
- pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
-
- do {
- /* Initialize mailbox message. */
- mbox_msg = 0x00;
- avail_emptybuff = 0;
-
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
- /* Check for pending receive buffers. */
- if (!list_empty(&pshm_drv->rx_pend_list)) {
-
- pbuf = list_entry(pshm_drv->rx_pend_list.next,
- struct buf_list, list);
-
- list_del_init(&pbuf->list);
- list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
- /*
- * Value index is never changed,
- * so read access should be safe.
- */
- mbox_msg |= SHM_SET_EMPTY(pbuf->index);
- }
-
- skb = skb_peek(&pshm_drv->sk_qhead);
-
- if (skb == NULL)
- goto send_msg;
- /* Check the available no. of buffers in the empty list */
- list_for_each(pos, &pshm_drv->tx_empty_list)
- avail_emptybuff++;
-
- if ((avail_emptybuff < LOW_WATERMARK) &&
- pshm_drv->tx_empty_available) {
- /* Update blocking condition. */
- pshm_drv->tx_empty_available = 0;
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
- pshm_drv->cfdev.flowctrl
- (pshm_drv->pshm_dev->pshm_netdev,
- CAIF_FLOW_OFF);
- spin_lock_irqsave(&pshm_drv->lock, flags);
- }
- /*
- * We simply return back to the caller if we do not have space
- * either in Tx pending list or Tx empty list. In this case,
- * we hold the received skb in the skb list, waiting to
- * be transmitted once Tx buffers become available
- */
- if (list_empty(&pshm_drv->tx_empty_list))
- goto send_msg;
-
- /* Get the first free Tx buffer. */
- pbuf = list_entry(pshm_drv->tx_empty_list.next,
- struct buf_list, list);
- do {
- if (append) {
- skb = skb_peek(&pshm_drv->sk_qhead);
- if (skb == NULL)
- break;
- }
-
- frm = (struct shm_caif_frm *)
- (pbuf->desc_vptr + pbuf->frm_ofs);
-
- frm->hdr_ofs = 0;
- frmlen = 0;
- frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
-
- /* Add tail padding if needed. */
- if (frmlen % SHM_FRM_PAD_LEN)
- frmlen += SHM_FRM_PAD_LEN -
- (frmlen % SHM_FRM_PAD_LEN);
-
- /*
- * Verify that packet, header and additional padding
- * can fit within the buffer frame area.
- */
- if (frmlen >= (pbuf->len - pbuf->frm_ofs))
- break;
-
- if (!append) {
- list_del_init(&pbuf->list);
- append = 1;
- }
-
- skb = skb_dequeue(&pshm_drv->sk_qhead);
- if (skb == NULL)
- break;
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, pbuf->desc_vptr +
- pbuf->frm_ofs + SHM_HDR_LEN +
- frm->hdr_ofs, skb->len);
-
- pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
- pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
- frmlen;
- dev_kfree_skb_irq(skb);
-
- /* Fill in the shared memory packet descriptor area. */
- pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
- /* Forward to current frame. */
- pck_desc += pbuf->frames;
- pck_desc->frm_ofs = (pbuf->phy_addr -
- pshm_drv->shm_base_addr) +
- pbuf->frm_ofs;
- pck_desc->frm_len = frmlen;
- /* Terminate packet descriptor area. */
- pck_desc++;
- pck_desc->frm_ofs = 0;
- /* Update buffer parameters. */
- pbuf->frames++;
- pbuf->frm_ofs += frmlen + (frmlen % 32);
-
- } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
-
- /* Assign buffer as full. */
- list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
- append = 0;
- mbox_msg |= SHM_SET_FULL(pbuf->index);
-send_msg:
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
- if (mbox_msg)
- pshm_drv->pshm_dev->pshmdev_mbxsend
- (pshm_drv->pshm_dev->shm_id, mbox_msg);
- } while (mbox_msg);
-}
-
-static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
-{
- struct shmdrv_layer *pshm_drv;
-
- pshm_drv = netdev_priv(shm_netdev);
-
- skb_queue_tail(&pshm_drv->sk_qhead, skb);
-
- /* Schedule Tx work queue. for deferred processing of skbs*/
- if (!work_pending(&pshm_drv->shm_tx_work))
- queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
- return 0;
-}
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = shm_netdev_open,
- .ndo_stop = shm_netdev_close,
- .ndo_start_xmit = shm_netdev_tx,
-};
-
-static void shm_netdev_setup(struct net_device *pshm_netdev)
-{
- struct shmdrv_layer *pshm_drv;
- pshm_netdev->netdev_ops = &netdev_ops;
-
- pshm_netdev->mtu = CAIF_MAX_MTU;
- pshm_netdev->type = ARPHRD_CAIF;
- pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
- pshm_netdev->tx_queue_len = 0;
- pshm_netdev->destructor = free_netdev;
-
- pshm_drv = netdev_priv(pshm_netdev);
-
- /* Initialize structures in a clean state. */
- memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
-
- pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
-}
-
-int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
-{
- int result, j;
- struct shmdrv_layer *pshm_drv = NULL;
-
- pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
- "cfshm%d", shm_netdev_setup);
- if (!pshm_dev->pshm_netdev)
- return -ENOMEM;
-
- pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
- pshm_drv->pshm_dev = pshm_dev;
-
- /*
- * Initialization starts with the verification of the
- * availability of MBX driver by calling its setup function.
- * MBX driver must be available by this time for proper
- * functioning of SHM driver.
- */
- if ((pshm_dev->pshmdev_mbxsetup
- (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
- pr_warn("Could not config. SHM Mailbox,"
- " Bailing out.....\n");
- free_netdev(pshm_dev->pshm_netdev);
- return -ENODEV;
- }
-
- skb_queue_head_init(&pshm_drv->sk_qhead);
-
- pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
- " INSTANCE AT pshm_drv =0x%p\n",
- pshm_drv->pshm_dev->shm_id, pshm_drv);
-
- if (pshm_dev->shm_total_sz <
- (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
-
- pr_warn("ERROR, Amount of available"
- " Phys. SHM cannot accommodate current SHM "
- "driver configuration, Bailing out ...\n");
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
-
- pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
- pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
-
- if (pshm_dev->shm_loopback)
- pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
- else
- pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
- (NR_TX_BUF * TX_BUF_SZ);
-
- spin_lock_init(&pshm_drv->lock);
- INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
- INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
- INIT_LIST_HEAD(&pshm_drv->tx_full_list);
-
- INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
- INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
- INIT_LIST_HEAD(&pshm_drv->rx_full_list);
-
- INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
- INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
-
- pshm_drv->pshm_tx_workqueue =
- create_singlethread_workqueue("shm_tx_work");
- pshm_drv->pshm_rx_workqueue =
- create_singlethread_workqueue("shm_rx_work");
-
- for (j = 0; j < NR_TX_BUF; j++) {
- struct buf_list *tx_buf =
- kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
- if (tx_buf == NULL) {
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
- tx_buf->index = j;
- tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
- tx_buf->len = TX_BUF_SZ;
- tx_buf->frames = 0;
- tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
-
- if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
- else
- /*
- * FIXME: the result of ioremap is not a pointer - arnd
- */
- tx_buf->desc_vptr =
- ioremap(tx_buf->phy_addr, TX_BUF_SZ);
-
- list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
- }
-
- for (j = 0; j < NR_RX_BUF; j++) {
- struct buf_list *rx_buf =
- kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
- if (rx_buf == NULL) {
- free_netdev(pshm_dev->pshm_netdev);
- return -ENOMEM;
- }
- rx_buf->index = j;
- rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
- rx_buf->len = RX_BUF_SZ;
-
- if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
- else
- rx_buf->desc_vptr =
- ioremap(rx_buf->phy_addr, RX_BUF_SZ);
- list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
- }
-
- pshm_drv->tx_empty_available = 1;
- result = register_netdev(pshm_dev->pshm_netdev);
- if (result)
- pr_warn("ERROR[%d], SHM could not, "
- "register with NW FRMWK Bailing out ...\n", result);
-
- return result;
-}
-
-void caif_shmcore_remove(struct net_device *pshm_netdev)
-{
- struct buf_list *pbuf;
- struct shmdrv_layer *pshm_drv = NULL;
-
- pshm_drv = netdev_priv(pshm_netdev);
-
- while (!(list_empty(&pshm_drv->tx_pend_list))) {
- pbuf =
- list_entry(pshm_drv->tx_pend_list.next,
- struct buf_list, list);
-
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->tx_full_list))) {
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->tx_empty_list))) {
- pbuf =
- list_entry(pshm_drv->tx_empty_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_full_list))) {
- pbuf =
- list_entry(pshm_drv->tx_full_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_pend_list))) {
- pbuf =
- list_entry(pshm_drv->tx_pend_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- while (!(list_empty(&pshm_drv->rx_empty_list))) {
- pbuf =
- list_entry(pshm_drv->rx_empty_list.next,
- struct buf_list, list);
- list_del(&pbuf->list);
- kfree(pbuf);
- }
-
- /* Destroy work queues. */
- destroy_workqueue(pshm_drv->pshm_tx_workqueue);
- destroy_workqueue(pshm_drv->pshm_rx_workqueue);
-
- unregister_netdev(pshm_netdev);
-}
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9862b2e0764..e456b70933c 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5
+ depends on ARM
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 44f363792b5..db52f4414de 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
@@ -155,19 +156,20 @@ struct at91_priv {
canid_t mb0_id;
};
-static const struct at91_devtype_data at91_devtype_data[] = {
- [AT91_DEVTYPE_SAM9263] = {
- .rx_first = 1,
- .rx_split = 8,
- .rx_last = 11,
- .tx_shift = 2,
- },
- [AT91_DEVTYPE_SAM9X5] = {
- .rx_first = 0,
- .rx_split = 4,
- .rx_last = 5,
- .tx_shift = 1,
- },
+static const struct at91_devtype_data at91_at91sam9263_data = {
+ .rx_first = 1,
+ .rx_split = 8,
+ .rx_last = 11,
+ .tx_shift = 2,
+ .type = AT91_DEVTYPE_SAM9263,
+};
+
+static const struct at91_devtype_data at91_at91sam9x5_data = {
+ .rx_first = 0,
+ .rx_split = 4,
+ .rx_last = 5,
+ .tx_shift = 1,
+ .type = AT91_DEVTYPE_SAM9X5,
};
static const struct can_bittiming_const at91_bittiming_const = {
@@ -1249,10 +1251,42 @@ static struct attribute_group at91_sysfs_attr_group = {
.attrs = at91_sysfs_attrs,
};
+#if defined(CONFIG_OF)
+static const struct of_device_id at91_can_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9x5-can",
+ .data = &at91_at91sam9x5_data,
+ }, {
+ .compatible = "atmel,at91sam9263-can",
+ .data = &at91_at91sam9263_data,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
+#else
+#define at91_can_dt_ids NULL
+#endif
+
+static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+
+ match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
+ if (!match) {
+ dev_err(&pdev->dev, "no matching node found in dtb\n");
+ return NULL;
+ }
+ return (const struct at91_devtype_data *)match->data;
+ }
+ return (const struct at91_devtype_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
static int at91_can_probe(struct platform_device *pdev)
{
const struct at91_devtype_data *devtype_data;
- enum at91_devtype devtype;
struct net_device *dev;
struct at91_priv *priv;
struct resource *res;
@@ -1260,8 +1294,12 @@ static int at91_can_probe(struct platform_device *pdev)
void __iomem *addr;
int err, irq;
- devtype = pdev->id_entry->driver_data;
- devtype_data = &at91_devtype_data[devtype];
+ devtype_data = at91_can_get_driver_data(pdev);
+ if (!devtype_data) {
+ dev_err(&pdev->dev, "no driver data\n");
+ err = -ENODEV;
+ goto exit;
+ }
clk = clk_get(&pdev->dev, "can_clk");
if (IS_ERR(clk)) {
@@ -1310,7 +1348,6 @@ static int at91_can_probe(struct platform_device *pdev)
priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
- priv->devtype_data.type = devtype;
priv->clk = clk;
priv->pdata = pdev->dev.platform_data;
priv->mb0_id = 0x7ff;
@@ -1373,10 +1410,10 @@ static int at91_can_remove(struct platform_device *pdev)
static const struct platform_device_id at91_can_id_table[] = {
{
.name = "at91_can",
- .driver_data = AT91_DEVTYPE_SAM9263,
+ .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
}, {
.name = "at91sam9x5_can",
- .driver_data = AT91_DEVTYPE_SAM9X5,
+ .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
}, {
/* sentinel */
}
@@ -1389,6 +1426,7 @@ static struct platform_driver at91_can_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
+ .of_match_table = at91_can_dt_ids,
},
.id_table = at91_can_id_table,
};
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 6a0532176b6..d4a15e82bfc 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -412,7 +412,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
return 0;
}
-irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
+static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct bfin_can_priv *priv = netdev_priv(dev);
@@ -504,7 +504,7 @@ static int bfin_can_close(struct net_device *dev)
return 0;
}
-struct net_device *alloc_bfin_candev(void)
+static struct net_device *alloc_bfin_candev(void)
{
struct net_device *dev;
struct bfin_can_priv *priv;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index f32b9fc6a98..2b620c8aa13 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -1138,9 +1138,11 @@ static int mcp251x_can_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_PM
-static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+
+static int mcp251x_can_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
struct net_device *net = priv->net;
@@ -1170,8 +1172,9 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
return 0;
}
-static int mcp251x_can_resume(struct spi_device *spi)
+static int mcp251x_can_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
@@ -1191,9 +1194,13 @@ static int mcp251x_can_resume(struct spi_device *spi)
enable_irq(spi->irq);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
+ mcp251x_can_resume);
+#define MCP251X_PM_OPS (&mcp251x_can_pm_ops)
+
#else
-#define mcp251x_can_suspend NULL
-#define mcp251x_can_resume NULL
+#define MCP251X_PM_OPS NULL
#endif
static const struct spi_device_id mcp251x_id_table[] = {
@@ -1207,29 +1214,15 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static struct spi_driver mcp251x_can_driver = {
.driver = {
.name = DEVICE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .pm = MCP251X_PM_OPS,
},
.id_table = mcp251x_id_table,
.probe = mcp251x_can_probe,
.remove = mcp251x_can_remove,
- .suspend = mcp251x_can_suspend,
- .resume = mcp251x_can_resume,
};
-
-static int __init mcp251x_can_init(void)
-{
- return spi_register_driver(&mcp251x_can_driver);
-}
-
-static void __exit mcp251x_can_exit(void)
-{
- spi_unregister_driver(&mcp251x_can_driver);
-}
-
-module_init(mcp251x_can_init);
-module_exit(mcp251x_can_exit);
+module_spi_driver(mcp251x_can_driver);
MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
"Christian Pellegrin <chripell@evolware.org>");
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a175d0be1ae..ee705771bd2 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev)
/* allocate a new skb for next time receive */
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
- if (!new_skb) {
- pr_notice("init: low on mem - packet dropped\n");
+ if (!new_skb)
goto init_error;
- }
+
skb_reserve(new_skb, NET_IP_ALIGN);
/* Invalidate the data cache of skb->data range when it is write back
* cache. It will prevent overwriting the new data from DMA
@@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev)
new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
if (!new_skb) {
- netdev_notice(dev, "rx: low on mem - packet dropped\n");
dev->stats.rx_dropped++;
goto out;
}
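The message removals above (and in the LANCE and Atheros drivers further down) follow the usual rationale that a failed allocation already produces the allocator's own warning, so the rx path only has to account the drop. A small refill helper in that style, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* rx refill: on failure just count the drop, no extra OOM message */
static struct sk_buff *foo_rx_refill(struct net_device *dev, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb) {
		dev->stats.rx_dropped++;
		return NULL;
	}

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;
}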
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 0be2195e503..269295403fc 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->tx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(&dev->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error3;
}
- memset(greth->tx_bd_base, 0, 1024);
-
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
- 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL);
-
+ greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL | __GFP_ZERO);
if (!greth->rx_bd_base) {
- if (netif_msg_probe(greth))
- dev_err(greth->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM;
goto error4;
}
- memset(greth->rx_bd_base, 0, 1024);
-
/* Get MAC address from: module param, OF property or ID prom */
for (i = 0; i < 6; i++) {
if (macaddr[i] != 0)
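The greth change, like the bcm63xx_enet and bnx2 ones later in this series, relies on dma_alloc_coherent() honouring __GFP_ZERO, which makes the follow-up memset() unnecessary. A minimal sketch of the resulting allocation helper (names hypothetical):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* descriptor ring allocation: the allocator zeroes the buffer itself */
static void *foo_alloc_ring(struct device *dev, size_t size,
			    dma_addr_t *handle)
{
	void *ring = dma_alloc_coherent(dev, size, handle,
					GFP_KERNEL | __GFP_ZERO);

	if (!ring)
		return NULL;

	/* no separate memset(ring, 0, size) needed any more */
	return ring;
}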
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 6e722dc37db..65926a95657 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- printk ("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3789affbc0e..0866e762743 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev)
struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
if (!skb) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 60e2b701afe..9793767996a 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
dev->stats.rx_packets++;
} else {
am_writeword (dev, hdraddr + 2, RMD_OWN);
- printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 98f4522fd17..c178eb4c816 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_warn(dev, "Memory squeeze, deferring packet\n");
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 84219df72f5..e8d0ef508f4 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
@@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
- if (IS_ERR(atarilance_dev))
- return PTR_ERR(atarilance_dev);
- return 0;
+ return PTR_RET(atarilance_dev);
}
static void __exit atarilance_module_exit(void)
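PTR_RET() simply folds the "return the errno encoded in an ERR_PTR, otherwise 0" tail seen in these module_init functions into one call. Its effect, spelled out as an equivalent helper (the name here is illustrative):

#include <linux/err.h>

/* what PTR_RET(ptr) evaluates to, written out long-hand */
static inline int foo_ptr_ret(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* propagate the encoded errno */
	return 0;			/* valid pointer: report success */
}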
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index de774d41914..688aede742c 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev)
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
if (skb == NULL) {
- netdev_err(dev, "Memory squeeze, dropping packet.\n");
dev->stats.rx_dropped++;
continue;
}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index baca0bd1b39..3d86ffeb4e1 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == 0) {
- printk("%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
*rds_ptr(rd, mblength, lp->type) = 0;
*rds_ptr(rd, rmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 9af3c307862..a51497c9d2a 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance;
int __init init_module(void)
{
dev_mvme147_lance = mvme147lance_probe(-1);
- if (IS_ERR(dev_mvme147_lance))
- return PTR_ERR(dev_mvme147_lance);
- return 0;
+ return PTR_RET(dev_mvme147_lance);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 013b6510853..26fc0ce0faa 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
- return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+ return PTR_RET(dev_ni65);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 797f847edf1..ed213072764 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
if (skb == NULL) {
- netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 74b3891b648..4375abe61da 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev )
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
- dev->name ));
-
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
@@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev;
int __init init_module(void)
{
sun3lance_dev = sun3lance_probe(-1);
- if (IS_ERR(sun3lance_dev))
- return PTR_ERR(sun3lance_dev);
- return 0;
+ return PTR_RET(sun3lance_dev);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 6a40290d372..f47b780892e 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev)
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
dma_alloc_coherent(&op->dev,
sizeof(struct lance_init_block),
&lp->init_block_dvma, GFP_ATOMIC);
- if (!lp->init_block_mem) {
- printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+ if (!lp->init_block_mem)
goto fail;
- }
+
lp->pio_buffer = 0;
lp->init_ring = lance_init_ring_dvma;
lp->rx = lance_rx_dvma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a206779c68c..4ce8ceb6220 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
/* Allocate the DMA ring buffers */
mp->tx_ring = dma_alloc_coherent(mp->device,
- N_TX_RING * MACE_BUFF_SIZE,
- &mp->tx_ring_phys, GFP_KERNEL);
- if (mp->tx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+ N_TX_RING * MACE_BUFF_SIZE,
+ &mp->tx_ring_phys, GFP_KERNEL);
+ if (mp->tx_ring == NULL)
goto out1;
- }
mp->rx_ring = dma_alloc_coherent(mp->device,
- N_RX_RING * MACE_BUFF_SIZE,
- &mp->rx_ring_phys, GFP_KERNEL);
- if (mp->rx_ring == NULL) {
- printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+ N_RX_RING * MACE_BUFF_SIZE,
+ &mp->rx_ring_phys, GFP_KERNEL);
+ if (mp->rx_ring == NULL)
goto out2;
- }
mace_dma_off(dev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index f73d5609439..7e0a822289c 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
- if (skb == NULL) {
- netdev_warn(netdev,
- "Memory squeeze, deferring packet\n");
+ if (skb == NULL)
goto skip_pkt;
- }
+
memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
skb_put(skb, packet_size);
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5b0d9931c72..9948fee28ae 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2774,7 +2774,7 @@ static int atl1_close(struct net_device *netdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int atl1_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1278b47022e..a046b6ff847 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
/* alloc new buffer */
skb = netdev_alloc_skb_ip_align(netdev, rx_size);
if (NULL == skb) {
- printk(KERN_WARNING
- "%s: Mem squeeze, deferring packet.\n",
- netdev->name);
/*
* Check that some rx space is free. If not,
* free one and mark stats->rx_dropped++.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e81..0b3e23ec37f 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
- memset(p, 0, size);
priv->rx_desc_alloc_size = size;
priv->rx_desc_cpu = p;
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!p) {
- dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
- memset(p, 0, size);
priv->tx_desc_alloc_size = size;
priv->tx_desc_cpu = p;
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
struct mii_bus *bus;
const char *clk_name;
- unsigned int iomem_size;
int i, ret;
/* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- iomem_size = resource_size(res_mem);
- if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
- ret = -EBUSY;
- goto out;
- }
-
- priv->base = ioremap(res_mem->start, iomem_size);
+ priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
if (priv->base == NULL) {
ret = -ENOMEM;
- goto out_release_mem;
+ goto out;
}
+
dev->irq = priv->irq = res_irq->start;
priv->irq_rx = res_irq_rx->start;
priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->mac_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(priv->mac_clk)) {
ret = PTR_ERR(priv->mac_clk);
- goto out_unmap;
+ goto out;
}
- clk_enable(priv->mac_clk);
+ clk_prepare_enable(priv->mac_clk);
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->phy_clk = NULL;
goto out_put_clk_mac;
}
- clk_enable(priv->phy_clk);
+ clk_prepare_enable(priv->phy_clk);
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
* if a slave is not present on hw */
bus->phy_mask = ~(1 << priv->phy_id);
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
return 0;
out_unregister_mdio:
- if (priv->mii_bus) {
+ if (priv->mii_bus)
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
- }
out_free_mdio:
if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
out_put_clk_mac:
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
-
-out_unmap:
- iounmap(priv->base);
-
-out_release_mem:
- release_mem_region(res_mem->start, iomem_size);
out:
free_netdev(dev);
return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
- struct resource *res;
/* stop netdevice */
dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
if (priv->has_phy) {
mdiobus_unregister(priv->mii_bus);
- kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
} else {
struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
bcm_enet_mdio_write_mii);
}
- /* release device resources */
- iounmap(priv->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
/* disable hw block clocks */
if (priv->phy_clk) {
- clk_disable(priv->phy_clk);
+ clk_disable_unprepare(priv->phy_clk);
clk_put(priv->phy_clk);
}
- clk_disable(priv->mac_clk);
+ clk_disable_unprepare(priv->mac_clk);
clk_put(priv->mac_clk);
platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
static int bcm_enet_shared_probe(struct platform_device *pdev)
{
struct resource *res;
- unsigned int iomem_size;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- iomem_size = resource_size(res);
- if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
- return -EBUSY;
-
- bcm_enet_shared_base = ioremap(res->start, iomem_size);
- if (!bcm_enet_shared_base) {
- release_mem_region(res->start, iomem_size);
+ bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!bcm_enet_shared_base)
return -ENOMEM;
- }
+
return 0;
}
static int bcm_enet_shared_remove(struct platform_device *pdev)
{
- struct resource *res;
-
- iounmap(bcm_enet_shared_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
return 0;
}
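The bcm63xx_enet rework above leans on device-managed resources: request_mem_region() plus ioremap() and their matching cleanups collapse into one devm_request_and_ioremap() call whose region and mapping are released automatically when the device is unbound, which is what lets the out_unmap/out_release_mem labels disappear. A minimal probe/remove sketch of that pattern (names are illustrative):

#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* claims the region and maps it; both are undone automatically
	 * on unbind, so the error paths shrink accordingly
	 */
	base = devm_request_and_ioremap(&pdev->dev, res);
	if (!base)
		return -ENOMEM;

	/* ... readl(base + reg) / writel(val, base + reg) as usual ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* no iounmap()/release_mem_region() needed here */
	return 0;
}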
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87..eec0af45b85 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
+#include <linux/phy.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
/* Alloc skb */
slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!slot->skb) {
- bgmac_err(bgmac, "Allocation of skb failed!\n");
+ if (!slot->skb)
return -ENOMEM;
- }
/* Poison - if everything goes fine, hardware will overwrite it */
rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
};
/**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus;
+ int i, err = 0;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "bgmac mii bus";
+ sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
+ bgmac->core->core_unit);
+ mii_bus->priv = bgmac;
+ mii_bus->read = bgmac_mii_read;
+ mii_bus->write = bgmac_mii_write;
+ mii_bus->parent = &bgmac->core->dev;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+ mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+ if (!mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_free_bus;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ bgmac_err(bgmac, "Registration of mii bus failed\n");
+ goto err_free_irq;
+ }
+
+ bgmac->mii_bus = mii_bus;
+
+ return err;
+
+err_free_irq:
+ kfree(mii_bus->irq);
+err_free_bus:
+ mdiobus_free(mii_bus);
+ return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+ struct mii_bus *mii_bus = bgmac->mii_bus;
+
+ mdiobus_unregister(mii_bus);
+ kfree(mii_bus->irq);
+ mdiobus_free(mii_bus);
+}
+
+/**************************************************
* BCMA bus ops
**************************************************/
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ err = bgmac_mii_register(bgmac);
+ if (err) {
+ bgmac_err(bgmac, "Cannot register MDIO\n");
+ err = -ENOTSUPP;
+ goto err_dma_free;
+ }
+
err = register_netdev(bgmac->net_dev);
if (err) {
bgmac_err(bgmac, "Cannot register net device\n");
err = -ENOTSUPP;
- goto err_dma_free;
+ goto err_mii_unregister;
}
netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
return 0;
+err_mii_unregister:
+ bgmac_mii_unregister(bgmac);
err_dma_free:
bgmac_dma_free(bgmac);
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
+ bgmac_mii_unregister(bgmac);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
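With the mii_bus registered as above, PHY registers can be reached through the generic mdiobus accessors, which end up calling the bgmac read/write callbacks. A small illustrative helper, not taken from the driver:

#include <linux/mii.h>
#include <linux/phy.h>

/* hypothetical: read the 32-bit PHY ID of the device at 'addr' */
static u32 foo_read_phy_id(struct mii_bus *bus, int addr)
{
	int id1 = mdiobus_read(bus, addr, MII_PHYSID1);
	int id2 = mdiobus_read(bus, addr, MII_PHYSID2);

	if (id1 < 0 || id2 < 0)
		return 0;	/* bus error */

	return ((u32)id1 << 16) | (u32)id2;
}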
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f..98d4b5fcc07 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
struct net_device *net_dev;
struct napi_struct napi;
+ struct mii_bus *mii_bus;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6..e709296e3b8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
sizeof(struct statistics_block);
status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping, GFP_KERNEL);
+ &bp->status_blk_mapping,
+ GFP_KERNEL | __GFP_ZERO);
if (status_blk == NULL)
goto alloc_mem_err;
- memset(status_blk, 0, bp->status_stats_size);
-
bnapi = &bp->bnx2_napi[0];
bnapi->status_blk.msi = status_blk;
bnapi->hw_tx_cons_ptr =
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a96508..c6303428f9e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
-#define BNX2X_NAPI_WEIGHT 128
struct napi_struct napi;
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
* START_BD - describes packet
* START_BD (split) - includes unpaged data segment for GSO
* PARSING_BD - for TSO and CSUM data
+ * PARSING_BD2 - for encapsulation data
* Frag BDs - describe pages for frags
*/
-#define BDS_PER_TX_PKT 3
+#define BDS_PER_TX_PKT 4
#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
#define SKB_CS(skb) (*(u16 *)(skb_transport_header(skb) + \
skb->csum_offset))
-#define pbd_tcp_flags(skb) (ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr) (ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
-#define XMIT_PLAIN 0
-#define XMIT_CSUM_V4 0x1
-#define XMIT_CSUM_V6 0x2
-#define XMIT_CSUM_TCP 0x4
-#define XMIT_GSO_V4 0x8
-#define XMIT_GSO_V6 0x10
+#define XMIT_PLAIN 0
+#define XMIT_CSUM_V4 (1 << 0)
+#define XMIT_CSUM_V6 (1 << 1)
+#define XMIT_CSUM_TCP (1 << 2)
+#define XMIT_GSO_V4 (1 << 3)
+#define XMIT_GSO_V6 (1 << 4)
+#define XMIT_CSUM_ENC_V4 (1 << 5)
+#define XMIT_CSUM_ENC_V6 (1 << 6)
+#define XMIT_GSO_ENC_V4 (1 << 7)
+#define XMIT_GSO_ENC_V6 (1 << 8)
-#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC (XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC (XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
+#define XMIT_CSUM (XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO (XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
/* stuff added to make the code fit 80Col */
#define CQE_TYPE(cqe_fp_flags) ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -1215,14 +1221,16 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_HYPERVISOR_VLAN,
};
struct bnx2x_prev_path_list {
+ struct list_head list;
u8 bus;
u8 slot;
u8 path;
- struct list_head list;
+ u8 aer;
u8 undi;
};
@@ -1269,6 +1277,8 @@ struct bnx2x {
#define BP_FW_MB_IDX(bp) BP_FW_MB_IDX_VN(bp, BP_VN(bp))
#ifdef CONFIG_BNX2X_SRIOV
+ /* protects vf2pf mailbox from simultaneous access */
+ struct mutex vf2pf_mutex;
/* vf pf channel mailbox contains request and response buffers */
struct bnx2x_vf_mbx_msg *vf2pf_mbox;
dma_addr_t vf2pf_mbox_mapping;
@@ -1281,6 +1291,8 @@ struct bnx2x {
dma_addr_t pf2vf_bulletin_mapping;
struct pf_vf_bulletin_content old_bulletin;
+
+ u16 requested_nr_virtfn;
#endif /* CONFIG_BNX2X_SRIOV */
struct net_device *dev;
@@ -1944,12 +1956,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x) \
- memset(x, 0, size); \
- } while (0)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2286,7 +2295,7 @@ static const u32 dmae_reg_go_c[] = {
DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
#define BNX2X_MF_SD_PROTOCOL(bp) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c..352e58ede4d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* Compute number of aggregated segments, and gso_type.
*/
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
- u16 len_on_bd, unsigned int pkt_len)
+ u16 len_on_bd, unsigned int pkt_len,
+ u16 num_of_coalesced_segs)
{
/* TPA aggregation won't have either IP options or TCP options
* other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
- NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
- skb_shinfo(skb)->gso_size);
+ NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* This is needed in order to enable forwarding support */
if (frag_size)
bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
- le16_to_cpu(cqe->pkt_len));
+ le16_to_cpu(cqe->pkt_len),
+ le16_to_cpu(cqe->num_of_coalesced_segs));
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -2009,7 +2010,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
* Cleans the objects that have internal lists without sending
* ramrods. Should be run when interrupts are disabled.
*/
-static void bnx2x_squeeze_objects(struct bnx2x *bp)
+void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -2774,7 +2775,7 @@ load_error0:
#endif /* ! BNX2X_STOP_ON_ERROR */
}
-static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
{
u8 rc = 0, cos, i;
@@ -3086,11 +3087,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
*/
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata,
- struct sw_tx_bd *tx_buf,
- struct eth_tx_start_bd **tx_bd, u16 hlen,
- u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+ struct sw_tx_bd *tx_buf,
+ struct eth_tx_start_bd **tx_bd, u16 hlen,
+ u16 bd_prod)
{
struct eth_tx_start_bd *h_tx_bd = *tx_bd;
struct eth_tx_bd *d_tx_bd;
@@ -3098,11 +3099,10 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
int old_len = le16_to_cpu(h_tx_bd->nbytes);
/* first fix first BD */
- h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
- h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -3131,7 +3131,7 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
#define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
#define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
-static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
__sum16 tsum = (__force __sum16) csum;
@@ -3146,30 +3146,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
return bswab16(tsum);
}
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
u32 rc;
+ __u8 prot = 0;
+ __be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- rc = XMIT_PLAIN;
+ return XMIT_PLAIN;
- else {
- if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
- rc = XMIT_CSUM_V6;
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- rc |= XMIT_CSUM_TCP;
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IPV6)) {
+ rc = XMIT_CSUM_V6;
+ prot = ipv6_hdr(skb)->nexthdr;
+ } else {
+ rc = XMIT_CSUM_V4;
+ prot = ip_hdr(skb)->protocol;
+ }
+ if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ rc |= XMIT_CSUM_ENC_V6;
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
} else {
- rc = XMIT_CSUM_V4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ rc |= XMIT_CSUM_ENC_V4;
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
}
}
+ if (prot == IPPROTO_TCP)
+ rc |= XMIT_CSUM_TCP;
- if (skb_is_gso_v6(skb))
- rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
- else if (skb_is_gso(skb))
- rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+ if (skb_is_gso_v6(skb)) {
+ rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V6;
+ } else if (skb_is_gso(skb)) {
+ rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+ if (rc & XMIT_CSUM_ENC)
+ rc |= XMIT_GSO_ENC_V4;
+ }
return rc;
}
@@ -3254,14 +3271,23 @@ exit_lbl:
}
#endif
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+ u32 xmit_type)
{
+ struct ipv6hdr *ipv6;
+
*parsing_data |= (skb_shinfo(skb)->gso_size <<
ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
ETH_TX_PARSE_BD_E2_LSO_MSS;
- if ((xmit_type & XMIT_GSO_V6) &&
- (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+ if (xmit_type & XMIT_GSO_ENC_V6)
+ ipv6 = inner_ipv6_hdr(skb);
+ else if (xmit_type & XMIT_GSO_V6)
+ ipv6 = ipv6_hdr(skb);
+ else
+ ipv6 = NULL;
+
+ if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
}
@@ -3272,13 +3298,13 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
* @pbd: parse BD
* @xmit_type: xmit flags
*/
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
- pbd->tcp_flags = pbd_tcp_flags(skb);
+ pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
if (xmit_type & XMIT_GSO_V4) {
pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3298,6 +3324,40 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
}
/**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
+{
+ *parsing_data |=
+ ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+ return skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data;
+ }
+
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_inner_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
+}
+
+/**
* bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
*
* @bp: driver handle
@@ -3305,15 +3365,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
* @parsing_data: data to be updated
* @xmit_type: xmit flags
*
- * 57712 related
+ * 57712/578xx related
*/
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
- u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+ u32 *parsing_data, u32 xmit_type)
{
*parsing_data |=
((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
if (xmit_type & XMIT_CSUM_TCP) {
*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3328,17 +3388,15 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
}
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_start_bd *tx_start_bd,
+ u32 xmit_type)
{
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
- if (xmit_type & XMIT_CSUM_V4)
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IP_CSUM;
- else
- tx_start_bd->bd_flags.as_bitfield |=
- ETH_TX_BD_FLAGS_IPV6;
+ if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+ tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
if (!(xmit_type & XMIT_CSUM_TCP))
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3352,9 +3410,9 @@ static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
* @pbd: parse BD to be updated
* @xmit_type: xmit flags
*/
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
- struct eth_tx_parse_bd_e1x *pbd,
- u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+ struct eth_tx_parse_bd_e1x *pbd,
+ u32 xmit_type)
{
u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
@@ -3400,6 +3458,70 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
return hlen;
}
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+ struct eth_tx_parse_bd_e2 *pbd_e2,
+ struct eth_tx_parse_2nd_bd *pbd2,
+ u16 *global_data,
+ u32 xmit_type)
+{
+ u16 hlen_w = 0;
+ u8 outerip_off, outerip_len = 0;
+ /* from outer IP to transport */
+ hlen_w = (skb_inner_transport_header(skb) -
+ skb_network_header(skb)) >> 1;
+
+ /* transport len */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen_w += inner_tcp_hdrlen(skb) >> 1;
+ else
+ hlen_w += sizeof(struct udphdr) >> 1;
+
+ pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+ if (xmit_type & XMIT_CSUM_ENC_V4) {
+ struct iphdr *iph = ip_hdr(skb);
+ pbd2->fw_ip_csum_wo_len_flags_frag =
+ bswab16(csum_fold((~iph->check) -
+ iph->tot_len - iph->frag_off));
+ } else {
+ pbd2->fw_ip_hdr_to_payload_w =
+ hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+ }
+
+ pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+ pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+ if (xmit_type & XMIT_GSO_V4) {
+ pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_tcpudp_magic(
+ inner_ip_hdr(skb)->saddr,
+ inner_ip_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+
+ outerip_len = ip_hdr(skb)->ihl << 1;
+ } else {
+ pbd_e2->data.tunnel_data.pseudo_csum =
+ bswab16(~csum_ipv6_magic(
+ &inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0));
+ }
+
+ outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+ *global_data |=
+ outerip_off |
+ (!!(xmit_type & XMIT_CSUM_V6) <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+ (outerip_len <<
+ ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+ ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+ ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+}
+
/* called with netif_tx_lock
* bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
* netif_wake_queue()
@@ -3415,6 +3537,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+ struct eth_tx_parse_2nd_bd *pbd2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
int nbd, txq_index;
@@ -3482,7 +3605,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mac_type = MULTICAST_ADDRESS;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* First, check if we need to linearize the skb (due to FW
restrictions). No need to check fragmentation if page size > 8K
(there will be no violation to FW restrictions) */
@@ -3530,12 +3653,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
first_bd = tx_start_bd;
tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
- SET_FLAG(tx_start_bd->general_data,
- ETH_TX_START_BD_PARSE_NBDS,
- 0);
- /* header nbd */
- SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+ /* header nbd: indirectly zero other flags! */
+ tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
/* remember the first BD of the packet */
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3555,19 +3675,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
-#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp)) {
-#endif
+ if (IS_VF(bp))
tx_start_bd->vlan_or_ethertype =
cpu_to_le16(ntohs(eth->h_proto));
-#ifndef BNX2X_STOP_ON_ERROR
- } else {
+ else
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
- }
-#endif
}
+ nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
/* turn on parsing and get a BD */
bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3577,23 +3694,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!CHIP_IS_E1x(bp)) {
pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
- /* Set PBD in checksum offload case */
- if (xmit_type & XMIT_CSUM)
+
+ if (xmit_type & XMIT_CSUM_ENC) {
+ u16 global_data = 0;
+
+ /* Set PBD in enc checksum offload case */
+ hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+ &pbd_e2_parsing_data,
+ xmit_type);
+
+ /* turn on 2nd parsing and get a BD */
+ bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+ pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+ memset(pbd2, 0, sizeof(*pbd2));
+
+ pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+ (skb_inner_network_header(skb) -
+ skb->data) >> 1;
+
+ if (xmit_type & XMIT_GSO_ENC)
+ bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+ &global_data,
+ xmit_type);
+
+ pbd2->global_data = cpu_to_le16(global_data);
+
+ /* add addition parse BD indication to start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_PARSE_NBDS, 1);
+ /* set encapsulation flag in start BD */
+ SET_FLAG(tx_start_bd->general_data,
+ ETH_TX_START_BD_TUNNEL_EXIST, 1);
+ nbd++;
+ } else if (xmit_type & XMIT_CSUM) {
+ /* Set PBD in checksum offload case w/o encapsulation */
hlen = bnx2x_set_pbd_csum_e2(bp, skb,
&pbd_e2_parsing_data,
xmit_type);
+ }
- if (IS_MF_SI(bp) || IS_VF(bp)) {
- /* fill in the MAC addresses in the PBD - for local
- * switching
- */
- bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
- &pbd_e2->src_mac_addr_mid,
- &pbd_e2->src_mac_addr_lo,
+ /* Add the macs to the parsing BD this is a vf */
+ if (IS_VF(bp)) {
+ /* override GRE parameters in BD */
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+ &pbd_e2->data.mac_addr.src_mid,
+ &pbd_e2->data.mac_addr.src_lo,
eth->h_source);
- bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
- &pbd_e2->dst_mac_addr_mid,
- &pbd_e2->dst_mac_addr_lo,
+
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
}
@@ -3615,14 +3767,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Setup the data pointer of the first BD of the packet */
tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
- nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
DP(NETIF_MSG_TX_QUEUED,
- "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+ "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
- le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+ le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
le16_to_cpu(tx_start_bd->vlan_or_ethertype));
@@ -3635,10 +3786,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
- if (unlikely(skb_headlen(skb) > hlen))
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ nbd++;
bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
&tx_start_bd, hlen,
- bd_prod, ++nbd);
+ bd_prod);
+ }
if (!CHIP_IS_E1x(bp))
bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
xmit_type);
@@ -3728,9 +3881,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e2)
DP(NETIF_MSG_TX_QUEUED,
"PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
- pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
- pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
- pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+ pbd_e2,
+ pbd_e2->data.mac_addr.dst_hi,
+ pbd_e2->data.mac_addr.dst_mid,
+ pbd_e2->data.mac_addr.dst_lo,
+ pbd_e2->data.mac_addr.src_hi,
+ pbd_e2->data.mac_addr.src_mid,
+ pbd_e2->data.mac_addr.src_lo,
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
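The new XMIT_*_ENC handling above keys off skb->encapsulation and the inner_* header accessors the stack fills in for tunnelled frames. A condensed, driver-agnostic illustration of choosing inner versus outer headers (the helper itself is hypothetical and assumes a TCP payload):

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* bytes from the start of the frame to the end of the L4 header the
 * NIC has to parse; use the inner headers for encapsulated traffic
 */
static unsigned int foo_parsed_hdr_len(const struct sk_buff *skb)
{
	if (skb->encapsulation)
		return (skb_inner_transport_header(skb) - skb->data) +
		       inner_tcp_hdrlen(skb);

	return (skb_transport_header(skb) - skb->data) + tcp_hdrlen(skb);
}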
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c..54e1b149acb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0, size); \
- } while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+ GFP_KERNEL | __GFP_ZERO); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+} while (0)
#define BNX2X_ALLOC(x, size) \
do { \
@@ -496,7 +496,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +837,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +847,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, BNX2X_NAPI_WEIGHT);
+ bnx2x_poll, NAPI_POLL_WEIGHT);
}
static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +973,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
+ start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1396,4 +1402,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
*
*/
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f..129d6b21317 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1393,10 +1393,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
u8 *data)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0, phy_idx;
+ int rc = -EINVAL, phy_idx;
u8 *user_data = data;
- int remaining_len = ee->len, xfer_size;
- unsigned int page_off = ee->offset;
+ unsigned int start_addr = ee->offset, xfer_size = 0;
if (!netif_running(dev)) {
DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1405,21 +1404,52 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
}
phy_idx = bnx2x_get_cur_phy_idx(bp);
- bnx2x_acquire_phy_lock(bp);
- while (!rc && remaining_len > 0) {
- xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
- SFP_EEPROM_PAGE_SIZE : remaining_len;
+
+ /* Read A0 section */
+ if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+ xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+ else
+ xfer_size = ee->len;
+ bnx2x_acquire_phy_lock(bp);
rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
&bp->link_params,
- page_off,
+ I2C_DEV_ADDR_A0,
+ start_addr,
xfer_size,
user_data);
- remaining_len -= xfer_size;
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+ return -EINVAL;
+ }
user_data += xfer_size;
- page_off += xfer_size;
+ start_addr += xfer_size;
}
- bnx2x_release_phy_lock(bp);
+ /* Read A2 section */
+ if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+ (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+ xfer_size = ee->len - xfer_size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+ xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A2,
+ start_addr,
+ xfer_size,
+ user_data);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+ return -EINVAL;
+ }
+ }
return rc;
}
@@ -1427,24 +1457,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
struct bnx2x *bp = netdev_priv(dev);
- int phy_idx;
+ int phy_idx, rc;
+ u8 sff8472_comp, diag_type;
+
if (!netif_running(dev)) {
- DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"cannot access eeprom when the interface is down\n");
return -EAGAIN;
}
-
phy_idx = bnx2x_get_cur_phy_idx(bp);
- switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFPP_10G_FIBER:
- case ETH_PHY_SFP_1G_FIBER:
- case ETH_PHY_DA_TWINAX:
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_SFF_8472_COMP_ADDR,
+ SFP_EEPROM_SFF_8472_COMP_SIZE,
+ &sff8472_comp);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+ return -EINVAL;
+ }
+
+ bnx2x_acquire_phy_lock(bp);
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ I2C_DEV_ADDR_A0,
+ SFP_EEPROM_DIAG_TYPE_ADDR,
+ SFP_EEPROM_DIAG_TYPE_SIZE,
+ &diag_type);
+ bnx2x_release_phy_lock(bp);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+ return -EINVAL;
+ }
+
+ if (!sff8472_comp ||
+ (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
modinfo->type = ETH_MODULE_SFF_8079;
modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- return 0;
- default:
- return -EOPNOTSUPP;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
+ return 0;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
@@ -3232,7 +3288,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
};
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+ .get_settings = bnx2x_get_settings,
+ .set_settings = bnx2x_set_settings,
+ .get_drvinfo = bnx2x_get_drvinfo,
+ .get_msglevel = bnx2x_get_msglevel,
+ .set_msglevel = bnx2x_set_msglevel,
+ .get_link = bnx2x_get_link,
+ .get_coalesce = bnx2x_get_coalesce,
+ .get_ringparam = bnx2x_get_ringparam,
+ .set_ringparam = bnx2x_set_ringparam,
+ .get_sset_count = bnx2x_get_sset_count,
+ .get_strings = bnx2x_get_strings,
+ .get_ethtool_stats = bnx2x_get_ethtool_stats,
+ .get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
+ .get_rxfh_indir = bnx2x_get_rxfh_indir,
+ .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ if (IS_PF(bp))
+ SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+ else /* vf */
+ SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
}
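ethtool -m offsets below ETH_MODULE_SFF_8079_LEN (256) address the module's A0 page, while offsets from there up to ETH_MODULE_SFF_8472_LEN (512) address the A2 diagnostics page, so a request crossing the boundary has to be split much as the code above does. A standalone sketch of just that split arithmetic (helper name hypothetical):

#include <linux/ethtool.h>
#include <linux/kernel.h>

/* split an ethtool module-eeprom request into A0 and A2 page chunks */
static void foo_split_module_eeprom(const struct ethtool_eeprom *ee,
				    u32 *a0_off, u32 *a0_len,
				    u32 *a2_off, u32 *a2_len)
{
	u32 start = ee->offset, len = ee->len;

	*a0_off = *a0_len = *a2_off = *a2_len = 0;

	if (start < ETH_MODULE_SFF_8079_LEN) {	/* A0 page: bytes 0..255 */
		*a0_off = start;
		*a0_len = min_t(u32, len, ETH_MODULE_SFF_8079_LEN - start);
		start += *a0_len;
		len -= *a0_len;
	}

	if (len && start < ETH_MODULE_SFF_8472_LEN) {	/* A2 page: 256..511 */
		*a2_off = start - ETH_MODULE_SFF_8079_LEN;
		*a2_len = min_t(u32, len, ETH_MODULE_SFF_8472_LEN - start);
	}
}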
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c9..40f22c6794c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
* IRO[138].m2) + ((sbId) * IRO[138].m3))
#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[318].base + ((pfId) * IRO[318].m1))
#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
- (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+ (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+ (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
- (IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+ (IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
- (IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+ (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
- (IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+ (IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
- (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+ (IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
- (IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+ (IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[315].base + ((pfId) * IRO[315].m1))
+ (IRO[316].base + ((pfId) * IRO[316].m1))
#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[307].base + ((pfId) * IRO[307].m1))
+ (IRO[308].base + ((pfId) * IRO[308].m1))
#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[306].base + ((pfId) * IRO[306].m1))
+ (IRO[307].base + ((pfId) * IRO[307].m1))
#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[305].base + ((pfId) * IRO[305].m1))
+ (IRO[306].base + ((pfId) * IRO[306].m1))
#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
(IRO[151].base + ((funcId) * IRO[151].m1))
#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[268].base + ((pfId) * IRO[268].m1))
#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
- (IRO[277].base + ((pfId) * IRO[277].m1))
+ (IRO[278].base + ((pfId) * IRO[278].m1))
#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
(IRO[264].base + ((pfId) * IRO[264].m1))
#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
#define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
- (IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
- IRO[205].m2))
#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
(IRO[183].base + ((portId) * IRO[183].m1))
#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
- (IRO[318].base + ((pfId) * IRO[318].m1))
+ (IRO[319].base + ((pfId) * IRO[319].m1))
#define USTORM_FUNC_EN_OFFSET(funcId) \
(IRO[178].base + ((funcId) * IRO[178].m1))
#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
- (IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+ (IRO[284].base + ((pfId) * IRO[284].m1))
#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
- (IRO[287].base + ((pfId) * IRO[287].m1))
+ (IRO[288].base + ((pfId) * IRO[288].m1))
#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
- (IRO[284].base + ((pfId) * IRO[284].m1))
+ (IRO[285].base + ((pfId) * IRO[285].m1))
#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[280].base + ((pfId) * IRO[280].m1))
+ (IRO[281].base + ((pfId) * IRO[281].m1))
#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[279].base + ((pfId) * IRO[279].m1))
+ (IRO[280].base + ((pfId) * IRO[280].m1))
#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[278].base + ((pfId) * IRO[278].m1))
+ (IRO[279].base + ((pfId) * IRO[279].m1))
#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[281].base + ((pfId) * IRO[281].m1))
+ (IRO[282].base + ((pfId) * IRO[282].m1))
#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
- (IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+ (IRO[287].base + ((pfId) * IRO[287].m1))
#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
(IRO[182].base + ((pfId) * IRO[182].m1))
#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
#define XSTORM_FUNC_EN_OFFSET(funcId) \
(IRO[47].base + ((funcId) * IRO[47].m1))
#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
- (IRO[295].base + ((pfId) * IRO[295].m1))
+ (IRO[296].base + ((pfId) * IRO[296].m1))
#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
- (IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+ (IRO[305].base + ((pfId) * IRO[305].m1))
#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
- (IRO[294].base + ((pfId) * IRO[294].m1))
+ (IRO[295].base + ((pfId) * IRO[295].m1))
#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
- (IRO[293].base + ((pfId) * IRO[293].m1))
+ (IRO[294].base + ((pfId) * IRO[294].m1))
#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
- (IRO[292].base + ((pfId) * IRO[292].m1))
+ (IRO[293].base + ((pfId) * IRO[293].m1))
#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
- (IRO[297].base + ((pfId) * IRO[297].m1))
+ (IRO[298].base + ((pfId) * IRO[298].m1))
#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
- (IRO[296].base + ((pfId) * IRO[296].m1))
+ (IRO[297].base + ((pfId) * IRO[297].m1))
#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
- (IRO[291].base + ((pfId) * IRO[291].m1))
+ (IRO[292].base + ((pfId) * IRO[292].m1))
#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
- (IRO[290].base + ((pfId) * IRO[290].m1))
+ (IRO[291].base + ((pfId) * IRO[291].m1))
#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
- (IRO[289].base + ((pfId) * IRO[289].m1))
+ (IRO[290].base + ((pfId) * IRO[290].m1))
#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
- (IRO[288].base + ((pfId) * IRO[288].m1))
+ (IRO[289].base + ((pfId) * IRO[289].m1))
#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
(IRO[44].base + ((pfId) * IRO[44].m1))
#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc34..12f00a40cdf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
#define EPIO_CFG_EPIO30 0x0000001f
#define EPIO_CFG_EPIO31 0x00000020
+struct mac_addr {
+ u32 upper;
+ u32 lower;
+};
struct shared_hw_cfg { /* NVRAM Offset */
/* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED 0x00000000
#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED 0x00000001
- u32 reserved0[6]; /* 0x178 */
+ /* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2;
+ * LOM recommended and tested value is 0xBEB2. Using a different
+ * value means using a value not tested by BRCM.
+ */
+ u32 sfi_tap_values; /* 0x178 */
+ #define PORT_HW_CFG_TX_EQUALIZATION_MASK 0x0000FFFF
+ #define PORT_HW_CFG_TX_EQUALIZATION_SHIFT 0
+
+ /* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+ * value is 0x2. LOM recommended and tested value is 0x2. Using a
+ * different value means using a value not tested by BRCM
+ */
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_MASK 0x000F0000
+ #define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT 16
+
+ u32 reserved0[5]; /* 0x17c */
u32 aeu_int_mask; /* 0x190 */
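The two bit-fields above share the new sfi_tap_values word. A minimal sketch of how a consumer could split it, using only the masks and shifts defined in this hunk; the helper name and output parameters are illustrative, not part of the driver:

/* Hypothetical helper: decode the NVRAM-provided SFP+ Tx tuning word.
 * tx_equal   - 16-bit Tx FIR equalization taps (0 means use driver defaults)
 * tx_drv_brd - 4-bit Tx driver broadcast IDRIVER (0 means use driver defaults)
 */
static void sketch_decode_sfi_tap_values(u32 sfi_tap_values,
                                         u16 *tx_equal, u8 *tx_drv_brd)
{
        *tx_equal = sfi_tap_values & PORT_HW_CFG_TX_EQUALIZATION_MASK;
        *tx_drv_brd = (sfi_tap_values & PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
                      PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
}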
@@ -2821,8 +2840,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 2
-#define BCM_5710_FW_ENGINEERING_VERSION 0
+#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
u8 default_vlan_flg;
u8 force_default_pri_flg;
- __le32 reserved3;
+ u8 tunnel_lso_inc_ip_id;
+ u8 refuse_outband_vlan_flg;
+ u8 tunnel_non_lso_pcsum_location;
+ u8 reserved1;
};
/*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
__le16 silent_vlan_mask;
u8 silent_vlan_removal_flg;
u8 silent_vlan_change_flg;
+ u8 refuse_outband_vlan_flg;
+ u8 refuse_outband_vlan_change_flg;
+ u8 tx_switching_flg;
+ u8 tx_switching_change_flg;
+ __le32 reserved1;
__le32 echo;
};
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
*/
struct eth_classify_mac_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
*/
struct eth_classify_pair_cmd {
struct eth_classify_cmd_header header;
- __le32 reserved0;
+ __le16 reserved0;
+ __le16 inner_mac;
__le16 mac_lsb;
__le16 mac_mid;
__le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
/*
- * Command for setting multicast classification for a client
+ * destination and source MAC addresses.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_lo;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 src_hi;
+ __le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 src_mid;
+ __le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+ __le16 dst_mid;
+ __le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_lo;
+ __le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+ __le16 reserved0;
+ __le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 dst_hi;
+ __le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved1;
+ u8 ip_hdr_start_inner_w;
+ __le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+ __le16 pseudo_csum;
+ u8 ip_hdr_start_inner_w;
+ u8 reserved1;
+#endif
+};
+
+/* union for mac addresses and for tunneling data.
+ * considered as tunneling data only if (tunnel_exist == 1).
*/
+union eth_mac_addr_or_tunnel_data {
+ struct eth_mac_addresses mac_addr;
+ struct eth_tunnel_data tunnel_data;
+};
+
+/* Command for setting multicast classification for a client */
struct eth_multicast_rules_cmd {
u8 cmd_general_data;
#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
struct regpair reserved3;
};
-
/*
* parameters for multicast classification ramrod
*/
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
};
-
/*
* Place holder for ramrods protocol specific data
*/
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
u8 rss_result_mask;
u8 rss_mode;
- __le32 __reserved2;
+ __le16 udp_4tuple_dst_port_mask;
+ __le16 udp_4tuple_dst_port_value;
u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
__le32 rss_key[T_ETH_RSS_KEY];
__le32 echo;
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
MAX_ETH_TPA_UPDATE_COMMAND
};
+/* In case of LSO over an IPv4 tunnel, whether to increment the
+ * IP ID on the external IP header or on the internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+ EXT_HEADER,
+ INT_HEADER,
+ MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case a tunnel exists and L4 checksum is offloaded,
+ * the pseudo checksum location: on packet or on BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+ PCSUM_ON_PKT,
+ PCSUM_ON_BD,
+ MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
/*
* Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
#define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
#define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
};
/*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
* Tx parsing BD structure for ETH E2
*/
struct eth_tx_parse_bd_e2 {
- __le16 dst_mac_addr_lo;
- __le16 dst_mac_addr_mid;
- __le16 dst_mac_addr_hi;
- __le16 src_mac_addr_lo;
- __le16 src_mac_addr_mid;
- __le16 src_mac_addr_hi;
+ union eth_mac_addr_or_tunnel_data data;
__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
};
/*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
*/
+struct eth_tx_parse_2nd_bd {
+ __le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+ __le16 reserved1;
+ u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+ u8 reserved2;
+ u8 tunnel_udp_hdr_start_w;
+ u8 fw_ip_hdr_to_payload_w;
+ __le16 fw_ip_csum_wo_len_flags_frag;
+ __le16 hw_ip_id;
+ __le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
struct eth_tx_next_bd {
__le32 addr_lo;
__le32 addr_hi;
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
struct eth_tx_bd reg_bd;
struct eth_tx_parse_bd_e1x parse_bd_e1x;
struct eth_tx_parse_bd_e2 parse_bd_e2;
+ struct eth_tx_parse_2nd_bd parse_2nd_bd;
struct eth_tx_next_bd next_bd;
};
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+ RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
MAX_COMMON_SPQE_CMD_ID
};
-
/*
* Per-protocol connection types
*/
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
*/
struct malicious_vf_event_data {
u8 vf_id;
- u8 reserved0;
+ u8 err_id;
u16 reserved1;
u32 reserved2;
u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
EVENT_RING_OPCODE_MULTICAST_RULES,
+ EVENT_RING_OPCODE_SET_TIMESYNC,
MAX_EVENT_RING_OPCODE
};
-
/*
* Modes for fairness algorithm
*/
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
*/
struct function_start_data {
u8 function_mode;
- u8 reserved;
+ u8 allow_npar_tx_switching;
__le16 sd_vlan_tag;
__le16 vif_id;
u8 path_id;
u8 network_cos_mode;
+ u8 dmae_cmd_id;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ __le16 reserved1[2];
};
-
struct function_update_data {
u8 vif_id_change_flg;
u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
__le16 afex_default_vlan;
u8 allowed_priorities;
u8 network_cos_mode;
+ u8 lb_mode_en_change_flg;
u8 lb_mode_en;
u8 tx_switch_suspend_change_flg;
u8 tx_switch_suspend;
u8 echo;
- __le16 reserved1;
+ u8 reserved1;
+ u8 update_gre_cfg_flg;
+ u8 gre_tunnel_mode;
+ u8 gre_tunnel_rss;
+ u8 nvgre_clss_en;
+ u32 reserved3;
};
-
/*
* FW version stored in the Xstorm RAM
*/
@@ -5061,6 +5216,22 @@ struct fw_version {
#define __FW_VERSION_RESERVED_SHIFT 4
};
+/* GRE RSS Mode */
+enum gre_rss_mode {
+ GRE_OUTER_HEADERS_RSS,
+ GRE_INNER_HEADERS_RSS,
+ NVGRE_KEY_ENTROPY_RSS,
+ MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+ NO_GRE_TUNNEL,
+ NVGRE_TUNNEL,
+ L2GRE_TUNNEL,
+ IPGRE_TUNNEL,
+ MAX_GRE_TUNNEL_TYPE
+};
/*
* Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
MAX_IP_VER
};
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+ VF_PF_CHANNEL_NOT_READY,
+ ETH_ILLEGAL_BD_LENGTHS,
+ ETH_PACKET_TOO_SHORT,
+ ETH_PAYLOAD_TOO_BIG,
+ ETH_ILLEGAL_ETH_TYPE,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_TOO_MANY_BDS,
+ ETH_ZERO_HDR_NBDS,
+ ETH_START_BD_NOT_SET,
+ ETH_ILLEGAL_PARSE_NBDS,
+ ETH_IPV6_AND_CHECKSUM,
+ ETH_VLAN_FLG_INCORRECT,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_TUNNEL_NOT_SUPPORTED,
+ MAX_MALICIOUS_VF_ERROR_ID
+};
/*
* Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
union protocol_common_specific_data data;
};
-
/*
* The send queue element
*/
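The union introduced above reuses the twelve bytes that previously held the destination and source MAC words in the E2 parsing BD. A hedged sketch of the two interpretations, assuming the TUNNEL_EXIST bit is set elsewhere in the start BD as the union's comment describes; the helper and its arguments are illustrative:

/* Illustrative only: fill the E2 parsing BD's data union. Firmware reads it
 * as eth_tunnel_data only when the start BD carries
 * ETH_TX_START_BD_TUNNEL_EXIST.
 */
static void sketch_fill_parse_bd_e2(struct eth_tx_parse_bd_e2 *pbd,
                                    bool tunneled, __le16 pseudo_csum,
                                    u8 inner_ip_hdr_start_w)
{
        if (tunneled) {
                /* interpreted as struct eth_tunnel_data */
                pbd->data.tunnel_data.pseudo_csum = pseudo_csum;
                pbd->data.tunnel_data.ip_hdr_start_inner_w =
                        inner_ip_hdr_start_w;
        } else {
                /* interpreted as struct eth_mac_addresses; the transmit
                 * path fills dst/src from the frame's MAC addresses
                 */
                pbd->data.mac_addr.dst_lo = 0;
                pbd->data.mac_addr.dst_mid = 0;
                pbd->data.mac_addr.dst_hi = 0;
        }
}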
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 77ebae0ac64..6cc6c6374a9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,6 +27,10 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+ struct link_params *params,
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8);
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
#define SFP_EEPROM_CON_TYPE_ADDR 0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
+ #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
#define SFP_EEPROM_COMP_CODE_ADDR 0x3
@@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params,
int rc = 0;
struct bnx2x *bp = params->bp;
- if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
- DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
- return -EINVAL;
- }
-
if (xfer_cnt > 16) {
DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
xfer_cnt);
@@ -3629,6 +3629,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
* init configuration, and set/clear SGMII flag. Internal
* phy init is done purely in phy_init stage.
*/
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+ ((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+ (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+ (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+ ((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+ (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+ (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -3728,7 +3738,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+ u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
an_adv |= (1<<5);
/* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3763,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Set Transmit PMD settings */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
/* Configure the next lane if dual mode */
if (phy->flags & FLAGS_WC_DUAL_MODE)
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
- ((0x02 <<
- MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x06 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x09 <<
- MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ WC_TX_DRIVER(0x02, 0x06, 0x09));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
0x03f0);
@@ -3909,6 +3912,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
+ u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
/* Hold rxSeqStart */
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3957,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
if (is_xfi) {
misc1_val |= 0x5;
- tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+ tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+ tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
} else {
+ cfg_tap_val = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].
+ sfi_tap_values));
+
+ tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+ tx_drv_brdct = (cfg_tap_val &
+ PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+ PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
misc1_val |= 0x9;
- tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
- tx_driver_val =
- ((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+ /* TAP values are controlled by nvram if the value there isn't 0 */
+ if (tx_equal)
+ tap_val = (u16)tx_equal;
+ else
+ tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+ if (tx_drv_brdct)
+ tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+ 0x06);
+ else
+ tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
}
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
@@ -4105,15 +4120,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
/* Set Transmit PMD settings */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX_FIR_TAP,
- ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
- (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
- (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
- MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+ (WC_TX_FIR(0x12, 0x2d, 0x00) |
+ MDIO_WC_REG_TX_FIR_TAP_ENABLE));
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
- ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
- (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+ MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+ WC_TX_DRIVER(0x02, 0x02, 0x02));
}
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4750,8 +4761,8 @@ void bnx2x_link_status_update(struct link_params *params,
port_mb[port].link_status));
/* Force link UP in non LOOPBACK_EXT loopback mode(s) */
- if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
- bp->link_params.loopback_mode != LOOPBACK_EXT)
+ if (params->loopback_mode != LOOPBACK_NONE &&
+ params->loopback_mode != LOOPBACK_EXT)
vars->link_status |= LINK_STATUS_LINK_UP;
if (bnx2x_eee_has_cap(params))
@@ -7758,7 +7769,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val = 0;
@@ -7771,7 +7783,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Set the read command byte count */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
- (byte_cnt | 0xa000));
+ (byte_cnt | (dev_addr << 8)));
/* Set the read command address */
bnx2x_cl45_write(bp, phy,
@@ -7845,6 +7857,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
+ u8 dev_addr,
u16 addr, u8 byte_cnt,
u8 *o_buf, u8 is_init)
{
@@ -7869,7 +7882,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
usleep_range(1000, 2000);
bnx2x_warpcore_power_module(params, 1);
}
- rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+ rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
@@ -7885,7 +7898,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
- u16 addr, u8 byte_cnt, u8 *o_buf)
+ u8 dev_addr, u16 addr, u8 byte_cnt,
+ u8 *o_buf, u8 is_init)
{
struct bnx2x *bp = params->bp;
u16 val, i;
@@ -7896,6 +7910,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
+ /* Set 2-wire transfer rate of SFP+ module EEPROM
+ * to 100kHz since some DACs (direct attached cables) do
+ * not work at 400kHz.
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+ ((dev_addr << 8) | 1));
+
/* Need to read from 1.8000 to clear it */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -7968,26 +7991,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf)
{
- int rc = -EOPNOTSUPP;
+ int rc = 0;
+ struct bnx2x *bp = params->bp;
+ u8 xfer_size;
+ u8 *user_data = o_buf;
+ read_sfp_module_eeprom_func_p read_func;
+
+ if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+ DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+ return -EINVAL;
+ }
+
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
- rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8726_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
- rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf);
- break;
+ read_func = bnx2x_8727_read_sfp_module_eeprom;
+ break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
- byte_cnt, o_buf, 0);
- break;
+ read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ while (!rc && (byte_cnt > 0)) {
+ xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : byte_cnt;
+ rc = read_func(phy, params, dev_addr, addr, xfer_size,
+ user_data, 0);
+ byte_cnt -= xfer_size;
+ user_data += xfer_size;
+ addr += xfer_size;
}
return rc;
}
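With the rework above, callers pass the I2C device address explicitly (0xa0 or 0xa2) and may request more than SFP_EEPROM_PAGE_SIZE bytes, since the loop splits the transfer into page-sized chunks. A minimal caller sketch, assuming only the constants from bnx2x_link.h; the helper name and the requirement that buf holds SFP_EEPROM_VENDOR_NAME_SIZE + 1 bytes are illustrative:

/* Illustrative only: read and NUL-terminate the SFP+ vendor name. */
static int sketch_read_vendor_name(struct bnx2x_phy *phy,
                                   struct link_params *params, char *buf)
{
        int rc;

        /* Requests up to SFP_EEPROM_PAGE_SIZE (16) bytes go out as a single
         * chunk; anything larger is now split by the loop above.
         */
        rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
                                          SFP_EEPROM_VENDOR_NAME_ADDR,
                                          SFP_EEPROM_VENDOR_NAME_SIZE,
                                          (u8 *)buf);
        if (!rc)
                buf[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
        return rc;
}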
@@ -8004,6 +8045,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
/* First check for copper cable */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_CON_TYPE_ADDR,
2,
(u8 *)val) != 0) {
@@ -8021,6 +8063,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*/
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_FC_TX_TECH_ADDR,
1,
&copper_module_type) != 0) {
@@ -8049,20 +8092,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
+ case SFP_EEPROM_CON_TYPE_VAL_RJ45:
check_limiting_mode = 1;
if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
SFP_EEPROM_COMP_CODE_LR_MASK |
SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ DP(NETIF_MSG_LINK, "1G SFP module detected\n");
gport = params->port;
phy->media_type = ETH_PHY_SFP_1G_FIBER;
- phy->req_line_speed = SPEED_1000;
- if (!CHIP_IS_E1x(bp))
- gport = BP_PATH(bp) + (params->port << 1);
- netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
- " Current SFP module in port %d is not"
- " compliant with 10G Ethernet\n",
- gport);
+ if (phy->req_line_speed != SPEED_1000) {
+ phy->req_line_speed = SPEED_1000;
+ if (!CHIP_IS_E1x(bp)) {
+ gport = BP_PATH(bp) +
+ (params->port << 1);
+ }
+ netdev_err(bp->dev,
+ "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+ gport);
+ }
} else {
int idx, cfg_idx = 0;
DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8101,6 +8148,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
u8 options[SFP_EEPROM_OPTIONS_SIZE];
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_OPTIONS_ADDR,
SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
@@ -8167,6 +8215,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
/* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_VENDOR_NAME_ADDR,
SFP_EEPROM_VENDOR_NAME_SIZE,
(u8 *)vendor_name))
@@ -8175,6 +8224,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
if (bnx2x_read_sfp_module_eeprom(phy,
params,
+ I2C_DEV_ADDR_A0,
SFP_EEPROM_PART_NO_ADDR,
SFP_EEPROM_PART_NO_SIZE,
(u8 *)vendor_pn))
@@ -8205,12 +8255,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
for (timeout = 0; timeout < 60; timeout++) {
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
- rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
- params, 1,
- 1, &val, 1);
+ rc = bnx2x_warpcore_read_sfp_module_eeprom(
+ phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+ 1);
else
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
- &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params,
+ I2C_DEV_ADDR_A0,
+ 1, 1, &val);
if (rc == 0) {
DP(NETIF_MSG_LINK,
"SFP+ module initialization took %d ms\n",
@@ -8219,7 +8270,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
}
usleep_range(5000, 10000);
}
- rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+ rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+ 1, 1, &val);
return rc;
}
@@ -8376,15 +8428,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
val);
-
- /* Set 2-wire transfer rate of SFP+ module EEPROM
- * to 100Khz since some DACs(direct attached cables) do
- * not work at 400Khz.
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
- 0xa001);
break;
default:
DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -9528,8 +9571,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- for (i = 0; i < ARRAY_SIZE(reg_set);
- i++)
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
bnx2x_cl45_write(bp, phy, reg_set[i].devad,
reg_set[i].reg, reg_set[i].val);
@@ -10281,7 +10323,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
/* Determine if EEE was negotiated */
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+ (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
bnx2x_eee_an_resolve(phy, params, vars);
}
@@ -12242,7 +12285,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_bmac_enable(params, vars, 1, 1);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12261,7 +12304,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
vars->phy_flags = PHY_XGXS_FLAG;
bnx2x_xgxs_deassert(params);
- /* set bmac loopback */
+ /* Set bmac loopback */
bnx2x_emac_enable(params, vars, 1);
bnx2x_emac_program(params, vars);
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12521,6 +12564,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
params->req_line_speed[0], params->req_flow_ctrl[0]);
DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
params->req_line_speed[1], params->req_flow_ctrl[1]);
+ DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -13446,8 +13490,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
}
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
- * since some switches tend to reinit the AN process and clear the
- * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * Since some switches tend to reinit the AN process and clear the
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
@@ -13465,8 +13509,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* CL73 has not begun yet */
if (base_page == 0) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
bnx2x_kr2_recovery(params, vars, phy);
+ DP(NETIF_MSG_LINK, "No BP\n");
+ }
return;
}
@@ -13482,7 +13528,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
- next_page);
+ next_page);
bnx2x_kr2_recovery(params, vars, phy);
}
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 56c2aae4e2c..4df45234fdc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,9 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+
#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
#define SFP_EEPROM_SERIAL_SIZE 16
#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
#define SFP_EEPROM_DATE_SIZE 6
+#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
+#define SFP_EEPROM_DIAG_TYPE_SIZE 1
+#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+
+#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
+#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
+
#define PWR_FLT_ERR_MSG_LEN 250
#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
+ struct link_params *params, u8 dev_addr,
+ u16 addr, u16 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8c..fdfe33bc097 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
/* tx only connections collect statistics (on the same index as the
- * parent connection). The statistics are zeroed when the parent
- * connection is initialized.
+ * parent connection). The statistics are zeroed when the parent
+ * connection is initialized.
*/
__set_bit(BNX2X_Q_FLG_STATS, &flags);
if (zero_stats)
__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+ __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
+ __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
#ifdef BNX2X_STOP_ON_ERROR
__set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
{
struct eth_stats_info *ether_stat =
&bp->slowpath->drv_info_to_mcp.ether_stat;
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bp->sp_objs->mac_obj;
+ int i;
strlcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
- bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
-
+ /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
+ * mac_local field of the ether_stat struct. The base address is offset
+ * by 2 bytes because each mac_local slot is 8 bytes while a MAC address
+ * is only 6 bytes. Likewise, the stride passed to get_n_elements is
+ * 2 bytes, padding each 6-byte MAC out to the 8 bytes the ether_stat
+ * struct allocates, so the MACs land in their proper positions.
+ */
+ for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
+ memset(ether_stat->mac_local + i, 0,
+ sizeof(ether_stat->mac_local[0]));
+ mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local + MAC_PAD, MAC_PAD,
+ ETH_ALEN);
ether_stat->mtu_size = bp->dev->mtu;
-
if (bp->dev->features & NETIF_F_RXCSUM)
ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
if (bp->dev->features & NETIF_F_TSO)
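The padding arithmetic in the comment above is easier to see spelled out: each mac_local slot is ALIGN(ETH_ALEN, sizeof(u32)) = 8 bytes, a MAC occupies the last 6, and the copy loop advances by stride + size per entry. A hedged sketch; the array shape is illustrative, the real field lives inside the firmware's eth_stats_info structure:

/* Illustrative only: how base = field + MAC_PAD and stride = MAC_PAD place
 * each 6-byte MAC at the tail of its 8-byte slot.
 */
static void sketch_copy_two_macs(u8 (*mac_local)[8],
                                 const u8 *mac0, const u8 *mac1)
{
        u8 *next = &mac_local[0][0] + MAC_PAD;  /* skip the 2 leading pad bytes */

        memcpy(next, mac0, ETH_ALEN);           /* bytes 2..7 of slot 0 */
        next += MAC_PAD + ETH_ALEN;             /* stride + size = 8 bytes */
        memcpy(next, mac1, ETH_ALEN);           /* bytes 2..7 of slot 1 */
}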
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
if (!CNIC_LOADED(bp))
return;
- memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
- bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
+ ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -6029,9 +6041,10 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
rmb();
bnx2x_init_rx_rings(bp);
bnx2x_init_tx_rings(bp);
-
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_memset_stats(bp);
return;
+ }
/* Initialize MOD_ABS interrupts */
bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@ -9525,6 +9538,10 @@ sp_rtnl_not_reset:
bnx2x_vfpf_storm_rx_mode(bp);
}
+ if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state))
+ bnx2x_pf_set_vfs_vlan(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9532,8 +9549,10 @@ sp_rtnl_not_reset:
/* enable SR-IOV if applicable */
if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
- &bp->sp_rtnl_state))
+ &bp->sp_rtnl_state)) {
+ bnx2x_disable_sriov(bp);
bnx2x_enable_sriov(bp);
+ }
}
static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9720,31 @@ static struct bnx2x_prev_path_list *
return NULL;
}
+static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
+{
+ struct bnx2x_prev_path_list *tmp_list;
+ int rc;
+
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ tmp_list->aer = 1;
+ rc = 0;
+ } else {
+ BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
+ BP_PATH(bp));
+ }
+
+ up(&bnx2x_prev_sem);
+
+ return rc;
+}
+
static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
{
struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9753,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
if (down_trylock(&bnx2x_prev_sem))
return false;
- list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
- if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
- bp->pdev->bus->number == tmp_list->bus &&
- BP_PATH(bp) == tmp_list->path) {
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (tmp_list->aer) {
+ DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
+ BP_PATH(bp));
+ } else {
rc = true;
BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
BP_PATH(bp));
- break;
}
}
@@ -9730,6 +9775,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
struct bnx2x_prev_path_list *tmp_list;
int rc;
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Received %d when tried to take lock\n", rc);
+ return rc;
+ }
+
+ /* Check whether the entry for this path already exists */
+ tmp_list = bnx2x_prev_path_get_entry(bp);
+ if (tmp_list) {
+ if (!tmp_list->aer) {
+ BNX2X_ERR("Re-Marking the path.\n");
+ } else {
+ DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
+ BP_PATH(bp));
+ tmp_list->aer = 0;
+ }
+ up(&bnx2x_prev_sem);
+ return 0;
+ }
+ up(&bnx2x_prev_sem);
+
+ /* Create an entry for this path and add it */
tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
if (!tmp_list) {
BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
@@ -9739,6 +9806,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
tmp_list->bus = bp->pdev->bus->number;
tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
tmp_list->path = BP_PATH(bp);
+ tmp_list->aer = 0;
tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9814,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
BNX2X_ERR("Received %d when tried to take lock\n", rc);
kfree(tmp_list);
} else {
- BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
- BP_PATH(bp));
+ DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
+ BP_PATH(bp));
list_add(&tmp_list->list, &bnx2x_prev_list);
up(&bnx2x_prev_sem);
}
@@ -9986,6 +10054,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
}
do {
+ int aer = 0;
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
@@ -9994,7 +10063,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
break;
}
- if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+ rc = down_interruptible(&bnx2x_prev_sem);
+ if (rc) {
+ BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
+ rc);
+ } else {
+ /* If Path is marked by EEH, ignore unload status */
+ aer = !!(bnx2x_prev_path_get_entry(bp) &&
+ bnx2x_prev_path_get_entry(bp)->aer);
+ up(&bnx2x_prev_sem);
+ }
+
+ if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
rc = bnx2x_prev_unload_common(bp);
break;
}
@@ -10034,8 +10114,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
id = ((val & 0xffff) << 16);
val = REG_RD(bp, MISC_REG_CHIP_REV);
id |= ((val & 0xf) << 12);
- val = REG_RD(bp, MISC_REG_CHIP_METAL);
- id |= ((val & 0xff) << 4);
+
+ /* Metal is read from PCI regs, but we can't access >=0x400 from
+ * the configuration space (so we need to reg_rd)
+ */
+ val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
+ id |= (((val >> 24) & 0xf) << 4);
val = REG_RD(bp, MISC_REG_BOND_ID);
id |= (val & 0xf);
bp->common.chip_id = id;
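The assembled chip_id keeps its previous bit layout; only the source and width of the metal field change. A worked breakdown, with field widths read off the code above rather than from documentation:

/* chip_id composition, as built above:
 *   [31:16] chip number   - MISC_REG_CHIP_NUM
 *   [15:12] chip revision - MISC_REG_CHIP_REV
 *   [11:4]  metal         - previously 8 bits from MISC_REG_CHIP_METAL,
 *                           now only bits [7:4] are filled, taken from
 *                           PCI_ID_VAL3 bits 27:24 via a GRC read
 *   [3:0]   bond id       - MISC_REG_BOND_ID
 */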
@@ -10812,14 +10896,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
}
}
- if (IS_MF_STORAGE_SD(bp))
- /* Zero primary MAC configuration */
- memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp))
- /* use FIP MAC as primary MAC */
+ /* If this is a storage-only interface, use SAN mac as
+ * primary MAC. Notice that for SD this is already the case,
+ * as the SAN mac was copied from the primary MAC.
+ */
+ if (IS_MF_FCOE_AFEX(bp))
memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
-
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11056,6 +11138,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
} else
BNX2X_DEV_INFO("illegal OV for SD\n");
break;
+ case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
+ bp->mf_config[vn] = 0;
+ break;
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
@@ -11402,26 +11487,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* net_device service functions
*/
-static int bnx2x_open_epilog(struct bnx2x *bp)
-{
- /* Enable sriov via delayed work. This must be done via delayed work
- * because it causes the probe of the vf devices to be run, which invoke
- * register_netdevice which must have rtnl lock taken. As we are holding
- * the lock right now, that could only work if the probe would not take
- * the lock. However, as the probe of the vf may be called from other
- * contexts as well (such as passthrough to vm failes) it can't assume
- * the lock is being held for it. Using delayed work here allows the
- * probe code to simply take the lock (i.e. wait for it to be released
- * if it is being held).
- */
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
- return 0;
-}
-
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
@@ -11791,6 +11856,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_setup_tc = bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
+ .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
+ .ndo_get_vf_config = bnx2x_get_vf_config,
#endif
#ifdef NETDEV_FCOE_WWNN
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11953,7 +12020,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->netdev_ops = &bnx2x_netdev_ops;
- bnx2x_set_ethtool_ops(dev);
+ bnx2x_set_ethtool_ops(bp, dev);
dev->priv_flags |= IFF_UNICAST_FLT;
@@ -11961,6 +12028,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ if (!CHIP_IS_E1x(bp)) {
+ dev->hw_features |= NETIF_F_GSO_GRE;
+ dev->hw_enc_features =
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE;
+ }
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -12447,7 +12521,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
* l2 connections.
*/
if (IS_VF(bp)) {
- bnx2x_vf_map_doorbells(bp);
+ bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
goto init_one_exit;
@@ -12475,13 +12549,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
- /* Enable SRIOV if capability found in configuration space.
- * Once the generic SR-IOV framework makes it in from the
- * pci tree this will be revised, to allow dynamic control
- * over the number of VFs. Right now, change the num of vfs
- * param below to enable SR-IOV.
- */
- rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
+ /* Enable SRIOV if capability found in configuration space */
+ rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
goto init_one_exit;
@@ -12493,16 +12562,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
- /* disable FCOE for 57840 device, until FW supports it */
- switch (ent->driver_data) {
- case BCM57840_O:
- case BCM57840_4_10:
- case BCM57840_2_20:
- case BCM57840_MFO:
- case BCM57840_MF:
- bp->flags |= NO_FCOE_FLAG;
- }
-
/* Set bp->num_queues for MSI-X mode*/
bnx2x_set_num_queues(bp);
@@ -12636,9 +12695,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
- int i;
-
- bp->state = BNX2X_STATE_ERROR;
+ bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
bp->rx_mode = BNX2X_RX_MODE_NONE;
@@ -12647,29 +12704,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
-
- bnx2x_netif_stop(bp, 0);
/* Delete all NAPI objects */
bnx2x_del_all_napi(bp);
if (CNIC_LOADED(bp))
bnx2x_del_all_napi_cnic(bp);
+ netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
+ cancel_delayed_work(&bp->sp_task);
+ cancel_delayed_work(&bp->period_task);
- bnx2x_stats_handle(bp, STATS_EVENT_STOP);
-
- /* Release IRQs */
- bnx2x_free_irq(bp);
-
- /* Free SKBs, SGEs, TPA pool and driver internals */
- bnx2x_free_skbs(bp);
-
- for_each_rx_queue(bp, i)
- bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
-
- bnx2x_free_mem(bp);
+ spin_lock_bh(&bp->stats_lock);
+ bp->stats_state = STATS_STATE_DISABLED;
+ spin_unlock_bh(&bp->stats_lock);
- bp->state = BNX2X_STATE_CLOSED;
+ bnx2x_save_statistics(bp);
netif_carrier_off(bp->dev);
@@ -12705,6 +12754,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
+ BNX2X_ERR("IO error detected\n");
+
netif_device_detach(dev);
if (state == pci_channel_io_perm_failure) {
@@ -12715,6 +12766,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
if (netif_running(dev))
bnx2x_eeh_nic_unload(bp);
+ bnx2x_prev_path_mark_eeh(bp);
+
pci_disable_device(pdev);
rtnl_unlock();
@@ -12733,9 +12786,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2x *bp = netdev_priv(dev);
+ int i;
rtnl_lock();
-
+ BNX2X_ERR("IO slot reset initializing...\n");
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
@@ -12749,6 +12803,42 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
if (netif_running(dev))
bnx2x_set_power_state(bp, PCI_D0);
+ if (netif_running(dev)) {
+ BNX2X_ERR("IO slot reset --> driver unload\n");
+ if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 v;
+
+ v = SHMEM2_RD(bp,
+ drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+ bnx2x_drain_tx_queues(bp);
+ bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
+ bnx2x_netif_stop(bp, 1);
+ bnx2x_free_irq(bp);
+
+ /* Report UNLOAD_DONE to MCP */
+ bnx2x_send_unload_done(bp, true);
+
+ bp->sp_state = 0;
+ bp->port.pmf = 0;
+
+ bnx2x_prev_unload(bp);
+
+ /* We should have reset the engine, so it's fair to
+ * assume the FW will no longer write to the bnx2x driver.
+ */
+ bnx2x_squeeze_objects(bp);
+ bnx2x_free_skbs(bp);
+ for_each_rx_queue(bp, i)
+ bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+ bnx2x_free_fp_mem(bp);
+ bnx2x_free_mem(bp);
+
+ bp->state = BNX2X_STATE_CLOSED;
+ }
+
rtnl_unlock();
return PCI_ERS_RESULT_RECOVERED;
@@ -12775,6 +12865,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
bnx2x_eeh_recover(bp);
+ bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
if (netif_running(dev))
bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -12797,6 +12890,9 @@ static struct pci_driver bnx2x_pci_driver = {
.suspend = bnx2x_suspend,
.resume = bnx2x_resume,
.err_handler = &bnx2x_err_handler,
+#ifdef CONFIG_BNX2X_SRIOV
+ .sriov_configure = bnx2x_sriov_configure,
+#endif
};
static int __init bnx2x_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d5301..d22bc40091e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
Port. */
#define MISC_REG_BOND_ID 0xa400
-/* [R 8] These bits indicate the metal revision of the chip. This value
- starts at 0x00 for each all-layer tape-out and increments by one for each
- tape-out. */
-#define MISC_REG_CHIP_METAL 0xa404
/* [R 16] These bits indicate the part number for the chip. */
#define MISC_REG_CHIP_NUM 0xa408
/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
+#define PCI_ID_VAL3 0x43c
+
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90..32a9609cc98 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
#define BNX2X_MAX_EMUL_MULTI 16
-#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
-
/**** Exe Queue interfaces ****/
/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
}
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf)
+ int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
- u8 *next = buf;
+ u8 *next = base;
int counter = 0;
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
if (counter < n) {
- /* place leading zeroes in buffer */
- memset(next, 0, MAC_LEADING_ZERO_CNT);
-
- /* place mac after leading zeroes*/
- memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
- ETH_ALEN);
-
- /* calculate address of next element and
- * advance counter
- */
+ memcpy(next, &pos->u, size);
counter++;
- next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
+ counter, next);
+ next += stride + size;
- DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
- counter, next, pos->u.mac.mac);
}
}
return counter * ETH_ALEN;
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
/* Check if a requested MAC already exists */
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return -EEXIST;
return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
list_for_each_entry(pos, &o->head, link)
- if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+ if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
+ (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
return pos;
return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
- ETH_ALEN)))
+ ETH_ALEN)) &&
+ (data->vlan_mac.is_inner_mac ==
+ pos->u.vlan_mac.is_inner_mac))
return pos;
return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
&rule_entry->mac.mac_mid,
&rule_entry->mac.mac_lsb, mac);
+ rule_entry->mac.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.
+ u.mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
-
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
/* MOVE: Add a rule that will add this MAC to the target Queue */
if (cmd == BNX2X_VLAN_MAC_MOVE) {
rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
&rule_entry->pair.mac_mid,
&rule_entry->pair.mac_lsb, mac);
+ rule_entry->pair.inner_mac =
+ cpu_to_le16(elem->cmd_data.vlan_mac.u.
+ vlan_mac.is_inner_mac);
}
/* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return rc;
}
list_del(&exeq_pos->link);
+ bnx2x_exe_queue_free_elem(bp, exeq_pos);
}
}
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
vlan_obj->check_move = bnx2x_check_move;
vlan_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ vlan_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
tx_data->force_default_pri_flg =
test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
+ tx_data->tunnel_lso_inc_ip_id =
+ test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
+ tx_data->tunnel_non_lso_pcsum_location =
+ test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
+ PCSUM_ON_BD;
+
tx_data->tx_status_block_id = params->fw_sb_id;
tx_data->tx_sb_index_number = params->sb_cq_index;
tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
memset(rdata, 0, sizeof(*rdata));
/* Fill the ramrod data with provided parameters */
- rdata->function_mode = (u8)start_params->mf_mode;
- rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
- rdata->path_id = BP_PATH(bp);
- rdata->network_cos_mode = start_params->network_cos_mode;
-
- /*
- * No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ rdata->function_mode = (u8)start_params->mf_mode;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
+ rdata->path_id = BP_PATH(bp);
+ rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
+ rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
+
+ /* No need for an explicit memory barrier here as long we would
+ * need to ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read and we will have to put a full memory barrier there
+ * (inside bnx2x_sp_post()).
*/
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9f..43c00bc84a0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
/************************* VLAN-MAC commands related parameters ***************/
struct bnx2x_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
};
struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
struct bnx2x_vlan_mac_ramrod_data {
u8 mac[ETH_ALEN];
+ u8 is_inner_mac;
u16 vlan;
};
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
*
* @return number of copied bytes
*/
- int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
- int n, u8 *buf);
+ int (*get_n_elements)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
+ u8 stride, u8 size);
/**
* Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,9 @@ enum {
BNX2X_Q_FLG_TX_SEC,
BNX2X_Q_FLG_ANTI_SPOOF,
BNX2X_Q_FLG_SILENT_VLAN_REM,
- BNX2X_Q_FLG_FORCE_DEFAULT_PRI
+ BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
+ BNX2X_Q_FLG_PCSUM_ON_PKT,
+ BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
};
/* Queue type options: queue type may be a combination of below. */
@@ -842,6 +847,7 @@ enum bnx2x_q_type {
#define BNX2X_MULTI_TX_COS_E3B0 3
#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
+#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
struct bnx2x_queue_init_params {
struct {
@@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params {
/* Function cos mode */
u8 network_cos_mode;
+
+ /* NVGRE classification enablement */
+ u8 nvgre_clss_en;
+
+ /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
+ u8 gre_tunnel_mode;
+
+ /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
+ u8 gre_tunnel_rss;
};
struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa209358..2ce7c747136 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, vlan_mac);
if (rc >= 0) {
cnt += pos->add ? 1 : -1;
- list_del(&pos->link);
- list_add(&pos->link, &rollback_list);
+ list_move(&pos->link, &rollback_list);
rc = 0;
} else if (rc == -EEXIST) {
rc = 0;
@@ -958,6 +959,12 @@ op_err:
BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
case BNX2X_VFOP_QSETUP_DONE:
+ vf->cfg_flags |= VF_CFG_VLAN;
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
return bnx2x_is_pcie_pending(dev);
unknown_dev:
- BNX2X_ERR("Unknown device\n");
return false;
}
@@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* SRIOV can be enabled only with MSIX */
if (int_mode_param == BNX2X_INT_MODE_MSI ||
- int_mode_param == BNX2X_INT_MODE_INTX)
+ int_mode_param == BNX2X_INT_MODE_INTX) {
BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+ return 0;
+ }
err = -EIO;
/* verify ari is enabled */
if (!bnx2x_ari_enabled(bp->pdev)) {
- BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
- return err;
+ BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
+ return 0;
}
/* verify igu is in normal mode */
if (CHIP_INT_MODE_IS_BC(bp)) {
BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
- return err;
+ return 0;
}
/* allocate the vfs database */
@@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
if (iov->total == 0)
goto failed;
- /* calculate the actual number of VFs */
- iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+ iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
+
+ DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
+ num_vfs_param, iov->nr_virtfn);
/* allocate the vf array */
bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
goto get_vf;
case EVENT_RING_OPCODE_MALICIOUS_VF:
abs_vfid = elem->message.data.malicious_vf_event.vf_id;
- DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n",
- abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
goto get_vf;
default:
return 1;
@@ -2436,8 +2446,8 @@ get_vf:
/* Do nothing for now */
break;
case EVENT_RING_OPCODE_MALICIOUS_VF:
- DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n",
- vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
+ abs_vfid, elem->message.data.malicious_vf_event.err_id);
/* Do nothing for now */
break;
}
@@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->op_current = CHANNEL_TLV_NONE;
}
-void bnx2x_enable_sriov(struct bnx2x *bp)
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
- int rc = 0;
- /* disbale sriov in case it is still enabled */
+ struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+
+ DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+
+ /* HW channel is only operational when PF is up */
+ if (bp->state != BNX2X_STATE_OPEN) {
+ BNX2X_ERR("VF num configurtion via sysfs not supported while PF is down");
+ return -EINVAL;
+ }
+
+ /* we are always bound by the total_vfs in the configuration space */
+ if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
+ num_vfs_param, BNX2X_NR_VIRTFN(bp));
+ num_vfs_param = BNX2X_NR_VIRTFN(bp);
+ }
+
+ bp->requested_nr_virtfn = num_vfs_param;
+ if (num_vfs_param == 0) {
+ pci_disable_sriov(dev);
+ return 0;
+ } else {
+ return bnx2x_enable_sriov(bp);
+ }
+}
+
+int bnx2x_enable_sriov(struct bnx2x *bp)
+{
+ int rc = 0, req_vfs = bp->requested_nr_virtfn;
+
+ rc = pci_enable_sriov(bp->pdev, req_vfs);
+ if (rc) {
+ BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
+ return rc;
+ }
+ DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
+ return req_vfs;
+}
+
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
+{
+ int vfidx;
+ struct pf_vf_bulletin_content *bulletin;
+
+ DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
+ for_each_vf(bp, vfidx) {
+ bulletin = BP_VF_BULLETIN(bp, vfidx);
+ if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+ bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
+ }
+}
+
+void bnx2x_disable_sriov(struct bnx2x *bp)
+{
pci_disable_sriov(bp->pdev);
- DP(BNX2X_MSG_IOV, "sriov disabled\n");
+}
+
+static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
+ struct bnx2x_virtf *vf)
+{
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("vf ndo called though sriov is disabled\n");
+ return -EINVAL;
+ }
+
+ if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
+ BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+ vfidx, BNX2X_NR_VIRTFN(bp));
+ return -EINVAL;
+ }
+
+ if (!vf) {
+ BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
- /* enable sriov */
- DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn));
- rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn));
+ return 0;
+}
+
+int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ int rc;
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
if (rc)
- BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
- else
- DP(BNX2X_MSG_IOV, "sriov enabled\n");
+ return rc;
+ if (!mac_obj || !vlan_obj || !bulletin) {
+ BNX2X_ERR("VF partially initialized\n");
+ return -EINVAL;
+ }
+
+ ivi->vf = vfidx;
+ ivi->qos = 0;
+ ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->spoofchk = 1; /*always enabled */
+ if (vf->state == VF_ENABLED) {
+ /* mac and vlan are in vlan_mac objects */
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
+ 0, VLAN_HLEN);
+ } else {
+ /* mac */
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
+ /* mac configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
+ else
+ /* function has not been loaded yet. Show mac as 0s */
+ memset(&ivi->mac, 0, ETH_ALEN);
+
+ /* vlan */
+ if (bulletin->valid_bitmap & (1 << VLAN_VALID))
+ /* vlan configured by ndo so it's in the bulletin board */
+ memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
+ else
+ /* function has not been loaded yet. Show vlans as 0s */
+ memset(&ivi->vlan, 0, VLAN_HLEN);
+ }
+
+ return 0;
}
/* New mac for VF. Consider these cases:
@@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
* VF to configure any mac for itself except for this mac. In case of a race
* where the VF fails to see the new post on its bulletin board before sending a
* mac configuration request, the PF will simply fail the request and VF can try
- * again after consulting its bulletin board
+ * again after consulting its bulletin board.
*/
-int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
+int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state, vfidx = queue;
+ int rc, q_logical_state;
struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
- /* if SRIOV is disabled there is nothing to do (and somewhere, someone
- * has erred).
- */
- if (!IS_SRIOV(bp)) {
- BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
- return -EINVAL;
- }
-
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
if (!is_valid_ether_addr(mac)) {
BNX2X_ERR("mac address invalid\n");
return -EINVAL;
@@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
- unsigned long flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
/* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
}
/* configure the new mac to device */
- __set_bit(RAMROD_COMP_WAIT, &flags);
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
- BNX2X_ETH_MAC, &flags);
+ BNX2X_ETH_MAC, &ramrod_flags);
bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
}
- return rc;
+ return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc, q_logical_state;
+ struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ /* sanity */
+ rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ if (rc)
+ return rc;
+
+ if (vlan > 4095) {
+ BNX2X_ERR("illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
+ vfidx, vlan, 0);
+
+ /* update PF's copy of the VF's bulletin. There is no point in posting
+ * the vlan to the VF, since it has nothing to do with it. But it is
+ * useful to store it here in case the VF is not up yet, so we can
+ * configure the vlan later once it comes up.
+ */
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ bulletin->vlan = vlan;
+
+ /* is vf initialized and queue set up? */
+ q_logical_state =
+ bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ if (vf->state == VF_ENABLED &&
+ q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+ /* configure the vlan in device on this vf's queue */
+ unsigned long ramrod_flags = 0;
+ unsigned long vlan_mac_flags = 0;
+ struct bnx2x_vlan_mac_obj *vlan_obj =
+ &bnx2x_vfq(vf, 0, vlan_obj);
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_queue_update_params *update_params;
+
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ return -EINVAL;
+ }
+
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure the new vlan to device */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ return -EINVAL;
+ }
+
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
+ */
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ update_params->def_vlan = vlan;
+ }
+
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ return rc;
+ }
+
+ /* clear the flag indicating that this VF needs its vlan
+ * (it is only set if the HV configured the vlan before the VF was
+ * up, and we were called later because the VF came up).
+ */
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ }
+ return 0;
}
/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
}
+ /* the vlan in bulletin board is valid and is new */
+ if (bulletin.valid_bitmap & 1 << VLAN_VALID)
+ memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
+
/* copy new bulletin board to bp */
bp->old_bulletin = bulletin;
return PFVF_BULLETIN_UPDATED;
}
-void bnx2x_vf_map_doorbells(struct bnx2x *bp)
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
/* vf doorbells are embedded within the regview */
- bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;
+ return bp->regview + PXP_VF_ADDR_DB_START;
}
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
+ mutex_init(&bp->vf2pf_mutex);
+
/* allocate vf2pf mailbox for vf to pf channel */
BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
@@ -3196,3 +3444,26 @@ alloc_mem_err:
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
+
+int bnx2x_open_epilog(struct bnx2x *bp)
+{
+ /* Enable sriov via delayed work. This must be done via delayed work
+ * because it causes the probe of the vf devices to run, which invokes
+ * register_netdevice, which must be called with the rtnl lock held.
+ * As we are holding the lock right now, that could only work if the
+ * probe did not take the lock. However, as the probe of the vf may be
+ * called from other contexts as well (such as passthrough to a vm
+ * failing), it can't assume the lock is being held for it. Using
+ * delayed work allows the probe code to simply take the lock (i.e.
+ * wait for it to be released if it is being held). We only want to do
+ * this if the number of VFs was set before the PF driver was loaded.
+ */
+ if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ }
+
+ return 0;
+}
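
Editor's aside, not part of the patch: the comment in bnx2x_open_epilog() above describes a generic "defer work that needs rtnl to a delayed-work item" pattern. Below is a minimal sketch of that pattern in isolation; the names my_priv, MY_DO_SRIOV, my_sp_task and my_request_sriov are hypothetical, and the real driver instead sets BNX2X_SP_RTNL_ENABLE_SRIOV and lets its existing sp_rtnl_task perform the enable.

#include <linux/workqueue.h>
#include <linux/bitops.h>

struct my_priv {
	unsigned long		sp_state;
#define MY_DO_SRIOV	0
	struct delayed_work	sp_task;
};

/* Runs later, in a context that does not hold rtnl, so any VF probe
 * triggered from here may take rtnl itself (via register_netdevice)
 * without deadlocking against the original caller.
 */
static void my_sp_task(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, sp_task.work);

	if (test_and_clear_bit(MY_DO_SRIOV, &priv->sp_state))
		; /* ... pci_enable_sriov() and friends would go here ... */
}

static void my_init(struct my_priv *priv)
{
	INIT_DELAYED_WORK(&priv->sp_task, my_sp_task);
}

/* Called with rtnl held (e.g. from ndo_open); it must not enable SRIOV
 * directly, so it only flags the request and kicks the work item.
 */
static void my_request_sriov(struct my_priv *priv)
{
	set_bit(MY_DO_SRIOV, &priv->sp_state);
	schedule_delayed_work(&priv->sp_task, 0);
}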
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add..d4b17b7a774 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
#define VF_CFG_TPA 0x0004
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
+#define VF_CFG_VLAN 0x0020
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
u16 length);
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length);
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
bool bnx2x_tlv_supported(u16 tlvtype);
@@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
}
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
-void bnx2x_vf_map_doorbells(struct bnx2x *bp);
+void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
-void bnx2x_enable_sriov(struct bnx2x *bp);
+int bnx2x_enable_sriov(struct bnx2x *bp);
+void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
}
+void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
+int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
+int bnx2x_open_epilog(struct bnx2x *bp);
#else /* CONFIG_BNX2X_SRIOV */
@@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
int num_vfs_param) {return 0; }
static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
-static inline void bnx2x_enable_sriov(struct bnx2x *bp) {}
+static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
u8 tx_count, u8 rx_count) {return 0; }
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -802,8 +809,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
return PFVF_BULLETIN_UNCHANGED;
}
-static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; }
+static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
+{
+ return NULL;
+}
+
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
+static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
+static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
+static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
#endif /* CONFIG_BNX2X_SRIOV */
#endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 4397f8b76f2..2ca3d94fcec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
}
}
+void bnx2x_memset_stats(struct bnx2x *bp)
+{
+ int i;
+
+ /* function stats */
+ for_each_queue(bp, i) {
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
+ if (bp->stats_init) {
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
+ }
+ }
+
+ memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+ }
+
+ bp->stats_state = STATS_STATE_DISABLED;
+
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
+
+ /* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
void bnx2x_stats_init(struct bnx2x *bp)
{
int /*abs*/port = BP_PORT(bp);
int mb_idx = BP_FW_MB_IDX(bp);
- int i;
bp->stats_pending = 0;
bp->executer_idx = 0;
@@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
&(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
}
- /* function stats */
- for_each_queue(bp, i) {
- struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
-
- memset(&fp_stats->old_tclient, 0,
- sizeof(fp_stats->old_tclient));
- memset(&fp_stats->old_uclient, 0,
- sizeof(fp_stats->old_uclient));
- memset(&fp_stats->old_xclient, 0,
- sizeof(fp_stats->old_xclient));
- if (bp->stats_init) {
- memset(&fp_stats->eth_q_stats, 0,
- sizeof(fp_stats->eth_q_stats));
- memset(&fp_stats->eth_q_stats_old, 0,
- sizeof(fp_stats->eth_q_stats_old));
- }
- }
-
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
- memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+ /* Clean SP from previous statistics */
if (bp->stats_init) {
- memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
- memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
- memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
- memset(&bp->func_stats, 0, sizeof(bp->func_stats));
-
- /* Clean SP from previous statistics */
if (bp->func_stx) {
memset(bnx2x_sp(bp, func_stats), 0,
sizeof(struct host_func_stats));
@@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
}
}
- bp->stats_state = STATS_STATE_DISABLED;
-
- if (bp->port.pmf && bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
-
- /* mark the end of statistics initializiation */
- bp->stats_init = false;
+ bnx2x_memset_stats(bp);
}
void bnx2x_save_statistics(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 198f6f1c9ad..d117f472816 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old {
/* forward */
struct bnx2x;
+void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
-
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 531eebf40d6..90fbf9cc2c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
u16 type, u16 length)
{
+ mutex_lock(&bp->vf2pf_mutex);
+
DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
type);
@@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}
+/* releases the mailbox */
+void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
+{
+ DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
+ first_tlv->tl.type);
+
+ mutex_unlock(&bp->vf2pf_mutex);
+}
+
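
Editor's aside, not part of the patch: with bnx2x_vfpf_prep() now taking bp->vf2pf_mutex and bnx2x_vfpf_finalize() releasing it, every request over the VF-PF channel is expected to bracket its use of the shared mailbox with this pair, as the reworked callers below do. A minimal sketch of the calling pattern, reusing the existing release request purely for illustration; example_vf2pf_request is a hypothetical helper and the TLV-filling step is elided.

/* Hypothetical caller showing the prep/send/finalize bracket around the
 * shared vf2pf mailbox.  Mirrors the shape of bnx2x_vfpf_release() below.
 */
static int example_vf2pf_request(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clears the mailbox and takes bp->vf2pf_mutex */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* ... fill request fields and add the list-terminating TLV here ... */

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = -EAGAIN;

	/* always release the mutex, on success and on every error path */
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}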
/* list the types and lengths of the tlvs on the buffer */
void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
@@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vfdev_info.vf_id = vf_id;
req->vfdev_info.vf_os = 0;
@@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF timeout */
if (rc)
- return rc;
+ goto out;
/* copy acquire response from buffer to bp */
memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
@@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* PF reports error */
BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
bp->acquire_resp.hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
}
@@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->acquire_resp.resc.current_mac_addr,
ETH_ALEN);
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
int bnx2x_vfpf_release(struct bnx2x *bp)
{
struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- u32 rc = 0, vf_id;
+ u32 rc, vf_id;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
- if (bnx2x_get_vf_id(bp, &vf_id))
- return -EAGAIN;
+ if (bnx2x_get_vf_id(bp, &vf_id)) {
+ rc = -EAGAIN;
+ goto out;
+ }
req->vf_id = vf_id;
@@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
if (rc)
/* PF timeout */
- return rc;
+ goto out;
+
if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
/* PF released us */
DP(BNX2X_MSG_SP, "vf released\n");
@@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
/* PF reports error */
BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* Tell PF about SB addresses */
@@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc)
- return rc;
+ goto out;
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
resp->hdr.status);
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
}
/* CLOSE VF - opposite to INIT_VF */
@@ -401,6 +427,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
resp->hdr.status);
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
free_irq:
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 0);
@@ -435,7 +463,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
- flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
flags |= VFPF_QUEUE_FLG_VLAN;
DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -486,8 +513,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
fp_idx, resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
return rc;
}
@@ -515,17 +545,19 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
if (rc) {
BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
rc);
- return rc;
+ goto out;
}
/* PF failed the transaction */
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ return rc;
}
/* request pf to add a mac for the vf */
@@ -533,7 +565,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc;
+ int rc = 0;
/* clear mailbox and prep first tlv */
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
@@ -562,7 +594,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
- return rc;
+ goto out;
}
/* failure may mean PF was configured with a new mac for us */
@@ -587,8 +619,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -643,14 +677,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
if (rc) {
BNX2X_ERR("Sending a message failed: %d\n", rc);
- return rc;
+ goto out;
}
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return 0;
}
@@ -689,7 +725,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
break;
default:
BNX2X_ERR("BAD rx mode (%d)\n", mode);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -708,8 +745,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
return rc;
}
@@ -1004,7 +1043,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
}
/* convert MBX queue-flags to standard SP queue-flags */
-static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
unsigned long *sp_q_flags)
{
if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
@@ -1015,8 +1054,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
- if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
- __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
@@ -1025,6 +1062,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+
+ /* outer vlan removal is set according to the PF's multi function mode */
+ if (IS_MF_SD(bp))
+ __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}
static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1075,11 +1116,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
init_p->tx.hc_rate = setup_q->txq.hc_rate;
init_p->tx.sb_cq_index = setup_q->txq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&init_p->tx.flags);
/* tx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
&setup_p->flags);
/* tx setup - general, nothing */
@@ -1107,11 +1148,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* rx init */
init_p->rx.hc_rate = setup_q->rxq.hc_rate;
init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&init_p->rx.flags);
/* rx setup - flags */
- bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+ bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
&setup_p->flags);
/* rx setup - general */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00..41708faab57 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
* is available for it
*/
+#define VLAN_VALID 1 /* when set, the vf should not access
+ * the vfpf channel
+ */
u8 mac[ETH_ALEN];
- u8 padding[2];
+ u8 mac_padding[2];
+
+ u16 vlan;
+ u8 vlan_padding[6];
};
union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
+ CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375c..e80bfb60c3e 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
SMP_CACHE_BYTES * 2 +
NET_IP_ALIGN);
- if (sb_new == NULL) {
- pr_info("%s: sk_buff allocation failed\n",
- d->sbdma_eth->sbm_dev->name);
+ if (sb_new == NULL)
return -ENOBUFS;
- }
sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba..a4416b09f20 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3 "tigon/tg3.bin"
+#define FIRMWARE_TG357766 "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -3448,11 +3449,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
#define TX_CPU_SCRATCH_SIZE 0x04000
/* tp->lock is held. */
-static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
int i;
+ const int iters = 10000;
- BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+ for (i = 0; i < iters; i++) {
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+ break;
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
+static int tg3_rxcpu_pause(struct tg3 *tp)
+{
+ int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
+
+ tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+ tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
+ udelay(10);
+
+ return rc;
+}
+
+/* tp->lock is held. */
+static int tg3_txcpu_pause(struct tg3 *tp)
+{
+ return tg3_pause_cpu(tp, TX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_MODE, 0x00000000);
+}
+
+/* tp->lock is held. */
+static void tg3_rxcpu_resume(struct tg3 *tp)
+{
+ tg3_resume_cpu(tp, RX_CPU_BASE);
+}
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+ int rc;
+
+ BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
if (tg3_asic_rev(tp) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3508,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
return 0;
}
- if (offset == RX_CPU_BASE) {
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
-
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
- udelay(10);
+ if (cpu_base == RX_CPU_BASE) {
+ rc = tg3_rxcpu_pause(tp);
} else {
/*
* There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3518,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
if (tg3_flag(tp, IS_SSB_CORE))
return 0;
- for (i = 0; i < 10000; i++) {
- tw32(offset + CPU_STATE, 0xffffffff);
- tw32(offset + CPU_MODE, CPU_MODE_HALT);
- if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
- break;
- }
+ rc = tg3_txcpu_pause(tp);
}
- if (i >= 10000) {
+ if (rc) {
netdev_err(tp->dev, "%s timed out, %s CPU\n",
- __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+ __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -3499,19 +3533,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
return 0;
}
-struct fw_info {
- unsigned int fw_base;
- unsigned int fw_len;
- const __be32 *fw_data;
-};
+static int tg3_fw_data_len(struct tg3 *tp,
+ const struct tg3_firmware_hdr *fw_hdr)
+{
+ int fw_len;
+
+ /* Non-fragmented firmware has one firmware header followed by a
+ * contiguous chunk of data to be written. The length field in that
+ * header is not the length of the data to be written but the complete
+ * length including the bss. The data length is therefore determined
+ * from tp->fw->size minus the header.
+ *
+ * Fragmented firmware has a main header followed by multiple
+ * fragments. Each fragment is identical to non-fragmented firmware,
+ * with a firmware header followed by a contiguous chunk of data. In
+ * the main header, the length field is unused and set to 0xffffffff.
+ * In each fragment header the length is the entire size of that
+ * fragment, i.e. fragment data + header length. The data length is
+ * therefore the length field in the header minus TG3_FW_HDR_LEN.
+ */
+ if (tp->fw_len == 0xffffffff)
+ fw_len = be32_to_cpu(fw_hdr->len);
+ else
+ fw_len = tp->fw->size;
+
+ return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
+}
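
Editor's aside, not part of the patch: the comment above describes two blob layouts. Below is a minimal, self-contained sketch of walking such a blob, mirroring the fragment loop added to tg3_load_firmware_cpu() further down but only summing payload words instead of writing them to CPU scratch memory; struct fw_hdr mirrors the tg3_firmware_hdr added to tg3.h by this patch, and fw_payload_words() is a hypothetical helper.

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_hdr {			/* same layout as struct tg3_firmware_hdr */
	__be32 version;		/* unused in fragment headers */
	__be32 base_addr;
	__be32 len;
};

static int fw_payload_words(const void *blob, int blob_len)
{
	const struct fw_hdr *hdr = blob;
	int remaining = blob_len;
	int words = 0;

	/* non-fragmented image: one header, the rest of the file is data */
	if (be32_to_cpu(hdr->len) != 0xffffffff)
		return (blob_len - sizeof(*hdr)) / sizeof(u32);

	/* fragmented image: skip the main header, then walk the fragments */
	remaining -= sizeof(*hdr);
	hdr++;

	do {
		int frag_len = be32_to_cpu(hdr->len);	/* header + data */

		words += (frag_len - sizeof(*hdr)) / sizeof(u32);
		remaining -= frag_len;
		hdr = (const void *)hdr + frag_len;
	} while (remaining > 0);

	return words;
}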
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
u32 cpu_scratch_base, int cpu_scratch_size,
- struct fw_info *info)
+ const struct tg3_firmware_hdr *fw_hdr)
{
- int err, lock_err, i;
+ int err, i;
void (*write_op)(struct tg3 *, u32, u32);
+ int total_len = tp->fw->size;
if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
@@ -3520,30 +3576,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
return -EINVAL;
}
- if (tg3_flag(tp, 5705_PLUS))
+ if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
write_op = tg3_write_mem;
else
write_op = tg3_write_indirect_reg32;
- /* It is possible that bootcode is still loading at this point.
- * Get the nvram lock first before halting the cpu.
- */
- lock_err = tg3_nvram_lock(tp);
- err = tg3_halt_cpu(tp, cpu_base);
- if (!lock_err)
- tg3_nvram_unlock(tp);
- if (err)
- goto out;
+ if (tg3_asic_rev(tp) != ASIC_REV_57766) {
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ int lock_err = tg3_nvram_lock(tp);
+ err = tg3_halt_cpu(tp, cpu_base);
+ if (!lock_err)
+ tg3_nvram_unlock(tp);
+ if (err)
+ goto out;
- for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
- write_op(tp, cpu_scratch_base + i, 0);
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
- for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
- write_op(tp, (cpu_scratch_base +
- (info->fw_base & 0xffff) +
- (i * sizeof(u32))),
- be32_to_cpu(info->fw_data[i]));
+ for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+ write_op(tp, cpu_scratch_base + i, 0);
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE,
+ tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+ } else {
+ /* Subtract additional main header for fragmented firmware and
+ * advance to the first fragment
+ */
+ total_len -= TG3_FW_HDR_LEN;
+ fw_hdr++;
+ }
+
+ do {
+ u32 *fw_data = (u32 *)(fw_hdr + 1);
+ for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
+ write_op(tp, cpu_scratch_base +
+ (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
+ (i * sizeof(u32)),
+ be32_to_cpu(fw_data[i]));
+
+ total_len -= be32_to_cpu(fw_hdr->len);
+
+ /* Advance to next fragment */
+ fw_hdr = (struct tg3_firmware_hdr *)
+ ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
+ } while (total_len > 0);
err = 0;
@@ -3552,13 +3627,33 @@ out:
}
/* tp->lock is held. */
+static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
+{
+ int i;
+ const int iters = 5;
+
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32_f(cpu_base + CPU_PC, pc);
+
+ for (i = 0; i < iters; i++) {
+ if (tr32(cpu_base + CPU_PC) == pc)
+ break;
+ tw32(cpu_base + CPU_STATE, 0xffffffff);
+ tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+ tw32_f(cpu_base + CPU_PC, pc);
+ udelay(1000);
+ }
+
+ return (i == iters) ? -EBUSY : 0;
+}
+
+/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
- int err, i;
+ const struct tg3_firmware_hdr *fw_hdr;
+ int err;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3566,60 +3661,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
-
err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup only the RX cpu. */
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
- break;
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
- tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
"should be %08x\n", __func__,
- tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+ tr32(RX_CPU_BASE + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
- tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
+
+ tg3_rxcpu_resume(tp);
+
+ return 0;
+}
+
+static int tg3_validate_rxcpu_state(struct tg3 *tp)
+{
+ const int iters = 1000;
+ int i;
+ u32 val;
+
+ /* Wait for boot code to complete initialization and enter service
+ * loop. It is then safe to download service patches
+ */
+ for (i = 0; i < iters; i++) {
+ if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
+ break;
+
+ udelay(10);
+ }
+
+ if (i == iters) {
+ netdev_err(tp->dev, "Boot code not ready for service patches\n");
+ return -EBUSY;
+ }
+
+ val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
+ if (val & 0xff) {
+ netdev_warn(tp->dev,
+ "Other patches exist. Not downloading EEE patch\n");
+ return -EEXIST;
+ }
return 0;
}
/* tp->lock is held. */
+static void tg3_load_57766_firmware(struct tg3 *tp)
+{
+ struct tg3_firmware_hdr *fw_hdr;
+
+ if (!tg3_flag(tp, NO_NVRAM))
+ return;
+
+ if (tg3_validate_rxcpu_state(tp))
+ return;
+
+ if (!tp->fw)
+ return;
+
+ /* This firmware blob has a different format than older firmware
+ * releases as given below. The main difference is we have fragmented
+ * data to be written to non-contiguous locations.
+ *
+ * In the beginning we have a firmware header identical to other
+ * firmware which consists of version, base addr and length. The length
+ * here is unused and set to 0xffffffff.
+ *
+ * This is followed by a series of firmware fragments, each of which is
+ * individually identical to older firmware, i.e. a firmware header
+ * followed by the data for that fragment. The version field of the
+ * individual fragment headers is unused.
+ */
+
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
+ if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
+ return;
+
+ if (tg3_rxcpu_pause(tp))
+ return;
+
+ /* tg3_load_firmware_cpu() will always succeed for the 57766 */
+ tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
+
+ tg3_rxcpu_resume(tp);
+}
+
+/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
- struct fw_info info;
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
- int err, i;
+ int err;
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
+ if (!tg3_flag(tp, FW_TSO))
return 0;
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
start address and length. We are setting complete length.
@@ -3627,10 +3779,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
Remainder is the blob to be loaded contiguously
from start address. */
- info.fw_base = be32_to_cpu(fw_data[1]);
cpu_scratch_size = tp->fw_len;
- info.fw_len = tp->fw->size - 12;
- info.fw_data = &fw_data[3];
if (tg3_asic_rev(tp) == ASIC_REV_5705) {
cpu_base = RX_CPU_BASE;
@@ -3643,30 +3792,22 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
err = tg3_load_firmware_cpu(tp, cpu_base,
cpu_scratch_base, cpu_scratch_size,
- &info);
+ fw_hdr);
if (err)
return err;
/* Now startup the cpu. */
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
-
- for (i = 0; i < 5; i++) {
- if (tr32(cpu_base + CPU_PC) == info.fw_base)
- break;
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
- tw32_f(cpu_base + CPU_PC, info.fw_base);
- udelay(1000);
- }
- if (i >= 5) {
+ err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
+ be32_to_cpu(fw_hdr->base_addr));
+ if (err) {
netdev_err(tp->dev,
"%s fails to set CPU PC, is %08x should be %08x\n",
- __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+ __func__, tr32(cpu_base + CPU_PC),
+ be32_to_cpu(fw_hdr->base_addr));
return -ENODEV;
}
- tw32(cpu_base + CPU_STATE, 0xffffffff);
- tw32_f(cpu_base + CPU_MODE, 0x00000000);
+
+ tg3_resume_cpu(tp, cpu_base);
return 0;
}
@@ -8039,11 +8180,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
TG3_RX_RCB_RING_BYTES(tp),
&tnapi->rx_rcb_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->rx_rcb)
goto err_out;
-
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
}
return 0;
@@ -8093,12 +8232,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
sizeof(struct tg3_hw_stats),
&tp->stats_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tp->hw_stats)
goto err_out;
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
-
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
@@ -8106,11 +8243,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
TG3_HW_STATUS_SIZE,
&tnapi->status_mapping,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!tnapi->hw_status)
goto err_out;
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
sblk = tnapi->hw_status;
if (tg3_flag(tp, ENABLE_RSS)) {
@@ -9781,6 +9917,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
}
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ /* Ignore any errors for the firmware download. If download
+ * fails, the device will operate with EEE disabled
+ */
+ tg3_load_57766_firmware(tp);
+ }
+
if (tg3_flag(tp, TSO_CAPABLE)) {
err = tg3_load_tso_firmware(tp);
if (err)
@@ -10570,7 +10713,7 @@ static int tg3_test_msi(struct tg3 *tp)
static int tg3_request_firmware(struct tg3 *tp)
{
- const __be32 *fw_data;
+ const struct tg3_firmware_hdr *fw_hdr;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10721,15 @@ static int tg3_request_firmware(struct tg3 *tp)
return -ENOENT;
}
- fw_data = (void *)tp->fw->data;
+ fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
/* Firmware blob starts with version numbers, followed by
* start address and _full_ length including BSS sections
* (which must be longer than the actual data, of course
*/
- tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
- if (tp->fw_len < (tp->fw->size - 12)) {
+ tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
+ if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
@@ -10885,7 +11028,15 @@ static int tg3_open(struct net_device *dev)
if (tp->fw_needed) {
err = tg3_request_firmware(tp);
- if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
+ if (tg3_asic_rev(tp) == ASIC_REV_57766) {
+ if (err) {
+ netdev_warn(tp->dev, "EEE capability disabled\n");
+ tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+ } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
+ netdev_warn(tp->dev, "EEE capability restored\n");
+ tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+ }
+ } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
if (err)
return err;
} else if (err) {
@@ -14515,6 +14666,7 @@ static int tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
(tg3_asic_rev(tp) == ASIC_REV_5719 ||
tg3_asic_rev(tp) == ASIC_REV_5720 ||
+ tg3_asic_rev(tp) == ASIC_REV_57766 ||
tg3_asic_rev(tp) == ASIC_REV_5762 ||
(tg3_asic_rev(tp) == ASIC_REV_5717 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -15300,7 +15452,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
tg3_asic_rev(tp) != ASIC_REV_5701 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- tg3_flag_set(tp, TSO_BUG);
+ tg3_flag_set(tp, FW_TSO);
+ tg3_flag_set(tp, TSO_BUG);
if (tg3_asic_rev(tp) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
@@ -15311,7 +15464,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- tp->fw_needed) {
+ tg3_flag(tp, FW_TSO)) {
/* For firmware TSO, assume ASF is disabled.
* We'll disable TSO later if we discover ASF
* is enabled in tg3_get_eeprom_hw_cfg().
@@ -15326,6 +15479,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
tp->fw_needed = FIRMWARE_TG3;
+ if (tg3_asic_rev(tp) == ASIC_REV_57766)
+ tp->fw_needed = FIRMWARE_TG357766;
+
tp->irq_max = 1;
if (tg3_flag(tp, 5750_PLUS)) {
@@ -15598,7 +15754,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
tg3_get_eeprom_hw_cfg(tp);
- if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -15786,6 +15942,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
udelay(50);
tg3_nvram_init(tp);
+ /* If the device has an NVRAM, no need to load patch firmware */
+ if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
+ !tg3_flag(tp, NO_NVRAM))
+ tp->fw_needed = NULL;
+
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d..1cdc1b641c7 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2222,6 +2222,12 @@
#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
+#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
+#define TG3_57766_FW_BASE_ADDR 0x00030000
+#define TG3_57766_FW_HANDSHAKE 0x0003fccc
+#define TG3_SBROM_IN_SERVICE_LOOP 0x51
+
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -3009,17 +3015,18 @@ enum TG3_FLAGS {
TG3_FLAG_JUMBO_CAPABLE,
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
- TG3_FLAG_TSO_BUG,
TG3_FLAG_MAX_RXPEND_64,
- TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
TG3_FLAG_ASF_NEW_HANDSHAKE,
TG3_FLAG_HW_AUTONEG,
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
+ TG3_FLAG_FW_TSO,
TG3_FLAG_HW_TSO_1,
TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_TSO_BUG,
TG3_FLAG_ICH_WORKAROUND,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3071,13 @@ enum TG3_FLAGS {
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
};
+struct tg3_firmware_hdr {
+ __be32 version; /* unused for fragments */
+ __be32 base_addr;
+ __be32 len;
+};
+#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
+
struct tg3 {
/* begin "general, frequently-used members" cacheline section */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 3227fdde521..f2b73ffa912 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
-static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
+static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7cce42dc2f2..d588f842d55 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
mem_info->mdl[i].len = mem_info->len;
mem_info->mdl[i].kva =
dma_alloc_coherent(&bnad->pcidev->dev,
- mem_info->len, &dma_pa,
- GFP_KERNEL);
-
+ mem_info->len, &dma_pa,
+ GFP_KERNEL);
if (mem_info->mdl[i].kva == NULL)
goto err_return;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 3becdb2deb4..c6e40d65a3d 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- &lp->rx_ring_dma, GFP_KERNEL);
- if (!lp->rx_ring) {
- netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
+ (MAX_RX_DESCR *
+ sizeof(struct macb_dma_desc)),
+ &lp->rx_ring_dma, GFP_KERNEL);
+ if (!lp->rx_ring)
return -ENOMEM;
- }
lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * MAX_RBUFF_SZ,
- &lp->rx_buffers_dma, GFP_KERNEL);
+ MAX_RX_DESCR * MAX_RBUFF_SZ,
+ &lp->rx_buffers_dma, GFP_KERNEL);
if (!lp->rx_buffers) {
- netdev_err(dev, "unable to alloc rx data DMA buffer\n");
-
dma_free_coherent(&lp->pdev->dev,
- MAX_RX_DESCR * sizeof(struct macb_dma_desc),
- lp->rx_ring, lp->rx_ring_dma);
+ MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+ lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
}
@@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev)
netif_rx(skb);
} else {
lp->stats.rx_dropped++;
- netdev_notice(dev, "Memory squeeze, dropping packet.\n");
}
if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
@@ -519,18 +515,7 @@ static struct platform_driver at91ether_driver = {
},
};
-static int __init at91ether_init(void)
-{
- return platform_driver_probe(&at91ether_driver, at91ether_probe);
-}
-
-static void __exit at91ether_exit(void)
-{
- platform_driver_unregister(&at91ether_driver);
-}
-
-module_init(at91ether_init)
-module_exit(at91ether_exit)
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 79039439bfd..ed2cb130f98 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -485,6 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
+ macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
(unsigned long)status);
@@ -736,6 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+ macb_writel(bp, ISR, MACB_BIT(RCOMP));
if (napi_schedule_prep(&bp->napi)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -1054,6 +1057,7 @@ static void macb_configure_dma(struct macb *bp)
dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
dmacfg |= GEM_BF(FBLDO, 16);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+ dmacfg &= ~GEM_BIT(ENDIA);
gem_writel(bp, DMACFG, dmacfg);
}
}
@@ -1557,14 +1561,14 @@ static int __init macb_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get macb_clk\n");
goto err_out_free_dev;
}
- clk_enable(bp->pclk);
+ clk_prepare_enable(bp->pclk);
bp->hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
dev_err(&pdev->dev, "failed to get hclk\n");
goto err_out_put_pclk;
}
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->hclk);
bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
@@ -1654,9 +1658,9 @@ err_out_free_irq:
err_out_iounmap:
iounmap(bp->regs);
err_out_disable_clocks:
- clk_disable(bp->hclk);
+ clk_disable_unprepare(bp->hclk);
clk_put(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->pclk);
err_out_put_pclk:
clk_put(bp->pclk);
err_out_free_dev:
@@ -1683,9 +1687,9 @@ static int __exit macb_remove(struct platform_device *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
- clk_disable(bp->hclk);
+ clk_disable_unprepare(bp->hclk);
clk_put(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->pclk);
clk_put(bp->pclk);
free_netdev(dev);
platform_set_drvdata(pdev, NULL);
@@ -1703,8 +1707,8 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
netif_carrier_off(netdev);
netif_device_detach(netdev);
- clk_disable(bp->hclk);
- clk_disable(bp->pclk);
+ clk_disable_unprepare(bp->hclk);
+ clk_disable_unprepare(bp->pclk);
return 0;
}
@@ -1714,8 +1718,8 @@ static int macb_resume(struct platform_device *pdev)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
- clk_enable(bp->pclk);
- clk_enable(bp->hclk);
+ clk_prepare_enable(bp->pclk);
+ clk_prepare_enable(bp->hclk);
netif_device_attach(netdev);
@@ -1737,18 +1741,7 @@ static struct platform_driver macb_driver = {
},
};
-static int __init macb_init(void)
-{
- return platform_driver_probe(&macb_driver, macb_probe);
-}
-
-static void __exit macb_exit(void)
-{
- platform_driver_unregister(&macb_driver);
-}
-
-module_init(macb_init);
-module_exit(macb_exit);
+module_platform_driver_probe(macb_driver, macb_probe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 570908b9357..993d7038068 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -173,6 +173,8 @@
/* Bitfields in DMACFG. */
#define GEM_FBLDO_OFFSET 0
#define GEM_FBLDO_SIZE 5
+#define GEM_ENDIA_OFFSET 7
+#define GEM_ENDIA_SIZE 1
#define GEM_RXBMS_OFFSET 8
#define GEM_RXBMS_SIZE 2
#define GEM_TXPBMS_OFFSET 10
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 48297692515..55fe8c9f048 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -835,7 +835,7 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
struct sk_buff *skb;
dma_addr_t mapping;
- skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+ skb = dev_alloc_skb(q->rx_buffer_size);
if (!skb)
break;
@@ -1046,11 +1046,10 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
const struct freelQ_ce *ce = &fl->centries[fl->cidx];
if (len < copybreak) {
- skb = alloc_skb(len + 2, GFP_ATOMIC);
+ skb = netdev_alloc_skb_ip_align(NULL, len);
if (!skb)
goto use_orig_buf;
- skb_reserve(skb, 2); /* align IP header */
skb_put(skb, len);
pci_dma_sync_single_for_cpu(pdev,
dma_unmap_addr(ce, dma_addr),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6db997c78a5..681804b30a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -54,6 +54,10 @@
#define FW_VERSION_MINOR 1
#define FW_VERSION_MICRO 0
+#define FW_VERSION_MAJOR_T5 0
+#define FW_VERSION_MINOR_T5 0
+#define FW_VERSION_MICRO_T5 0
+
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
enum {
@@ -66,7 +70,9 @@ enum {
enum {
MEM_EDC0,
MEM_EDC1,
- MEM_MC
+ MEM_MC,
+ MEM_MC0 = MEM_MC,
+ MEM_MC1
};
enum {
@@ -74,8 +80,10 @@ enum {
MEMWIN0_BASE = 0x1b800,
MEMWIN1_APERTURE = 32768,
MEMWIN1_BASE = 0x28000,
+ MEMWIN1_BASE_T5 = 0x52000,
MEMWIN2_APERTURE = 65536,
MEMWIN2_BASE = 0x30000,
+ MEMWIN2_BASE_T5 = 0x54000,
};
enum dev_master {
@@ -431,6 +439,7 @@ struct sge_txq {
spinlock_t db_lock;
int db_disabled;
unsigned short db_pidx;
+ u64 udb;
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -504,13 +513,44 @@ struct sge {
struct l2t_data;
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
+#ifdef CONFIG_PCI_IOV
+
+/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
+ * Configuration initialization for T5 only has SR-IOV functionality enabled
+ * on PF0-3 in order to simplify everything.
+ */
+#define NUM_OF_PF_WITH_SRIOV 4
+
+#endif
+
struct adapter {
void __iomem *regs;
+ void __iomem *bar2;
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
unsigned int fn;
unsigned int flags;
+ enum chip_type chip;
int msg_enable;
@@ -673,6 +713,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_t5(enum chip_type chip)
+{
+ return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
+}
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
+
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
return readl(adap->regs + reg_addr);
@@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
unsigned int flags);
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+ u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
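The cxgb4.h changes above pack the chip family into the upper nibble of adapter->chip and the silicon revision into the lower nibble; is_t4()/is_t5() then simply range-check that code. A minimal user-space sketch of the encoding, re-declaring the macros from the hunk above purely for illustration (not part of the patch):

    #include <stdio.h>

    #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
    #define CHELSIO_CHIP_VERSION(code)           ((code) >> 4)
    #define CHELSIO_CHIP_RELEASE(code)           ((code) & 0xf)

    #define CHELSIO_T4 0x4
    #define CHELSIO_T5 0x5

    int main(void)
    {
        int t4_a2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1); /* 0x41 */
        int t5_a1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0); /* 0x50 */

        /* Round-trip: unpack the family and revision again. */
        printf("T4_A2: code=0x%x version=%d rev=%d\n", t4_a2,
               CHELSIO_CHIP_VERSION(t4_a2), CHELSIO_CHIP_RELEASE(t4_a2));
        printf("T5_A1: code=0x%x version=%d rev=%d\n", t5_a1,
               CHELSIO_CHIP_VERSION(t5_a1), CHELSIO_CHIP_RELEASE(t5_a1));
        return 0;
    }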
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e707e31abd8..e76cf035100 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -68,8 +68,8 @@
#include "t4fw_api.h"
#include "l2t.h"
-#define DRV_VERSION "1.3.0-ko"
-#define DRV_DESC "Chelsio T4 Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
* Max interrupt hold-off timer value in us. Queues fall back to this value
@@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x440a, 4),
CH_DEVICE(0x440d, 4),
CH_DEVICE(0x440e, 4),
+ CH_DEVICE(0x5001, 5),
+ CH_DEVICE(0x5002, 5),
+ CH_DEVICE(0x5003, 5),
+ CH_DEVICE(0x5004, 5),
+ CH_DEVICE(0x5005, 5),
+ CH_DEVICE(0x5006, 5),
+ CH_DEVICE(0x5007, 5),
+ CH_DEVICE(0x5008, 5),
+ CH_DEVICE(0x5009, 5),
+ CH_DEVICE(0x500A, 5),
+ CH_DEVICE(0x500B, 5),
+ CH_DEVICE(0x500C, 5),
+ CH_DEVICE(0x500D, 5),
+ CH_DEVICE(0x500E, 5),
+ CH_DEVICE(0x500F, 5),
+ CH_DEVICE(0x5010, 5),
+ CH_DEVICE(0x5011, 5),
+ CH_DEVICE(0x5012, 5),
+ CH_DEVICE(0x5013, 5),
+ CH_DEVICE(0x5401, 5),
+ CH_DEVICE(0x5402, 5),
+ CH_DEVICE(0x5403, 5),
+ CH_DEVICE(0x5404, 5),
+ CH_DEVICE(0x5405, 5),
+ CH_DEVICE(0x5406, 5),
+ CH_DEVICE(0x5407, 5),
+ CH_DEVICE(0x5408, 5),
+ CH_DEVICE(0x5409, 5),
+ CH_DEVICE(0x540A, 5),
+ CH_DEVICE(0x540B, 5),
+ CH_DEVICE(0x540C, 5),
+ CH_DEVICE(0x540D, 5),
+ CH_DEVICE(0x540E, 5),
+ CH_DEVICE(0x540F, 5),
+ CH_DEVICE(0x5410, 5),
+ CH_DEVICE(0x5411, 5),
+ CH_DEVICE(0x5412, 5),
+ CH_DEVICE(0x5413, 5),
{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
@@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW5_FNAME);
/*
* Normally we're willing to become the firmware's Master PF but will be happy
@@ -319,7 +360,10 @@ static bool vf_acls;
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
-static unsigned int num_vf[4];
+/* Configure the number of PCI-E Virtual Functions which are to be instantiated
+ * on SR-IOV Capable Physical Functions.
+ */
+static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
@@ -1002,21 +1046,36 @@ freeout: t4_free_sge_resources(adap);
static int upgrade_fw(struct adapter *adap)
{
int ret;
- u32 vers;
+ u32 vers, exp_major;
const struct fw_hdr *hdr;
const struct firmware *fw;
struct device *dev = adap->pdev_dev;
+ char *fw_file_name;
- ret = request_firmware(&fw, FW_FNAME, dev);
+ switch (CHELSIO_CHIP_VERSION(adap->chip)) {
+ case CHELSIO_T4:
+ fw_file_name = FW_FNAME;
+ exp_major = FW_VERSION_MAJOR;
+ break;
+ case CHELSIO_T5:
+ fw_file_name = FW5_FNAME;
+ exp_major = FW_VERSION_MAJOR_T5;
+ break;
+ default:
+ dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
+ return -EINVAL;
+ }
+
+ ret = request_firmware(&fw, fw_file_name, dev);
if (ret < 0) {
- dev_err(dev, "unable to load firmware image " FW_FNAME
- ", error %d\n", ret);
+ dev_err(dev, "unable to load firmware image %s, error %d\n",
+ fw_file_name, ret);
return ret;
}
hdr = (const struct fw_hdr *)fw->data;
vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+ if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
ret = -EINVAL; /* wrong major version, won't do */
goto out;
}
@@ -1024,18 +1083,15 @@ static int upgrade_fw(struct adapter *adap)
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
vers > adap->params.fw_vers) {
dev_info(dev, "upgrading firmware ...\n");
ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
/*force=*/false);
if (!ret)
- dev_info(dev, "firmware successfully upgraded to "
- FW_FNAME " (%d.%d.%d.%d)\n",
- FW_HDR_FW_VER_MAJOR_GET(vers),
- FW_HDR_FW_VER_MINOR_GET(vers),
- FW_HDR_FW_VER_MICRO_GET(vers),
- FW_HDR_FW_VER_BUILD_GET(vers));
+ dev_info(dev,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, fw_file_name);
else
dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
} else {
@@ -1308,6 +1364,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
+ "WriteCoalSuccess ",
+ "WriteCoalFail ",
};
static int get_sset_count(struct net_device *dev, int sset)
@@ -1321,10 +1379,15 @@ static int get_sset_count(struct net_device *dev, int sset)
}
#define T4_REGMAP_SIZE (160 * 1024)
+#define T5_REGMAP_SIZE (332 * 1024)
static int get_regs_len(struct net_device *dev)
{
- return T4_REGMAP_SIZE;
+ struct adapter *adap = netdev2adap(dev);
+ if (is_t4(adap->chip))
+ return T4_REGMAP_SIZE;
+ else
+ return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
@@ -1398,11 +1461,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
+ u32 val1, val2;
t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+ data += sizeof(struct queue_port_stats) / sizeof(u64);
+ if (!is_t4(adapter->chip)) {
+ t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
+ val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
+ val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
+ *data = val1 - val2;
+ data++;
+ *data = val2;
+ data++;
+ } else {
+ memset(data, 0, 2 * sizeof(u64));
+ *data += 2;
+ }
}
/*
@@ -1413,7 +1490,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return 4 | (ap->params.rev << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->chip) |
+ (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -1428,7 +1506,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
- static const unsigned int reg_ranges[] = {
+ static const unsigned int t4_reg_ranges[] = {
0x1008, 0x1108,
0x1180, 0x11b4,
0x11fc, 0x123c,
@@ -1648,13 +1726,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x27e00, 0x27e04
};
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x1148,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a9c,
+ 0x5b9c, 0x5bfc,
+ 0x6000, 0x6040,
+ 0x6058, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x90f8,
+ 0x9400, 0x9470,
+ 0x9600, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c60,
+ 0x19c94, 0x19e10,
+ 0x19e50, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30144,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x31600,
+ 0x31608, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e04, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32208, 0x3223c,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b70,
+ 0x33000, 0x33048,
+ 0x33060, 0x3309c,
+ 0x330f0, 0x33148,
+ 0x33160, 0x3319c,
+ 0x331f0, 0x332e4,
+ 0x332f8, 0x333e4,
+ 0x333f8, 0x33448,
+ 0x33460, 0x3349c,
+ 0x334f0, 0x33548,
+ 0x33560, 0x3359c,
+ 0x335f0, 0x336e4,
+ 0x336f8, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33948,
+ 0x33960, 0x3399c,
+ 0x339f0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34144,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x35600,
+ 0x35608, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e04, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36208, 0x3623c,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b70,
+ 0x37000, 0x37048,
+ 0x37060, 0x3709c,
+ 0x370f0, 0x37148,
+ 0x37160, 0x3719c,
+ 0x371f0, 0x372e4,
+ 0x372f8, 0x373e4,
+ 0x373f8, 0x37448,
+ 0x37460, 0x3749c,
+ 0x374f0, 0x37548,
+ 0x37560, 0x3759c,
+ 0x375f0, 0x376e4,
+ 0x376f8, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37948,
+ 0x37960, 0x3799c,
+ 0x379f0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38100, 0x38144,
+ 0x38190, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a04,
+ 0x38a0c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38c24,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x39600,
+ 0x39608, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e04, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a208, 0x3a23c,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab70,
+ 0x3b000, 0x3b048,
+ 0x3b060, 0x3b09c,
+ 0x3b0f0, 0x3b148,
+ 0x3b160, 0x3b19c,
+ 0x3b1f0, 0x3b2e4,
+ 0x3b2f8, 0x3b3e4,
+ 0x3b3f8, 0x3b448,
+ 0x3b460, 0x3b49c,
+ 0x3b4f0, 0x3b548,
+ 0x3b560, 0x3b59c,
+ 0x3b5f0, 0x3b6e4,
+ 0x3b6f8, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b948,
+ 0x3b960, 0x3b99c,
+ 0x3b9f0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca04,
+ 0x3ca0c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3cc24,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d600,
+ 0x3d608, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de04, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e208, 0x3e23c,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb70,
+ 0x3f000, 0x3f048,
+ 0x3f060, 0x3f09c,
+ 0x3f0f0, 0x3f148,
+ 0x3f160, 0x3f19c,
+ 0x3f1f0, 0x3f2e4,
+ 0x3f2f8, 0x3f3e4,
+ 0x3f3f8, 0x3f448,
+ 0x3f460, 0x3f49c,
+ 0x3f4f0, 0x3f548,
+ 0x3f560, 0x3f59c,
+ 0x3f5f0, 0x3f6e4,
+ 0x3f6f8, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f948,
+ 0x3f960, 0x3f99c,
+ 0x3f9f0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40068,
+ 0x40080, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40298,
+ 0x402ac, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41300, 0x413c4,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44078,
+ 0x440c0, 0x44278,
+ 0x442c0, 0x44478,
+ 0x444c0, 0x44678,
+ 0x446c0, 0x44878,
+ 0x448c0, 0x449fc,
+ 0x45000, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48068,
+ 0x48080, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48298,
+ 0x482ac, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49300, 0x493c4,
+ 0x49400, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c078,
+ 0x4c0c0, 0x4c278,
+ 0x4c2c0, 0x4c478,
+ 0x4c4c0, 0x4c678,
+ 0x4c6c0, 0x4c878,
+ 0x4c8c0, 0x4c9fc,
+ 0x4d000, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
int i;
struct adapter *ap = netdev2adap(dev);
+ static const unsigned int *reg_ranges;
+ int arr_size = 0, buf_size = 0;
+
+ if (is_t4(ap->chip)) {
+ reg_ranges = &t4_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t4_reg_ranges);
+ buf_size = T4_REGMAP_SIZE;
+ } else {
+ reg_ranges = &t5_reg_ranges[0];
+ arr_size = ARRAY_SIZE(t5_reg_ranges);
+ buf_size = T5_REGMAP_SIZE;
+ }
regs->version = mk_adap_vers(ap);
- memset(buf, 0, T4_REGMAP_SIZE);
- for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+ memset(buf, 0, buf_size);
+ for (i = 0; i < arr_size; i += 2)
reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
@@ -2363,8 +2880,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
int ret, ofst;
__be32 data[16];
- if (mem == MEM_MC)
- ret = t4_mc_read(adap, pos, data, NULL);
+ if ((mem == MEM_MC) || (mem == MEM_MC1))
+ ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
else
ret = t4_edc_read(adap, mem, pos, data, NULL);
if (ret)
@@ -2405,18 +2922,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
static int setup_debugfs(struct adapter *adap)
{
int i;
+ u32 size;
if (IS_ERR_OR_NULL(adap->debugfs_root))
return -1;
i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
- if (i & EDRAM0_ENABLE)
- add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
- if (i & EDRAM1_ENABLE)
- add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+ if (i & EDRAM0_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM0_BAR);
+ add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
+ }
+ if (i & EDRAM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EDRAM1_BAR);
+ add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
+ }
+ if (is_t4(adap->chip)) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ if (i & EXT_MEM_ENABLE)
+ add_debugfs_mem(adap, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ } else {
+ if (i & EXT_MEM_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
+ add_debugfs_mem(adap, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
+ add_debugfs_mem(adap, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+ }
if (adap->l2t)
debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
&t4_l2t_fops);
@@ -2747,10 +3283,18 @@ EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
struct adapter *adap = netdev2adap(dev);
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+ return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
@@ -2853,6 +3397,25 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
+void cxgb4_disable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
+ F_NOCOALESCE);
+}
+EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
+
+void cxgb4_enable_db_coalescing(struct net_device *dev)
+{
+ struct adapter *adap;
+
+ adap = netdev2adap(dev);
+ t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
+}
+EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
+
static struct pci_driver cxgb4_driver;
static void check_neigh_update(struct neighbour *neigh)
@@ -2888,14 +3451,23 @@ static struct notifier_block cxgb4_netevent_nb = {
static void drain_db_fifo(struct adapter *adap, int usecs)
{
- u32 v;
+ u32 v1, v2, lp_count, hp_count;
do {
+ v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
+ v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
+ if (is_t4(adap->chip)) {
+ lp_count = G_LP_COUNT(v1);
+ hp_count = G_HP_COUNT(v1);
+ } else {
+ lp_count = G_LP_COUNT_T5(v1);
+ hp_count = G_HP_COUNT_T5(v2);
+ }
+
+ if (lp_count == 0 && hp_count == 0)
+ break;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(usecs_to_jiffies(usecs));
- v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
- if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
- break;
} while (1);
}
@@ -3004,24 +3576,62 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
+ if (is_t4(adap->chip)) {
+ disable_dbs(adap);
+ notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
+ drain_db_fifo(adap, 1);
+ recover_all_queues(adap);
+ enable_dbs(adap);
+ } else {
+ u32 dropped_db = t4_read_reg(adap, 0x010ac);
+ u16 qid = (dropped_db >> 15) & 0x1ffff;
+ u16 pidx_inc = dropped_db & 0x1fff;
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+ u32 udb;
+
+ dev_warn(adap->pdev_dev,
+ "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
+ dropped_db, qid,
+ (dropped_db >> 14) & 1,
+ (dropped_db >> 13) & 1,
+ pidx_inc);
+
+ drain_db_fifo(adap, 1);
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ udb = qid << qpshift;
+ udb &= PAGE_MASK;
+ page = udb / PAGE_SIZE;
+ udb += (qid - (page * udb_density)) * 128;
+
+ writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
+
+ /* Re-enable BAR2 WC */
+ t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
+ }
+
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
- disable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, 1);
- recover_all_queues(adap);
- enable_dbs(adap);
}
void t4_db_full(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_INT_ENABLE3,
- DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
- queue_work(workq, &adap->db_full_task);
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, SGE_INT_ENABLE3,
+ DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
+ queue_work(workq, &adap->db_full_task);
+ }
}
void t4_db_dropped(struct adapter *adap)
{
- queue_work(workq, &adap->db_drop_task);
+ if (is_t4(adap->chip))
+ queue_work(workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3566,17 +4176,27 @@ void t4_fatal_err(struct adapter *adap)
static void setup_memwin(struct adapter *adap)
{
- u32 bar0;
+ u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
+ if (is_t4(adap->chip)) {
+ mem_win0_base = bar0 + MEMWIN0_BASE;
+ mem_win1_base = bar0 + MEMWIN1_BASE;
+ mem_win2_base = bar0 + MEMWIN2_BASE;
+ } else {
+ /* For T5, only the relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+ mem_win1_base = MEMWIN1_BASE_T5;
+ mem_win2_base = MEMWIN2_BASE_T5;
+ }
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
- (bar0 + MEMWIN0_BASE) | BIR(0) |
+ mem_win0_base | BIR(0) |
WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
- (bar0 + MEMWIN1_BASE) | BIR(0) |
+ mem_win1_base | BIR(0) |
WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
- (bar0 + MEMWIN2_BASE) | BIR(0) |
+ mem_win2_base | BIR(0) |
WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
@@ -3745,6 +4365,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
int ret, using_flash;
+ char *fw_config_file, fw_config_file_path[256];
/*
* Reset device if necessary.
@@ -3761,7 +4382,21 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ fw_config_file = FW_CFNAME;
+ break;
+ case CHELSIO_T5:
+ fw_config_file = FW5_CFNAME;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ adapter->pdev->device);
+ ret = -EINVAL;
+ goto bye;
+ }
+
+ ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
using_flash = 1;
mtype = FW_MEMTYPE_CF_FLASH;
@@ -3877,6 +4512,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
+ sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -3887,7 +4523,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
"Configuration File %s, version %#x, computed checksum %#x\n",
(using_flash
? "in device FLASH"
- : "/lib/firmware/" FW_CFNAME),
+ : fw_config_file_path),
finiver, cfcsum);
return 0;
@@ -4814,7 +5450,8 @@ static void print_port_info(const struct net_device *dev)
sprintf(bufp, "BASE-%s", base[pi->port_type]);
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id, adap->params.rev, buf,
+ adap->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
@@ -4854,10 +5491,11 @@ static void free_some_resources(struct adapter *adapter)
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int func, i, err;
+ int func, i, err, s_qpp, qpp, num_seg;
struct port_info *pi;
bool highdma = false;
struct adapter *adapter = NULL;
@@ -4934,7 +5572,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = t4_prep_adapter(adapter);
if (err)
- goto out_unmap_bar;
+ goto out_unmap_bar0;
+
+ if (!is_t4(adapter->chip)) {
+ s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
+ qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
+ num_seg = PAGE_SIZE / SEGMENT_SIZE;
+
+ /* Each segment size is 128B. Write coalescing is enabled only
+ * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
+ * queue does not exceed the number of segments that can be
+ * accommodated in a page.
+ */
+ if (qpp > num_seg) {
+ dev_err(&pdev->dev,
+ "Incorrect number of egress queues per page\n");
+ err = -EINVAL;
+ goto out_unmap_bar0;
+ }
+ adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ if (!adapter->bar2) {
+ dev_err(&pdev->dev, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+ }
+
setup_memwin(adapter);
err = adap_init0(adapter);
setup_memwin_rdma(adapter);
@@ -5063,6 +5728,9 @@ sriov:
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
+ out_unmap_bar0:
iounmap(adapter->regs);
out_free_adapter:
kfree(adapter);
@@ -5113,6 +5781,8 @@ static void remove_one(struct pci_dev *pdev)
free_some_resources(adapter);
iounmap(adapter->regs);
+ if (!is_t4(adapter->chip))
+ iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e2bbc7f3e2d..4faf4d067ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
+void cxgb4_disable_db_coalescing(struct net_device *dev);
+void cxgb4_enable_db_coalescing(struct net_device *dev);
+
#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fe9a2ea3588..8b47b253e20 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
+ u32 val;
if (q->pend_cred >= 8) {
+ val = PIDX(q->pend_cred / 8);
+ if (!is_t4(adap->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
- QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+ QID(q->cntxt_id) | val);
q->pend_cred &= 7;
}
}
@@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*end = 0;
}
+/* This function copies a 64-byte coalesced work request to
+ * memory-mapped BAR2 space (user-space writes).
+ * For a coalesced WR, the SGE fetches data from the FIFO instead of from the host.
+ */
+static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
+{
+ int count = 8;
+
+ while (count) {
+ writeq(*src, dst);
+ src++;
+ dst++;
+ count--;
+ }
+}
+
/**
* ring_tx_db - check and potentially ring a Tx queue's doorbell
* @adap: the adapter
@@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
{
+ unsigned int *wr, index;
+
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
- QID(q->cntxt_id) | PIDX(n));
+ if (is_t4(adap->chip)) {
+ t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+ QID(q->cntxt_id) | PIDX(n));
+ } else {
+ if (n == 1) {
+ index = q->pidx ? (q->pidx - 1) : (q->size - 1);
+ wr = (unsigned int *)&q->desc[index];
+ cxgb_pio_copy((u64 __iomem *)
+ (adap->bar2 + q->udb + 64),
+ (u64 *)wr);
+ } else
+ writel(n, adap->bar2 + q->udb + 8);
+ wmb();
+ }
}
q->db_pidx = q->pidx;
spin_unlock(&q->db_lock);
@@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap,
const struct pkt_gl *gl)
{
struct sk_buff *skb;
- struct cpl_trace_pkt *p;
skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
if (unlikely(!skb)) {
@@ -1563,8 +1596,11 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
- p = (struct cpl_trace_pkt *)skb->data;
- __skb_pull(skb, sizeof(*p));
+ if (is_t4(adap->chip))
+ __skb_pull(skb, sizeof(struct cpl_trace_pkt));
+ else
+ __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
+
skb_reset_mac_header(skb);
skb->protocol = htons(0xffff);
skb->dev = adap->port[0];
@@ -1625,8 +1661,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
+ int cpl_trace_pkt = is_t4(q->adap->chip) ?
+ CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
- if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+ if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
@@ -2143,11 +2181,27 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
+ q->cntxt_id = id;
+ if (!is_t4(adap->chip)) {
+ unsigned int s_qpp;
+ unsigned short udb_density;
+ unsigned long qpshift;
+ int page;
+
+ s_qpp = QUEUESPERPAGEPF1 * adap->fn;
+ udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
+ SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
+ qpshift = PAGE_SHIFT - ilog2(udb_density);
+ q->udb = q->cntxt_id << qpshift;
+ q->udb &= PAGE_MASK;
+ page = q->udb / PAGE_SIZE;
+ q->udb += (q->cntxt_id - (page * udb_density)) * 128;
+ }
+
q->in_use = 0;
q->cidx = q->pidx = 0;
q->stops = q->restarts = 0;
q->stat = (void *)&q->desc[q->size];
- q->cntxt_id = id;
spin_lock_init(&q->db_lock);
adap->sge.egr_map[id - adap->sge.egr_start] = q;
}
@@ -2587,11 +2641,20 @@ static int t4_sge_init_hard(struct adapter *adap)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
- V_HP_INT_THRESH(M_HP_INT_THRESH) |
- V_LP_INT_THRESH(M_LP_INT_THRESH),
- V_HP_INT_THRESH(dbfifo_int_thresh) |
- V_LP_INT_THRESH(dbfifo_int_thresh));
+ if (is_t4(adap->chip)) {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_HP_INT_THRESH(M_HP_INT_THRESH) |
+ V_LP_INT_THRESH(M_LP_INT_THRESH),
+ V_HP_INT_THRESH(dbfifo_int_thresh) |
+ V_LP_INT_THRESH(dbfifo_int_thresh));
+ } else {
+ t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
+ V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
+ V_LP_INT_THRESH_T5(dbfifo_int_thresh));
+ t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
+ V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
+ V_HP_INT_THRESH_T5(dbfifo_int_thresh));
+ }
t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
F_ENABLE_DROP);
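The T5 paths added to sge.c (init_txq()) and cxgb4_main.c (process_db_drop()) locate a queue's user doorbell in BAR2 by packing udb_density queues per page, 128 bytes apart, with qpshift = PAGE_SHIFT - ilog2(udb_density). A standalone sketch of that arithmetic with local stand-ins for PAGE_SIZE and ilog2 (illustration only, not patch content):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned int ilog2_u(unsigned long v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    /* Mirrors the qpshift/udb computation used in init_txq()/process_db_drop(). */
    static uint64_t udb_offset(unsigned int qid, unsigned int udb_density)
    {
        unsigned long qpshift = PAGE_SHIFT - ilog2_u(udb_density);
        uint64_t udb = ((uint64_t)qid << qpshift) & PAGE_MASK; /* page base */
        unsigned int page = udb / PAGE_SIZE;

        /* 128-byte doorbell slot for this queue within its page */
        return udb + (qid - page * udb_density) * 128;
    }

    int main(void)
    {
        /* With 8 queues per page, queue 11 sits in page 1, slot 3 -> 0x1180. */
        printf("qid 11 -> BAR2 offset 0x%llx\n",
               (unsigned long long)udb_offset(11, 8));
        return 0;
    }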
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8049268ce0f..d02d4e8c441 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -282,6 +282,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
* t4_mc_read - read from MC through backdoor accesses
* @adap: the adapter
* @addr: address of first byte requested
+ * @idx: which MC to access
* @data: 64 bytes of data containing the requested address
* @ecc: where to store the corresponding 64-bit ECC word
*
@@ -289,22 +290,38 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
* that covers the requested address @addr. If @parity is not %NULL it
* is assigned the 64-bit ECC word for the read data.
*/
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
+ u32 mc_bist_status_rdata, mc_bist_data_pattern;
+
+ if (is_t4(adap->chip)) {
+ mc_bist_cmd = MC_BIST_CMD;
+ mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
+ mc_bist_cmd_len = MC_BIST_CMD_LEN;
+ mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
+ mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
+ } else {
+ mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
+ mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
+ mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
+ mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+ }
- if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+ if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
return -EBUSY;
- t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
- t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
- t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
- t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+ t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
+ t4_write_reg(adap, mc_bist_cmd_len, 64);
+ t4_write_reg(adap, mc_bist_data_pattern, 0xc);
+ t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
BIST_CMD_GAP(1));
- i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
if (i)
return i;
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
@@ -329,20 +346,39 @@ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
+ u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
+ u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
+
+ if (is_t4(adap->chip)) {
+ edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
+ edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
+ idx);
+ edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
+ idx);
+ } else {
+ edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern =
+ EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ edc_bist_status_rdata =
+ EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+ }
- idx *= EDC_STRIDE;
- if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
+ if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
return -EBUSY;
- t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
- t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
- t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
- t4_write_reg(adap, EDC_BIST_CMD + idx,
+ t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
+ t4_write_reg(adap, edc_bist_cmd_len, 64);
+ t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
+ t4_write_reg(adap, edc_bist_cmd,
BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
- i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
+ i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
if (i)
return i;
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
@@ -366,6 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
int i;
+ u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
/*
* Setup offset into PCIE memory window. Address must be a
@@ -374,7 +411,7 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
* values.)
*/
t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
- addr & ~(MEMWIN0_APERTURE - 1));
+ (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
/* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
@@ -410,6 +447,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
__be32 *buf, int dir)
{
u32 pos, start, end, offset, memoffset;
+ u32 edc_size, mc_size;
int ret = 0;
__be32 *data;
@@ -423,13 +461,21 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
if (!data)
return -ENOMEM;
- /*
- * Offset into the region of memory which is being accessed
+ /* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
*/
- memoffset = (mtype * (5 * 1024 * 1024));
+ edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
/* Determine the PCIE_MEM_ACCESS_OFFSET */
addr = addr + memoffset;
@@ -497,9 +543,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_LEN 512
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
/**
* t4_seeprom_wp - enable/disable EEPROM write protection
@@ -856,6 +902,7 @@ int t4_check_fw_version(struct adapter *adapter)
{
u32 api_vers[2];
int ret, major, minor, micro;
+ int exp_major, exp_minor, exp_micro;
ret = get_fw_version(adapter, &adapter->params.fw_vers);
if (!ret)
@@ -870,17 +917,35 @@ int t4_check_fw_version(struct adapter *adapter)
major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+
+ switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ case CHELSIO_T4:
+ exp_major = FW_VERSION_MAJOR;
+ exp_minor = FW_VERSION_MINOR;
+ exp_micro = FW_VERSION_MICRO;
+ break;
+ case CHELSIO_T5:
+ exp_major = FW_VERSION_MAJOR_T5;
+ exp_minor = FW_VERSION_MINOR_T5;
+ exp_micro = FW_VERSION_MICRO_T5;
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
+ adapter->chip);
+ return -EINVAL;
+ }
+
memcpy(adapter->params.api_vers, api_vers,
sizeof(adapter->params.api_vers));
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ if (major != exp_major) { /* major mismatch - fail */
dev_err(adapter->pdev_dev,
"card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR);
+ major, exp_major);
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == exp_minor && micro == exp_micro)
return 0; /* perfect match */
/* Minor/micro version mismatch. Report it but often it's OK. */
@@ -1246,6 +1311,45 @@ static void pcie_intr_handler(struct adapter *adapter)
{ 0 }
};
+ static struct intr_info t5_pcie_intr_info[] = {
+ { MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
+ { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR, "Outbound read error", -1, 0 },
+ { 0 }
+ };
+
int fat;
fat = t4_handle_intr_status(adapter,
@@ -1254,7 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter)
t4_handle_intr_status(adapter,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
- t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+ t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
+ is_t4(adapter->chip) ?
+ pcie_intr_info : t5_pcie_intr_info);
+
if (fat)
t4_fatal_err(adapter);
}
@@ -1664,7 +1771,14 @@ static void ncsi_intr_handler(struct adapter *adap)
*/
static void xgmac_intr_handler(struct adapter *adap, int port)
{
- u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ u32 v, int_cause_reg;
+
+ if (is_t4(adap->chip))
+ int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
+ else
+ int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
+
+ v = t4_read_reg(adap, int_cause_reg);
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
if (!v)
@@ -2126,7 +2240,9 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
u32 bgmap = get_mps_bg_map(adap, idx);
#define GET_STAT(name) \
- t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+ t4_read_reg64(adap, \
+ (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
p->tx_octets = GET_STAT(TX_PORT_BYTES);
@@ -2205,14 +2321,26 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr)
{
+ u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+
+ if (is_t4(adap->chip)) {
+ mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
+ }
+
if (addr) {
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+ t4_write_reg(adap, mag_id_reg_l,
(addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | addr[5]);
- t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+ t4_write_reg(adap, mag_id_reg_h,
(addr[0] << 8) | addr[1]);
}
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+ t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
addr ? MAGICEN : 0);
}
@@ -2235,16 +2363,23 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
int i;
+ u32 port_cfg_reg;
+
+ if (is_t4(adap->chip))
+ port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
+ else
+ port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
if (!enable) {
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
- PATEN, 0);
+ t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
return 0;
}
if (map > 0xff)
return -EINVAL;
-#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+#define EPIO_REG(name) \
+ (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
+ T5_PORT_REG(port, MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2322,24 +2457,24 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
* @addr: address of first byte requested aligned on 32b.
* @data: len bytes to hold the data read
* @len: amount of data to read from window. Must be <=
- * MEMWIN0_APERATURE after adjusting for 16B alignment
- * requirements of the the memory window.
+ * MEMWIN0_APERTURE after adjusting for 16B for T4 and
+ * 128B for T5 alignment requirements of the memory window.
*
* Read len bytes of data from MC starting at @addr.
*/
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
- int i;
- int off;
+ int i, off;
+ u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
- /*
- * Align on a 16B boundary.
+ /* Align on a 2KB boundary.
*/
- off = addr & 15;
+ off = addr & MEMWIN0_APERTURE;
if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
return -EINVAL;
- t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15);
+ t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
+ (addr & ~MEMWIN0_APERTURE) | win_pf);
t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
for (i = 0; i < len; i += 4)
@@ -3162,6 +3297,9 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
int i, ret;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
+ unsigned int max_naddr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (naddr > 7)
return -EINVAL;
@@ -3187,8 +3325,8 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
if (idx)
- idx[i] = index >= NEXACT_MAC ? 0xffff : index;
- if (index < NEXACT_MAC)
+ idx[i] = index >= max_naddr ? 0xffff : index;
+ if (index < max_naddr)
ret++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[i]));
@@ -3221,6 +3359,9 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
+ unsigned int max_mac_addr = is_t4(adap->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -3238,7 +3379,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
if (ret == 0) {
ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
- if (ret >= NEXACT_MAC)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -3547,7 +3688,8 @@ static int get_flash_params(struct adapter *adap)
*/
int t4_prep_adapter(struct adapter *adapter)
{
- int ret;
+ int ret, ver;
+ uint16_t device_id;
ret = t4_wait_dev_ready(adapter);
if (ret < 0)
@@ -3562,6 +3704,28 @@ int t4_prep_adapter(struct adapter *adapter)
return ret;
}
+ /* Retrieve adapter's device ID
+ */
+ pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
+ ver = device_id >> 12;
+ switch (ver) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
+ adapter->params.rev);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
+ adapter->params.rev);
+ break;
+ default:
+ dev_err(adapter->pdev_dev, "Device %d is not supported\n",
+ device_id);
+ return -EINVAL;
+ }
+
+ /* Reassign the updated revision field */
+ adapter->params.rev = adapter->chip;
+
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index f534ed7e10e..1d1623be9f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
- NEXACT_MAC = 336, /* # of exact MAC address filters */
L2T_SIZE = 4096, /* # of L2T entries */
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 261d17703ad..47656ac1ac2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -74,6 +74,7 @@ enum {
CPL_PASS_ESTABLISH = 0x41,
CPL_RX_DATA_DDP = 0x42,
CPL_PASS_ACCEPT_REQ = 0x44,
+ CPL_TRACE_PKT_T5 = 0x48,
CPL_RDMA_READ_REQ = 0x60,
@@ -287,6 +288,23 @@ struct cpl_act_open_req {
__be32 opt2;
};
+#define S_FILTER_TUPLE 24
+#define M_FILTER_TUPLE 0xFFFFFFFFFF
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+struct cpl_t5_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+};
+
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -566,6 +584,11 @@ struct cpl_rx_pkt {
#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+#define S_RX_T5_ETHHDR_LEN 0
+#define M_RX_T5_ETHHDR_LEN 0x3F
+#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
+#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
+
#define S_RX_MACIDX 8
#define M_RX_MACIDX 0x1FF
#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
@@ -612,6 +635,28 @@ struct cpl_trace_pkt {
__be64 tstamp;
};
+struct cpl_t5_trace_pkt {
+ __u8 opcode;
+ __u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 runt:4;
+ __u8 filter_hit:4;
+ __u8:6;
+ __u8 err:1;
+ __u8 trunc:1;
+#else
+ __u8 filter_hit:4;
+ __u8 runt:4;
+ __u8 trunc:1;
+ __u8 err:1;
+ __u8:6;
+#endif
+ __be16 rsvd;
+ __be16 len;
+ __be64 tstamp;
+ __be64 rsvd1;
+};
+
struct cpl_l2t_write_req {
WR_HDR;
union opcode_tid ot;
@@ -742,4 +787,12 @@ struct ulp_mem_io {
#define ULP_MEMIO_LOCK(x) ((x) << 31)
};
+#define S_T5_ULP_MEMIO_IMM 23
+#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
+#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
+
+#define S_T5_ULP_MEMIO_ORDER 22
+#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
+#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
+
#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 83ec5f7844a..ef146c0ba48 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -68,9 +68,14 @@
#define QID_SHIFT 15
#define QID(x) ((x) << QID_SHIFT)
#define DBPRIO(x) ((x) << 14)
+#define DBTYPE(x) ((x) << 13)
#define PIDX_MASK 0x00003fffU
#define PIDX_SHIFT 0
#define PIDX(x) ((x) << PIDX_SHIFT)
+#define S_PIDX_T5 0
+#define M_PIDX_T5 0x1fffU
+#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+
#define SGE_PF_GTS 0x4
#define INGRESSQID_MASK 0xffff0000U
@@ -152,6 +157,8 @@
#define QUEUESPERPAGEPF0_MASK 0x0000000fU
#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+#define QUEUESPERPAGEPF1 4
+
#define SGE_INT_CAUSE1 0x1024
#define SGE_INT_CAUSE2 0x1030
#define SGE_INT_CAUSE3 0x103c
@@ -234,6 +241,10 @@
#define SGE_DOORBELL_CONTROL 0x10a8
#define ENABLE_DROP (1 << 13)
+#define S_NOCOALESCE 26
+#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
+#define F_NOCOALESCE V_NOCOALESCE(1U)
+
#define SGE_TIMER_VALUE_0_AND_1 0x10b8
#define TIMERVALUE0_MASK 0xffff0000U
#define TIMERVALUE0_SHIFT 16
@@ -272,17 +283,36 @@
#define S_HP_INT_THRESH 28
#define M_HP_INT_THRESH 0xfU
#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
+#define S_LP_INT_THRESH_T5 18
+#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
+#define M_LP_COUNT_T5 0x3ffffU
+#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
#define M_HP_COUNT 0x7ffU
#define S_HP_COUNT 16
#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
#define S_LP_INT_THRESH 12
#define M_LP_INT_THRESH 0xfU
+#define M_LP_INT_THRESH_T5 0xfffU
#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
#define M_LP_COUNT 0x7ffU
#define S_LP_COUNT 0
#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
#define A_SGE_DBFIFO_STATUS 0x10a4
+#define SGE_STAT_TOTAL 0x10e4
+#define SGE_STAT_MATCH 0x10e8
+
+#define SGE_STAT_CFG 0x10ec
+#define S_STATSOURCE_T5 9
+#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+
+#define SGE_DBFIFO_STATUS2 0x1118
+#define M_HP_COUNT_T5 0x3ffU
+#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
+#define S_HP_INT_THRESH_T5 10
+#define M_HP_INT_THRESH_T5 0xfU
+#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
+
#define S_ENABLE_DROP 13
#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
#define F_ENABLE_DROP V_ENABLE_DROP(1U)
@@ -331,8 +361,27 @@
#define MSIADDRHPERR 0x00000002U
#define MSIADDRLPERR 0x00000001U
+#define READRSPERR 0x20000000U
+#define TRGT1GRPPERR 0x10000000U
+#define IPSOTPERR 0x08000000U
+#define IPRXDATAGRPPERR 0x02000000U
+#define IPRXHDRGRPPERR 0x01000000U
+#define MAGRPPERR 0x00400000U
+#define VFIDPERR 0x00200000U
+#define HREQWRPERR 0x00010000U
+#define DREQWRPERR 0x00002000U
+#define MSTTAGQPERR 0x00000400U
+#define PIOREQGRPPERR 0x00000100U
+#define PIOCPLGRPPERR 0x00000080U
+#define MSIXSTIPERR 0x00000004U
+#define MSTTIMEOUTPERR 0x00000002U
+#define MSTGRPPERR 0x00000001U
+
#define PCIE_NONFAT_ERR 0x3010
#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
#define PCIEOFST_MASK 0xfffffc00U
#define BIR_MASK 0x00000300U
#define BIR_SHIFT 8
@@ -342,6 +391,9 @@
#define WINDOW(x) ((x) << WINDOW_SHIFT)
#define PCIE_MEM_ACCESS_OFFSET 0x306c
+#define S_PFNUM 0
+#define V_PFNUM(x) ((x) << S_PFNUM)
+
#define PCIE_FW 0x30b8
#define PCIE_FW_ERR 0x80000000U
#define PCIE_FW_INIT 0x40000000U
@@ -407,12 +459,18 @@
#define MC_BIST_STATUS_RDATA 0x7688
+#define MA_EDRAM0_BAR 0x77c0
+#define MA_EDRAM1_BAR 0x77c4
+#define EDRAM_SIZE_MASK 0xfffU
+#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
+
#define MA_EXT_MEMORY_BAR 0x77c8
#define EXT_MEM_SIZE_MASK 0x00000fffU
#define EXT_MEM_SIZE_SHIFT 0
#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
#define MA_TARGET_MEM_ENABLE 0x77d8
+#define EXT_MEM1_ENABLE 0x00000010U
#define EXT_MEM_ENABLE 0x00000004U
#define EDRAM1_ENABLE 0x00000002U
#define EDRAM0_ENABLE 0x00000001U
@@ -431,6 +489,7 @@
#define MA_PCIE_FW 0x30b8
#define MA_PARITY_ERROR_STATUS 0x77f4
+#define MA_EXT_MEMORY1_BAR 0x7808
#define EDC_0_BASE_ADDR 0x7900
#define EDC_BIST_CMD 0x7904
@@ -801,6 +860,15 @@
#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define MAC_PORT_CFG2 0x818
+#define MAC_PORT_MAGIC_MACID_LO 0x824
+#define MAC_PORT_MAGIC_MACID_HI 0x828
+#define MAC_PORT_EPIO_DATA0 0x8c0
+#define MAC_PORT_EPIO_DATA1 0x8c4
+#define MAC_PORT_EPIO_DATA2 0x8c8
+#define MAC_PORT_EPIO_DATA3 0x8cc
+#define MAC_PORT_EPIO_OP 0x8d0
+
#define MPS_CMN_CTL 0x9000
#define NUMPORTS_MASK 0x00000003U
#define NUMPORTS_SHIFT 0
@@ -1063,6 +1131,7 @@
#define ADDRESS_SHIFT 0
#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
+#define MAC_PORT_INT_CAUSE 0x8dc
#define XGMAC_PORT_INT_CAUSE 0x10dc
#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
@@ -1101,4 +1170,33 @@
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MC_0_BASE_ADDR 0x40000
+#define MC_1_BASE_ADDR 0x48000
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_P_BIST_CMD 0x41400
+#define MC_P_BIST_CMD_ADDR 0x41404
+#define MC_P_BIST_CMD_LEN 0x41408
+#define MC_P_BIST_DATA_PATTERN 0x4140c
+#define MC_P_BIST_STATUS_RDATA 0x41488
+#define EDC_T50_BASE_ADDR 0x50000
+#define EDC_H_BIST_CMD 0x50004
+#define EDC_H_BIST_CMD_ADDR 0x50008
+#define EDC_H_BIST_CMD_LEN 0x5000c
+#define EDC_H_BIST_DATA_PATTERN 0x50010
+#define EDC_H_BIST_STATUS_RDATA 0x50028
+
+#define EDC_T51_BASE_ADDR 0x50800
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a0dcccd846c..93444325b1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -574,7 +574,7 @@ struct fw_eth_tx_pkt_vm_wr {
__be16 vlantci;
};
-#define FW_CMD_MAX_TIMEOUT 3000
+#define FW_CMD_MAX_TIMEOUT 10000
/*
* If a host driver does a HELLO and discovers that there's already a MASTER
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7..be5c7ef6ca9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,6 +344,7 @@ struct adapter {
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
+ enum chip_type chip;
struct adapter_params params;
/* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 56b46ab2d4c..7fcac200376 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -54,8 +54,8 @@
/*
* Generic information about the driver.
*/
-#define DRV_VERSION "1.0.0"
-#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
+#define DRV_VERSION "2.0.0-ko"
+#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
/*
* Module Parameters.
@@ -1050,7 +1050,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
/*
* Chip version 4, revision 0x3f (cxgb4vf).
*/
- return 4 | (0x3f << 10);
+ return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
}
/*
@@ -2099,6 +2099,15 @@ static int adap_init0(struct adapter *adapter)
return err;
}
+ switch (adapter->pdev->device >> 12) {
+ case CHELSIO_T4:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ break;
+ case CHELSIO_T5:
+ adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+ break;
+ }
+
/*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
@@ -2888,6 +2897,26 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x480a, 0), /* T404-bt */
CH_DEVICE(0x480d, 0), /* T480-cr */
CH_DEVICE(0x480e, 0), /* T440-lp-cr */
+ CH_DEVICE(0x5800, 0), /* T580-dbg */
+ CH_DEVICE(0x5801, 0), /* T520-cr */
+ CH_DEVICE(0x5802, 0), /* T522-cr */
+ CH_DEVICE(0x5803, 0), /* T540-cr */
+ CH_DEVICE(0x5804, 0), /* T520-bch */
+ CH_DEVICE(0x5805, 0), /* T540-bch */
+ CH_DEVICE(0x5806, 0), /* T540-ch */
+ CH_DEVICE(0x5807, 0), /* T520-so */
+ CH_DEVICE(0x5808, 0), /* T520-cx */
+ CH_DEVICE(0x5809, 0), /* T520-bt */
+ CH_DEVICE(0x580a, 0), /* T504-bt */
+ CH_DEVICE(0x580b, 0), /* T520-sr */
+ CH_DEVICE(0x580c, 0), /* T504-bt */
+ CH_DEVICE(0x580d, 0), /* T580-cr */
+ CH_DEVICE(0x580e, 0), /* T540-lp-cr */
+ CH_DEVICE(0x580f, 0), /* Amsterdam */
+ CH_DEVICE(0x5810, 0), /* T580-lp-cr */
+ CH_DEVICE(0x5811, 0), /* T520-lp-cr */
+ CH_DEVICE(0x5812, 0), /* T560-cr */
+ CH_DEVICE(0x5813, 0), /* T580-cr */
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9488032d6d2..61dfb2a4792 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
+ u32 val;
+
/*
* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
* of multiples of Free List Entries per Egress Queue Units ...
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
+ val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
+ if (!is_t4(adapter->chip))
+ val |= DBTYPE(1);
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
DBPRIO(1) |
- QID(fl->cntxt_id) |
- PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
+ QID(fl->cntxt_id) | val);
fl->pend_cred %= FL_PER_EQ_UNIT;
}
}
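
For illustration (not part of the patch): on T5 the free-list doorbell carries a DBTYPE field at bit 13, added to t4_regs.h above, so ring_fl_db() folds DBTYPE(1) into the PIDX value before writing SGE_VF_KDOORBELL, while T4 keeps the old layout. A sketch of how the register value is composed from the field macros, assuming a free list with context id 5 and 3 egress-queue units of pending credit:

	u32 val = PIDX(3);		/* pending credit, in EQ units */
	if (!is_t4(adapter->chip))
		val |= DBTYPE(1);	/* 1 << 13, T5 only */
	t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
		     DBPRIO(1) |	/* 1 << 14 */
		     QID(5) |		/* 5 << 15 */
		     val);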
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 283f9d0d37f..53cbfed21d0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -38,6 +38,25 @@
#include "../cxgb4/t4fw_api.h"
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A3,
+
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_FIRST_REV = T5_A1,
+ T5_LAST_REV = T5_A1,
+};
+
/*
* The "len16" field of a Firmware Command Structure ...
*/
@@ -232,6 +251,11 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
}
+static inline int is_t4(enum chip_type chip)
+{
+ return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+}
+
int t4vf_wait_dev_ready(struct adapter *);
int t4vf_port_init(struct adapter *, int);
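
For illustration (not part of the patch): the helpers above pack a 4-bit chip version and a 4-bit revision into a single code, and both the PF and VF drivers derive the version from the top nibble of the PCI device ID (see the t4_prep_adapter() and adap_init0() hunks). A minimal standalone sketch of the arithmetic, using 0x5801 (T520-cr) from the ID table above:

	#include <assert.h>

	#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
	#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
	#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
	#define CHELSIO_T4 0x4
	#define CHELSIO_T5 0x5

	int main(void)
	{
		unsigned int device_id = 0x5801;		/* T520-cr */
		unsigned int ver = device_id >> 12;		/* 0x5 -> T5 */
		unsigned int chip = CHELSIO_CHIP_CODE(ver, 0);	/* 0x50 */

		assert(CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
		assert(CHELSIO_CHIP_RELEASE(chip) == 0);
		return 0;
	}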
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 7127c7b9efd..9f96dc3bb11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,8 +1027,11 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
+ unsigned int max_naddr = is_t4(adapter->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
- if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
+ if (naddr > max_naddr)
return -EINVAL;
for (offset = 0; offset < naddr; /**/) {
@@ -1069,10 +1072,10 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
if (idx)
idx[offset+i] =
- (index >= FW_CLS_TCAM_NUM_ENTRIES
+ (index >= max_naddr
? 0xffff
: index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ if (index < max_naddr)
nfilters++;
else if (hash)
*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -1118,6 +1121,9 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
+ unsigned int max_naddr = is_t4(adapter->chip) ?
+ NUM_MPS_CLS_SRAM_L_INSTANCES :
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
/*
* If this is a new allocation, determine whether it should be
@@ -1140,7 +1146,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
- if (ret >= FW_CLS_TCAM_NUM_ENTRIES)
+ if (ret >= max_naddr)
ret = -ENOMEM;
}
return ret;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 13844695778..19f642a45f4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -101,23 +101,6 @@ static char version[] __initdata =
* them to system IRQ numbers. This mapping is card specific and is set to
* the configuration of the Cirrus Eval board for this chip.
*/
-#if defined(CONFIG_MACH_IXDP2351)
-#define CS89x0_NONISA_IRQ
-static unsigned int netcard_portlist[] __used __initdata = {
- IXDP2351_VIRT_CS8900_BASE, 0
-};
-static unsigned int cs8900_irq_map[] = {
- IRQ_IXDP2351_CS8900, 0, 0, 0
-};
-#elif defined(CONFIG_ARCH_IXDP2X01)
-#define CS89x0_NONISA_IRQ
-static unsigned int netcard_portlist[] __used __initdata = {
- IXDP2X01_CS8900_VIRT_BASE, 0
-};
-static unsigned int cs8900_irq_map[] = {
- IRQ_IXDP2X01_CS8900, 0, 0, 0
-};
-#else
#ifndef CONFIG_CS89x0_PLATFORM
static unsigned int netcard_portlist[] __used __initdata = {
0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
@@ -127,7 +110,6 @@ static unsigned int cs8900_irq_map[] = {
10, 11, 12, 5
};
#endif
-#endif
#if DEBUGGING
static unsigned int net_debug = DEBUGGING;
@@ -210,32 +192,6 @@ static int __init media_fn(char *str)
__setup("cs89x0_media=", media_fn);
#endif
-#if defined(CONFIG_MACH_IXDP2351)
-static u16
-readword(unsigned long base_addr, int portno)
-{
- return __raw_readw(base_addr + (portno << 1));
-}
-
-static void
-writeword(unsigned long base_addr, int portno, u16 value)
-{
- __raw_writew(value, base_addr + (portno << 1));
-}
-#elif defined(CONFIG_ARCH_IXDP2X01)
-static u16
-readword(unsigned long base_addr, int portno)
-{
- return __raw_readl(base_addr + (portno << 1));
-}
-
-static void
-writeword(unsigned long base_addr, int portno, u16 value)
-{
- __raw_writel(value, base_addr + (portno << 1));
-}
-#endif
-
static void readwords(struct net_local *lp, int portno, void *buf, int length)
{
u8 *buf8 = (u8 *)buf;
@@ -478,9 +434,6 @@ dma_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
- /* I don't think we want to do this to a stressed system */
- cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
- dev->name);
dev->stats.rx_dropped++;
/* AKPM: advance bp to the next frame */
@@ -731,9 +684,6 @@ net_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = netdev_alloc_skb(dev, length + 2);
if (skb == NULL) {
-#if 0 /* Again, this seems a cruel thing to do */
- pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
-#endif
dev->stats.rx_dropped++;
return;
}
@@ -908,7 +858,7 @@ net_open(struct net_device *dev)
goto bad_out;
}
} else {
-#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM)
+#if !defined(CONFIG_CS89x0_PLATFORM)
if (((1 << dev->irq) & lp->irq_map) == 0) {
pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
dev->name, dev->irq, lp->irq_map);
@@ -1321,9 +1271,7 @@ static const struct net_device_ops net_ops = {
static void __init reset_chip(struct net_device *dev)
{
#if !defined(CONFIG_MACH_MX31ADS)
-#if !defined(CS89x0_NONISA_IRQ)
struct net_local *lp = netdev_priv(dev);
-#endif /* CS89x0_NONISA_IRQ */
int reset_start_time;
writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -1331,7 +1279,6 @@ static void __init reset_chip(struct net_device *dev)
/* wait 30 ms */
msleep(30);
-#if !defined(CS89x0_NONISA_IRQ)
if (lp->chip_type != CS8900) {
/* Hardware problem requires PNP registers to be reconfigured after a reset */
iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
@@ -1344,7 +1291,6 @@ static void __init reset_chip(struct net_device *dev)
iowrite8((dev->mem_start >> 8) & 0xff,
lp->virt_addr + DATA_PORT + 1);
}
-#endif /* CS89x0_NONISA_IRQ */
/* Wait until the chip is reset */
reset_start_time = jiffies;
@@ -1579,9 +1525,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
i = lp->isa_config & INT_NO_MASK;
#ifndef CONFIG_CS89x0_PLATFORM
if (lp->chip_type == CS8900) {
-#ifdef CS89x0_NONISA_IRQ
- i = cs8900_irq_map[0];
-#else
/* Translate the IRQ using the IRQ mapping table. */
if (i >= ARRAY_SIZE(cs8900_irq_map))
pr_err("invalid ISA interrupt number %d\n", i);
@@ -1599,7 +1542,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
lp->irq_map = ((irq_map_buff[0] >> 8) |
(irq_map_buff[1] << 8));
}
-#endif
}
#endif
if (!dev->irq)
@@ -1978,18 +1920,6 @@ static struct platform_driver cs89x0_driver = {
.remove = cs89x0_platform_remove,
};
-static int __init cs89x0_init(void)
-{
- return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
-}
-
-module_init(cs89x0_init);
-
-static void __exit cs89x0_cleanup(void)
-{
- platform_driver_unregister(&cs89x0_driver);
-}
-
-module_exit(cs89x0_cleanup);
+module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
#endif /* CONFIG_CS89x0_PLATFORM */
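
For illustration (not part of the patch): module_platform_driver_probe() used here, and module_platform_driver() used in the ep93xx_eth, dm9000 and ftgmac100 hunks below, generate roughly the init/exit boilerplate being deleted, so the conversion is behaviour-preserving. A sketch of what the macro stands in for (generated symbol names are approximate):

	static int __init cs89x0_driver_init(void)
	{
		return platform_driver_probe(&cs89x0_driver,
					     cs89x0_platform_probe);
	}
	module_init(cs89x0_driver_init);

	static void __exit cs89x0_driver_exit(void)
	{
		platform_driver_unregister(&cs89x0_driver);
	}
	module_exit(cs89x0_driver_exit);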
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 354cbb78ed5..67b0388b6e6 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -887,18 +887,7 @@ static struct platform_driver ep93xx_eth_driver = {
},
};
-static int __init ep93xx_eth_init_module(void)
-{
- printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
- return platform_driver_register(&ep93xx_eth_driver);
-}
-
-static void __exit ep93xx_eth_cleanup_module(void)
-{
- platform_driver_unregister(&ep93xx_eth_driver);
-}
+module_platform_driver(ep93xx_eth_driver);
-module_init(ep93xx_eth_init_module);
-module_exit(ep93xx_eth_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9eada8e8607..9105465b2a1 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1693,22 +1693,7 @@ static struct platform_driver dm9000_driver = {
.remove = dm9000_drv_remove,
};
-static int __init
-dm9000_init(void)
-{
- printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
-
- return platform_driver_register(&dm9000_driver);
-}
-
-static void __exit
-dm9000_cleanup(void)
-{
- platform_driver_unregister(&dm9000_driver);
-}
-
-module_init(dm9000_init);
-module_exit(dm9000_cleanup);
+module_platform_driver(dm9000_driver);
MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
MODULE_DESCRIPTION("Davicom DM9000 network driver");
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 88feced9a62..cdbcd164314 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
private->rx_buffer = dma_alloc_coherent(d, 8192,
&private->rx_dma_handle,
GFP_KERNEL);
- if (private->rx_buffer == NULL) {
- pr_err("%s: no memory for rx buffer\n", __func__);
+ if (private->rx_buffer == NULL)
goto rx_buf_fail;
- }
+
private->tx_buffer = dma_alloc_coherent(d, 8192,
&private->tx_dma_handle,
GFP_KERNEL);
- if (private->tx_buffer == NULL) {
- pr_err("%s: no memory for tx buffer\n", __func__);
+ if (private->tx_buffer == NULL)
goto tx_buf_fail;
- }
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 110d26f4c60..afa8e3af2c4 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -580,12 +580,9 @@ alloc_list (struct net_device *dev)
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
- if (skb == NULL) {
- printk (KERN_ERR
- "%s: alloc_list: allocate Rx buffer error! ",
- dev->name);
+ if (skb == NULL)
break;
- }
+
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 29aff55f2ee..2e2700e3a5a 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 3c9b4f12e3e..f286ad2da1f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -2667,10 +2667,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
&cmd.dma, GFP_KERNEL);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ if (!cmd.va)
return -ENOMEM;
- }
spin_lock_bh(&adapter->mcc_lock);
@@ -3202,6 +3200,31 @@ err:
return status;
}
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_intr_set *req;
+ int status;
+
+ if (mutex_lock_interruptible(&adapter->mbox_lock))
+ return -1;
+
+ wrb = wrb_from_mbox(adapter);
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
+ wrb, NULL);
+
+ req->intr_enabled = intr_enable;
+
+ status = be_mbox_notify_wait(adapter);
+
+ mutex_unlock(&adapter->mbox_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 96970860c91..f2af8551721 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -188,6 +188,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
+#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1791,6 +1792,12 @@ struct be_cmd_enable_disable_vf {
u8 rsvd[3];
};
+struct be_cmd_req_intr_set {
+ struct be_cmd_req_hdr hdr;
+ u8 intr_enabled;
+ u8 rsvd[3];
+};
+
static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
{
return flags & adapter->cmd_privileges ? true : false;
@@ -1938,3 +1945,4 @@ extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
extern int be_cmd_get_if_id(struct be_adapter *adapter,
struct be_vf_cfg *vf_cfg, int vf_num);
extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 76b302f30c8..07b7f27cb0b 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -719,10 +719,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
&ddrdma_cmd.dma, GFP_KERNEL);
- if (!ddrdma_cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ if (!ddrdma_cmd.va)
return -ENOMEM;
- }
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@ -845,11 +843,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
&eeprom_cmd.dma, GFP_KERNEL);
- if (!eeprom_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure. Could not read eeprom\n");
+ if (!eeprom_cmd.va)
return -ENOMEM;
- }
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 62dc220695f..89e6d8cfaf0 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288..536afa2fb94 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -146,20 +146,16 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->entry_size = entry_size;
mem->size = len * entry_size;
mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!mem->va)
return -ENOMEM;
- memset(mem->va, 0, mem->size);
return 0;
}
-static void be_intr_set(struct be_adapter *adapter, bool enable)
+static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_error)
- return;
-
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
&reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
@@ -175,6 +171,22 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
+static void be_intr_set(struct be_adapter *adapter, bool enable)
+{
+ int status = 0;
+
+ /* On lancer interrupts can't be controlled via this register */
+ if (lancer_chip(adapter))
+ return;
+
+ if (adapter->eeh_error)
+ return;
+
+ status = be_cmd_intr_set(adapter, enable);
+ if (status)
+ be_reg_intr_set(adapter, enable);
+}
+
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
u32 val = 0;
@@ -2435,9 +2447,6 @@ static int be_close(struct net_device *netdev)
be_roce_dev_close(adapter);
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
-
for_all_evt_queues(adapter, eqo, i)
napi_disable(&eqo->napi);
@@ -2525,9 +2534,6 @@ static int be_open(struct net_device *netdev)
be_irq_register(adapter);
- if (!lancer_chip(adapter))
- be_intr_set(adapter, true);
-
for_all_rx_queues(adapter, rxo, i)
be_cq_notify(adapter, rxo->cq.id, true, 0);
@@ -2562,10 +2568,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (cmd.va == NULL)
return -1;
- memset(cmd.va, 0, cmd.size);
if (enable) {
status = pci_write_config_dword(adapter->pdev,
@@ -3457,11 +3462,9 @@ static int lancer_fw_download(struct be_adapter *adapter,
flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ LANCER_FW_DOWNLOAD_CHUNK;
flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
- &flash_cmd.dma, GFP_KERNEL);
+ &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
goto lancer_fw_exit;
}
@@ -3563,8 +3566,6 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
&flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) {
status = -ENOMEM;
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure while flashing\n");
goto be_fw_exit;
}
@@ -3791,12 +3792,13 @@ static int be_ctrl_init(struct be_adapter *adapter)
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma, GFP_KERNEL);
+ &rx_filter->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
}
- memset(rx_filter->va, 0, rx_filter->size);
+
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
@@ -3838,10 +3840,9 @@ static int be_stats_init(struct be_adapter *adapter)
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (cmd->va == NULL)
return -1;
- memset(cmd->va, 0, cmd->size);
return 0;
}
@@ -3853,6 +3854,7 @@ static void be_remove(struct pci_dev *pdev)
return;
be_roce_dev_remove(adapter);
+ be_intr_set(adapter, false);
cancel_delayed_work_sync(&adapter->func_recovery_work);
@@ -4142,11 +4144,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
goto ctrl_clean;
}
- /* The INTR bit may be set in the card when probed by a kdump kernel
- * after a crash.
- */
- if (!lancer_chip(adapter))
- be_intr_set(adapter, false);
+ /* Wait for interrupts to quiesce after an FLR */
+ msleep(100);
+
+ /* Allow interrupts for other ULPs running on NIC function */
+ be_intr_set(adapter, true);
status = be_stats_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 55d32aa0a09..f3d126dcc10 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index db4ea8081c0..27657299846 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2013 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 7c361d1db94..21b85fb7d05 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
priv->descs = dma_alloc_coherent(priv->dev,
sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
+ &priv->descs_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!priv->descs)
return -ENOMEM;
- memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
-
/* initialize RX ring */
ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
@@ -1350,22 +1349,7 @@ static struct platform_driver ftgmac100_driver = {
},
};
-/******************************************************************************
- * initialization / finalization
- *****************************************************************************/
-static int __init ftgmac100_init(void)
-{
- pr_info("Loading version " DRV_VERSION " ...\n");
- return platform_driver_register(&ftgmac100_driver);
-}
-
-static void __exit ftgmac100_exit(void)
-{
- platform_driver_unregister(&ftgmac100_driver);
-}
-
-module_init(ftgmac100_init);
-module_exit(ftgmac100_exit);
+module_platform_driver(ftgmac100_driver);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
MODULE_DESCRIPTION("FTGMAC100 driver");
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b5ea8fbd8a7..a6eda8d8313 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr, GFP_KERNEL);
+ priv->descs = dma_alloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
if (!priv->descs)
return -ENOMEM;
- memset(priv->descs, 0, sizeof(struct ftmac100_descs));
-
/* initialize RX ring */
ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
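
For illustration (not part of the patch): the dma_alloc_coherent() hunks in the benet, ftgmac100, ftmac100, ibm/emac and e1000 files of this series all make the same conversion: drop the explicit memset() and request zeroed memory from the allocator with __GFP_ZERO. Schematically, using the field names of be_queue_alloc() above:

	/* before: allocate, then clear by hand */
	mem->va = dma_alloc_coherent(dev, mem->size, &mem->dma, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);

	/* after: one call, zeroed by the allocator */
	mem->va = dma_alloc_coherent(dev, mem->size, &mem->dma,
				     GFP_KERNEL | __GFP_ZERO);
	if (!mem->va)
		return -ENOMEM;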
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index b7d58fe6f53..549ce13b92a 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,7 +2,8 @@
# Makefile for the Freescale network device drivers.
#
-obj-$(CONFIG_FEC) += fec.o fec_ptp.o
+obj-$(CONFIG_FEC) += fec.o
+fec-objs :=fec_main.o fec_ptp.o
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec_main.c
index f292c3aa423..a82a70345bb 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,7 +29,6 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
@@ -791,8 +790,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
if (unlikely(!skb)) {
- printk("%s: Memory squeeze, dropping packet.\n",
- ndev->name);
ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
@@ -1442,7 +1439,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_esc = BD_ENET_RX_INT;
+ ebdp->cbd_esc = BD_ENET_TX_INT;
}
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
@@ -1607,7 +1604,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
* Polled functionality used by netconsole and others in non interrupt mode
*
*/
-void fec_poll_controller(struct net_device *dev)
+static void fec_poll_controller(struct net_device *dev)
{
int i;
struct fec_enet_private *fep = netdev_priv(dev);
@@ -1648,11 +1645,9 @@ static int fec_enet_init(struct net_device *ndev)
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
- GFP_KERNEL);
- if (!cbd_base) {
- printk("FEC: allocate descriptor memory failed?\n");
+ GFP_KERNEL);
+ if (!cbd_base)
return -ENOMEM;
- }
memset(cbd_base, 0, PAGE_SIZE);
spin_lock_init(&fep->hw_lock);
@@ -1757,16 +1752,10 @@ fec_probe(struct platform_device *pdev)
if (!r)
return -ENXIO;
- r = request_mem_region(r->start, resource_size(r), pdev->name);
- if (!r)
- return -EBUSY;
-
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
- if (!ndev) {
- ret = -ENOMEM;
- goto failed_alloc_etherdev;
- }
+ if (!ndev)
+ return -ENOMEM;
SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1778,7 +1767,7 @@ fec_probe(struct platform_device *pdev)
(pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
- fep->hwp = ioremap(r->start, resource_size(r));
+ fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
fep->pdev = pdev;
fep->dev_id = dev_id++;
@@ -1900,11 +1889,8 @@ failed_regulator:
clk_disable_unprepare(fep->clk_ptp);
failed_pin:
failed_clk:
- iounmap(fep->hwp);
failed_ioremap:
free_netdev(ndev);
-failed_alloc_etherdev:
- release_mem_region(r->start, resource_size(r));
return ret;
}
@@ -1914,7 +1900,6 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct resource *r;
int i;
unregister_netdev(ndev);
@@ -1930,13 +1915,8 @@ fec_drv_remove(struct platform_device *pdev)
if (irq > 0)
free_irq(irq, ndev);
}
- iounmap(fep->hwp);
free_netdev(ndev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- BUG_ON(!r);
- release_mem_region(r->start, resource_size(r));
-
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 0d8df400a47..1f17ca0f220 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -128,7 +128,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
-EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
/**
* fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -319,7 +318,6 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
-EXPORT_SYMBOL(fec_ptp_ioctl);
/**
* fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -385,4 +383,3 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
pr_info("registered PHC device on %s\n", ndev->name);
}
}
-EXPORT_SYMBOL(fec_ptp_init);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 46df28893c1..edc120094c3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
received++;
netif_receive_skb(skb);
} else {
- dev_warn(fep->dev,
- "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
received++;
netif_rx(skb);
} else {
- dev_warn(fep->dev,
- "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev)
*/
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
- if (skb == NULL) {
- dev_warn(fep->dev,
- "Memory squeeze, unable to allocate skb\n");
+ if (skb == NULL)
break;
- }
+
skb_align(skb, ENET_RX_ALIGN);
fep->rx_skbuff[i] = skb;
CBDW_BUFADDR(bdp,
@@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
- if (!new_skb) {
- if (net_ratelimit()) {
- dev_warn(fep->dev,
- "Memory squeeze, dropping tx packet.\n");
- }
+ if (!new_skb)
return NULL;
- }
/* Make sure new skb is properly aligned */
skb_align(new_skb, 4);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2c5441d1bf..96fbe354824 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -132,7 +132,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
-static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
@@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- &addr, GFP_KERNEL);
- if (!vaddr) {
- netif_err(priv, ifup, ndev,
- "Could not allocate buffer descriptors!\n");
+ (priv->total_tx_ring_size *
+ sizeof(struct txbd8)) +
+ (priv->total_rx_ring_size *
+ sizeof(struct rxbd8)),
+ &addr, GFP_KERNEL);
+ if (!vaddr)
return -ENOMEM;
- }
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
@@ -342,7 +341,7 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_init_tx_rx_base(priv);
/* Configure the coalescing support */
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ gfar_configure_coalescing_all(priv);
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
@@ -691,7 +690,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
}
for (i = 0; i < priv->num_tx_queues; i++)
- priv->tx_queue[i] = NULL;
+ priv->tx_queue[i] = NULL;
for (i = 0; i < priv->num_rx_queues; i++)
priv->rx_queue[i] = NULL;
@@ -1817,25 +1816,15 @@ void gfar_start(struct net_device *dev)
dev->trans_start = jiffies; /* prevent tx timeout */
}
-void gfar_configure_coalescing(struct gfar_private *priv,
+static void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 __iomem *baddr;
- int i = 0;
-
- /* Backward compatible case ---- even if we enable
- * multiple queues, there's only single reg to program
- */
- gfar_write(&regs->txic, 0);
- if (likely(priv->tx_queue[0]->txcoalescing))
- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
-
- gfar_write(&regs->rxic, 0);
- if (unlikely(priv->rx_queue[0]->rxcoalescing))
- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
+
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
gfar_write(baddr + i, 0);
@@ -1849,9 +1838,25 @@ void gfar_configure_coalescing(struct gfar_private *priv,
if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
}
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
}
}
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
+}
+
static int register_grp_irqs(struct gfar_priv_grp *grp)
{
struct gfar_private *priv = grp->priv;
@@ -1941,7 +1946,7 @@ int startup_gfar(struct net_device *ndev)
phy_start(priv->phydev);
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ gfar_configure_coalescing_all(priv);
return 0;
@@ -2469,12 +2474,11 @@ static void gfar_align_skb(struct sk_buff *skb)
}
/* Interrupt Handler for Transmit complete */
-static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
+static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
struct net_device *dev = tx_queue->dev;
struct netdev_queue *txq;
struct gfar_private *priv = netdev_priv(dev);
- struct gfar_priv_rx_q *rx_queue = NULL;
struct txbd8 *bdp, *next = NULL;
struct txbd8 *lbdp = NULL;
struct txbd8 *base = tx_queue->tx_bd_base;
@@ -2489,7 +2493,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
u32 lstatus;
size_t buflen;
- rx_queue = priv->rx_queue[tqi];
txq = netdev_get_tx_queue(dev, tqi);
bdp = tx_queue->dirty_tx;
skb_dirtytx = tx_queue->skb_dirtytx;
@@ -2571,8 +2574,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->dirty_tx = bdp;
netdev_tx_completed_queue(txq, howmany, bytes_sent);
-
- return howmany;
}
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
@@ -2694,8 +2695,6 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
struct gfar_private *priv = netdev_priv(dev);
struct rxfcb *fcb = NULL;
- gro_result_t ret;
-
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
@@ -2734,10 +2733,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
__vlan_hwaccel_put_tag(skb, fcb->vlctl);
/* Send the packet up the stack */
- ret = napi_gro_receive(napi, skb);
+ napi_gro_receive(napi, skb);
- if (unlikely(GRO_DROP == ret))
- atomic64_inc(&priv->extra_stats.kernel_dropped);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2835,62 +2832,82 @@ static int gfar_poll(struct napi_struct *napi, int budget)
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_priv_tx_q *tx_queue = NULL;
struct gfar_priv_rx_q *rx_queue = NULL;
- int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
- int tx_cleaned = 0, i, left_over_budget = budget;
- unsigned long serviced_queues = 0;
- int num_queues = 0;
-
- num_queues = gfargrp->num_rx_queues;
- budget_per_queue = budget/num_queues;
+ int work_done = 0, work_done_per_q = 0;
+ int i, budget_per_q = 0;
+ int has_tx_work;
+ unsigned long rstat_rxf;
+ int num_act_queues;
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived
*/
gfar_write(&regs->ievent, IEVENT_RTX_MASK);
- while (num_queues && left_over_budget) {
- budget_per_queue = left_over_budget/num_queues;
- left_over_budget = 0;
+ rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
+
+ num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
+ if (num_act_queues)
+ budget_per_q = budget/num_act_queues;
+
+ while (1) {
+ has_tx_work = 0;
+ for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+ tx_queue = priv->tx_queue[i];
+ /* run Tx cleanup to completion */
+ if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+ gfar_clean_tx_ring(tx_queue);
+ has_tx_work = 1;
+ }
+ }
for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
- if (test_bit(i, &serviced_queues))
+ /* skip queue if not active */
+ if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
continue;
+
rx_queue = priv->rx_queue[i];
- tx_queue = priv->tx_queue[rx_queue->qindex];
-
- tx_cleaned += gfar_clean_tx_ring(tx_queue);
- rx_cleaned_per_queue =
- gfar_clean_rx_ring(rx_queue, budget_per_queue);
- rx_cleaned += rx_cleaned_per_queue;
- if (rx_cleaned_per_queue < budget_per_queue) {
- left_over_budget = left_over_budget +
- (budget_per_queue -
- rx_cleaned_per_queue);
- set_bit(i, &serviced_queues);
- num_queues--;
+ work_done_per_q =
+ gfar_clean_rx_ring(rx_queue, budget_per_q);
+ work_done += work_done_per_q;
+
+ /* finished processing this queue */
+ if (work_done_per_q < budget_per_q) {
+ /* clear active queue hw indication */
+ gfar_write(&regs->rstat,
+ RSTAT_CLEAR_RXF0 >> i);
+ rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
+ num_act_queues--;
+
+ if (!num_act_queues)
+ break;
+ /* recompute budget per Rx queue */
+ budget_per_q =
+ (budget - work_done) / num_act_queues;
}
}
- }
- if (tx_cleaned)
- return budget;
+ if (work_done >= budget)
+ break;
- if (rx_cleaned < budget) {
- napi_complete(napi);
+ if (!num_act_queues && !has_tx_work) {
- /* Clear the halt bit in RSTAT */
- gfar_write(&regs->rstat, gfargrp->rstat);
+ napi_complete(napi);
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ /* Clear the halt bit in RSTAT */
+ gfar_write(&regs->rstat, gfargrp->rstat);
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
+ break;
+ }
}
- return rx_cleaned;
+ return work_done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
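
For illustration (not part of the patch): the gfar_poll() rework above stops splitting the NAPI budget evenly across all RX queues up front. It reads the RSTAT RXF bits to find the queues that actually have frames, runs Tx cleanup to completion outside the budget, and re-splits the unused budget whenever a queue finishes early. A small worked example of the arithmetic, with made-up frame counts:

	/* Three RXF bits set in RSTAT, NAPI budget of 64. */
	int budget = 64, work_done = 0;
	unsigned long rstat_rxf = 0xe0;				/* 3 active queues */
	int num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);	/* 3 */
	int budget_per_q = budget / num_act_queues;		/* 64/3 = 21 */

	/* The first queue returns only 10 frames (< 21): it is done, so it
	 * is dropped from the active set and its unused share is folded
	 * back into the split for the remaining queues.
	 */
	work_done += 10;
	num_act_queues--;					/* 2 */
	budget_per_q = (budget - work_done) / num_act_queues;	/* 54/2 = 27 */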
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 63a28d294e2..04b552cd419 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -291,7 +291,9 @@ extern const char gfar_driver_version[];
#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
-#define RSTAT_CLEAR_RHALT 0x00800000
+#define RSTAT_CLEAR_RHALT 0x00800000
+#define RSTAT_CLEAR_RXF0 0x00000080
+#define RSTAT_RXF_MASK 0x000000ff
#define TCTRL_IPCSEN 0x00004000
#define TCTRL_TUCSEN 0x00002000
@@ -627,7 +629,6 @@ struct rmon_mib
};
struct gfar_extra_stats {
- atomic64_t kernel_dropped;
atomic64_t rx_large;
atomic64_t rx_short;
atomic64_t rx_nonoctet;
@@ -1180,8 +1181,7 @@ extern void stop_gfar(struct net_device *dev);
extern void gfar_halt(struct net_device *dev);
extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask);
+extern void gfar_configure_coalescing_all(struct gfar_private *priv);
void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 75e89acf491..4e7118f9f07 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -66,7 +66,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
- "rx-dropped-by-kernel",
"rx-large-frame-errors",
"rx-short-frame-errors",
"rx-non-octet-errors",
@@ -436,7 +435,7 @@ static int gfar_scoalesce(struct net_device *dev,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
}
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ gfar_configure_coalescing_all(priv);
return 0;
}
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 2418faf2251..84125707f32 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1003,8 +1003,6 @@ static void fjn_rx(struct net_device *dev)
}
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
- pkt_len);
outb(F_SKP_PKT, ioaddr + RX_SKIP);
dev->stats.rx_dropped++;
break;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 1c54e229e3c..e3881614539 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev)
#ifdef __mc68000__
cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
- }
- else
+ } else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
+ }
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
- }
- else {
+ } else {
if (!rx_in_place) {
/* 16 byte align the data fields */
skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f045ea4dc51..d653bac4cfc 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev)
rbd->v_data = newskb->data;
rbd->b_data = SWAP32(dma_addr);
DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
- } else
+ } else {
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
+ }
memory_squeeze:
if (skb == NULL) {
/* XXX tulip.c can defer packets here!! */
- printk(KERN_ERR
- "%s: i596_rx Memory squeeze, dropping packet.\n",
- dev->name);
dev->stats.rx_dropped++;
} else {
if (!rx_in_place) {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 328f47c92e2..02963343447 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
- netdev_info(dev, "Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i]) {
- netdev_info(dev, "Not enough memory to allocate skb array\n");
+ if (!skb_arr_rq1[i])
break;
- }
}
/* Ring doorbell */
ehea_update_rq1a(pr->qp, i - 1);
@@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb) {
- netdev_err(dev, "Not enough memory to allocate skb\n");
+ if (!skb)
break;
- }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 1f7ecf57181..610ed223d1d 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,17 +637,12 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt =
- dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (mal->bd_virt == NULL) {
- printk(KERN_ERR
- "mal%d: out of memory allocating RX/TX descriptors!\n",
- index);
err = -ENOMEM;
goto fail_unmap;
}
- memset(mal->bd_virt, 0, bd_size);
for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c859771a990..302d5940106 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
rxq_entries;
adapter->rx_queue.queue_addr =
- dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
- &adapter->rx_queue.queue_dma, GFP_KERNEL);
-
+ dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
+ &adapter->rx_queue.queue_dma, GFP_KERNEL);
if (!adapter->rx_queue.queue_addr) {
- netdev_err(netdev, "unable to allocate rx queue pages\n");
rc = -ENOMEM;
goto err_out;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ffd287196bf..82a967c9559 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
}
- memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = txdr->next_to_clean = 0;
ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
@@ -1079,12 +1078,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
}
- memset(rxdr->desc, 0, rxdr->size);
rxdr->next_to_use = rxdr->next_to_clean = 0;
rctl = er32(RCTL);
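
The emac/mal and e1000 ethtool hunks above fold the follow-up memset() into the allocation by passing __GFP_ZERO to dma_alloc_coherent(), while also dropping the per-allocation "out of memory" messages. A minimal sketch of the zeroed-allocation pattern, using the stock dma_alloc_coherent() API; alloc_zeroed_ring() is an illustrative helper name, not a driver function:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *alloc_zeroed_ring(struct device *dev, size_t size,
			       dma_addr_t *dma)
{
	/*
	 * Before: dma_alloc_coherent(dev, size, dma, GFP_KERNEL)
	 *         followed by memset(buf, 0, size) on success.
	 * After:  request zeroed memory directly from the allocator.
	 */
	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL | __GFP_ZERO);
}
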
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8502c625dbe..d98e1d0996d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1516,8 +1516,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- e_err(probe, "Unable to allocate memory for the Tx descriptor "
- "ring\n");
return -ENOMEM;
}
@@ -1707,10 +1705,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
-
if (!rxdr->desc) {
- e_err(probe, "Unable to allocate memory for the Rx descriptor "
- "ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1729,8 +1724,6 @@ setup_rx_desc_die:
if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma);
- e_err(probe, "Unable to allocate memory for the Rx "
- "descriptor ring\n");
goto setup_rx_desc_die;
}
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e0991388664..b71c8502a2b 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -37,7 +37,9 @@
* "index + 5".
*/
static const u16 e1000_gg82563_cable_length_table[] = {
- 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+ 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
+};
+
#define GG82563_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_gg82563_cable_length_table)
@@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
@@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
- udelay(200);
+ usleep_range(200, 400);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return -E1000_ERR_PHY;
}
- udelay(200);
+ usleep_range(200, 400);
ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
- udelay(200);
+ usleep_range(200, 400);
} else {
ret_val = e1000e_read_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS & offset,
+ data);
}
e1000_release_phy_80003es2lan(hw);
@@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
- udelay(200);
+ usleep_range(200, 400);
/* ...and verify the command was successful. */
ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return -E1000_ERR_PHY;
}
- udelay(200);
+ usleep_range(200, 400);
ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
- udelay(200);
+ usleep_range(200, 400);
} else {
ret_val = e1000e_write_phy_reg_mdic(hw,
- MAX_PHY_REG_ADDRESS & offset,
- data);
+ MAX_PHY_REG_ADDRESS &
+ offset, data);
}
e1000_release_phy_80003es2lan(hw);
@@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
}
@@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
s32 ret_val;
if (hw->phy.media_type == e1000_media_type_copper) {
- ret_val = e1000e_get_speed_and_duplex_copper(hw,
- speed,
- duplex);
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
hw->phy.ops.cfg_on_link_up(hw);
} else {
ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
- speed,
- duplex);
+ speed,
+ duplex);
}
return ret_val;
@@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
@@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
/* Enable retransmit on late collisions */
@@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
/* default to true to enable the MDIC W/A */
hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET >>
- E1000_KMRNCTRLSTA_OFFSET_SHIFT,
- &i);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
+ E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
if (!ret_val) {
if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
- E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+ E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
@@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
- u32 ctrl_ext;
+ u32 reg;
u16 data;
ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
@@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
}
/* Bypass Rx and Tx FIFO's */
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
- E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
- E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
+ data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+ E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- &data);
+ reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
if (ret_val)
return ret_val;
data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
- data);
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
if (ret_val)
return ret_val;
@@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
- ew32(CTRL_EXT, ctrl_ext);
+ reg = er32(CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+ ew32(CTRL_EXT, reg);
ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
if (ret_val)
@@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
- 0xFFFF);
+ 0xFFFF);
if (ret_val)
return ret_val;
ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- &reg_data);
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
- reg_data);
+ reg_data);
if (ret_val)
return ret_val;
- ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- &reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
- &duplex);
+ &duplex);
if (ret_val)
return ret_val;
@@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
u16 reg_data, reg_data2;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
u32 i = 0;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
- ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
- E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
- reg_data);
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
if (ret_val)
return ret_val;
@@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
return ret_val;
kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
- E1000_KMRNCTRLSTA_OFFSET) | data;
+ E1000_KMRNCTRLSTA_OFFSET) | data;
ew32(KMRNCTRLSTA, kmrnctrlsta);
e1e_flush();
@@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
.phy_ops = &es2_phy_ops,
.nvm_ops = &es2_nvm_ops,
};
-
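
A recurring change in the e1000e hunks above and below is replacing short udelay() busy-waits with usleep_range() in paths that are allowed to sleep. A minimal sketch of the pattern; the 200us PHY settling delay is taken from the hunks above, and example_mdi_settle() is an illustrative name (the drivers open-code the call):

#include <linux/delay.h>

static void example_mdi_settle(void)
{
	/* Before: udelay(200) spins the CPU for the full 200us. */
	/* After: a ranged sleep lets the timer code coalesce wakeups. */
	usleep_range(200, 400);
}
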
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2faffbde179..7380442a382 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
default:
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
- E1000_EECD_SIZE_EX_SHIFT);
+ E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
@@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usleep_range(20, 40);
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usleep_range(50, 100);
i++;
}
@@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usleep_range(50, 100);
}
if (i == fw_timeout) {
@@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
+
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
}
for (i = 0; i < words; i++) {
- eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
- ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
- E1000_NVM_RW_REG_START;
+ eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
+ ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
+ E1000_NVM_RW_REG_START);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
@@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
s32 timeout = PHY_CFG_TIMEOUT;
while (timeout) {
- if (er32(EEMNGCTL) &
- E1000_NVM_CFG_DONE_PORT_0)
+ if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
break;
usleep_range(1000, 2000);
timeout--;
@@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
}
if (hw->nvm.type == e1000_nvm_flash_hw) {
- udelay(10);
+ usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
@@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
@@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
@@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
break;
default:
reg_data = er32(TXDCTL(1));
- reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB |
- E1000_TXDCTL_COUNT_DESC;
+ reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB |
+ E1000_TXDCTL_COUNT_DESC);
ew32(TXDCTL(1), reg_data);
break;
}
@@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
status = er32(STATUS);
er32(RXCW);
/* SYNCH bit and IV bit are sticky */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* the IV bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) &&
(rxcw & E1000_RXCW_C))
@@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
-
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 85cb1a3b7cd..08e24dc3dc0 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -44,6 +44,8 @@
#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
#define E1000_EIAC_MASK_82574 0x01F00000
+#define E1000_IVAR_INT_ALLOC_VALID 0x8
+
/* Manageability Operation Mode mask */
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index fc3a4fe1ac7..351c94a0cf7 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -66,7 +66,7 @@
#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
#define E1000_CTRL_EXT_EIAME 0x01000000
#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
-#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
#define E1000_CTRL_EXT_LSECCK 0x00001000
#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -216,6 +216,8 @@
#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
@@ -234,17 +236,17 @@
#define E1000_STATUS_FUNC_SHIFT 2
#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
-#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
-
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
@@ -311,6 +313,7 @@
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
/* Receive Checksum Control */
#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
@@ -400,7 +403,8 @@
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
-#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+/* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED 0x80000000
#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
@@ -583,13 +587,13 @@
#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
-#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */
-#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
-#define E1000_NVM_RW_REG_START 1 /* Start operation */
-#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
-#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
-#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define E1000_FLASH_UPDATES 2000
+#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
+#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START 1 /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
+#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
+#define E1000_FLASH_UPDATES 2000
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
@@ -785,6 +789,7 @@
GG82563_REG(194, 18) /* Inband Control */
/* MDI Control */
+#define E1000_MDIC_REG_MASK 0x001F0000
#define E1000_MDIC_REG_SHIFT 16
#define E1000_MDIC_PHY_SHIFT 21
#define E1000_MDIC_OP_WRITE 0x04000000
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index fcc758138b8..82f1c84282d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -46,6 +46,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/mii.h>
+#include <linux/mdio.h>
#include "hw.h"
struct e1000_info;
@@ -61,7 +62,6 @@ struct e1000_info;
#define e_notice(format, arg...) \
netdev_notice(adapter->netdev, format, ## arg)
-
/* Interrupt modes, as used by the IntMode parameter */
#define E1000E_INT_MODE_LEGACY 0
#define E1000E_INT_MODE_MSI 1
@@ -239,9 +239,8 @@ struct e1000_adapter {
u16 tx_itr;
u16 rx_itr;
- /* Tx */
- struct e1000_ring *tx_ring /* One per active queue */
- ____cacheline_aligned_in_smp;
+ /* Tx - one ring per active queue */
+ struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
struct napi_struct napi;
@@ -352,6 +351,8 @@ struct e1000_adapter {
struct timecounter tc;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+
+ u16 eee_advert;
};
struct e1000_info {
@@ -487,8 +488,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
extern void e1000e_free_rx_resources(struct e1000_ring *ring);
extern void e1000e_free_tx_resources(struct e1000_ring *ring);
extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64
- *stats);
+ struct rtnl_link_stats64
+ *stats);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
@@ -558,12 +559,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
return hw->nvm.ops.update(hw);
}
-static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
return hw->nvm.ops.read(hw, offset, words, data);
}
-static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
return hw->nvm.ops.write(hw, offset, words, data);
}
@@ -597,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
+ usleep_range(50, 100);
return i;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f91a8f3f9d4..7c8ca658d55 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -35,12 +35,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
-#include <linux/mdio.h>
#include <linux/pm_runtime.h>
#include "e1000.h"
-enum {NETDEV_STATS, E1000_STATS};
+enum { NETDEV_STATS, E1000_STATS };
struct e1000_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -121,6 +120,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
+
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
static int e1000_get_settings(struct net_device *netdev,
@@ -197,8 +197,7 @@ static int e1000_get_settings(struct net_device *netdev,
/* MDI-X => 2; MDI =>1; Invalid =>0 */
if ((hw->phy.media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
- ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X :
- ETH_TP_MDI;
+ ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -224,8 +223,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
- spd != SPEED_1000 &&
- dplx != DUPLEX_FULL) {
+ (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
goto err_inval;
}
@@ -298,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev,
hw->mac.autoneg = 1;
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE |
- ADVERTISED_Autoneg;
+ ADVERTISED_FIBRE | ADVERTISED_Autoneg;
else
hw->phy.autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP | ADVERTISED_Autoneg;
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
@@ -346,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
pause->autoneg =
- (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+ (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
if (hw->fc.current_mode == e1000_fc_rx_pause) {
pause->rx_pause = 1;
@@ -435,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -503,8 +499,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
@@ -515,7 +511,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
} else {
for (i = 0; i < last_word - first_word + 1; i++) {
ret_val = e1000_read_nvm(hw, first_word + i, 1,
- &eeprom_buff[i]);
+ &eeprom_buff[i]);
if (ret_val)
break;
}
@@ -553,7 +549,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (eeprom->len == 0)
return -EOPNOTSUPP;
- if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16)))
+ if (eeprom->magic !=
+ (adapter->pdev->vendor | (adapter->pdev->device << 16)))
return -EFAULT;
if (adapter->flags & FLAG_READ_ONLY_NVM)
@@ -579,7 +576,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
ret_val = e1000_read_nvm(hw, last_word, 1,
- &eeprom_buff[last_word - first_word]);
+ &eeprom_buff[last_word - first_word]);
if (ret_val)
goto out;
@@ -618,8 +615,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver));
+ strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, e1000e_driver_version,
sizeof(drvinfo->version));
@@ -627,10 +623,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
* PCI-E controllers
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (adapter->eeprom_vers & 0xF000) >> 12,
- (adapter->eeprom_vers & 0x0FF0) >> 4,
- (adapter->eeprom_vers & 0x000F));
+ "%d.%d-%d",
+ (adapter->eeprom_vers & 0xF000) >> 12,
+ (adapter->eeprom_vers & 0x0FF0) >> 4,
+ (adapter->eeprom_vers & 0x000F));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
@@ -756,7 +752,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
{
u32 pat, val;
static const u32 test[] = {
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+ };
for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
(test[pat] & write));
@@ -786,6 +783,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
}
return 0;
}
+
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
do { \
if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
@@ -813,16 +811,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 wlock_mac = 0;
/* The status register is Read Only, so a write should fail.
- * Some bits that get toggled are ignored.
+ * Some bits that get toggled are ignored. There are several bits
+ * on newer hardware that are r/w.
*/
switch (mac->type) {
- /* there are several bits on newer hardware that are r/w */
case e1000_82571:
case e1000_82572:
case e1000_80003es2lan:
toggle = 0x7FFFF3FF;
break;
- default:
+ default:
toggle = 0x7FFFF033;
break;
}
@@ -928,7 +926,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
}
/* If Checksum is not Correct return error else test passed */
- if ((checksum != (u16) NVM_SUM) && !(*data))
+ if ((checksum != (u16)NVM_SUM) && !(*data))
*data = 2;
return *data;
@@ -936,7 +934,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
{
- struct net_device *netdev = (struct net_device *) data;
+ struct net_device *netdev = (struct net_device *)data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -969,8 +967,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
netdev)) {
shared_int = 0;
- } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
+ netdev)) {
*data = 1;
ret_val = -1;
goto out;
@@ -1080,28 +1078,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
struct e1000_ring *tx_ring = &adapter->test_tx_ring;
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
+ struct e1000_buffer *buffer_info;
int i;
if (tx_ring->desc && tx_ring->buffer_info) {
for (i = 0; i < tx_ring->count; i++) {
- if (tx_ring->buffer_info[i].dma)
+ buffer_info = &tx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
dma_unmap_single(&pdev->dev,
- tx_ring->buffer_info[i].dma,
- tx_ring->buffer_info[i].length,
- DMA_TO_DEVICE);
- if (tx_ring->buffer_info[i].skb)
- dev_kfree_skb(tx_ring->buffer_info[i].skb);
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
}
}
if (rx_ring->desc && rx_ring->buffer_info) {
for (i = 0; i < rx_ring->count; i++) {
- if (rx_ring->buffer_info[i].dma)
+ buffer_info = &rx_ring->buffer_info[i];
+
+ if (buffer_info->dma)
dma_unmap_single(&pdev->dev,
- rx_ring->buffer_info[i].dma,
- 2048, DMA_FROM_DEVICE);
- if (rx_ring->buffer_info[i].skb)
- dev_kfree_skb(rx_ring->buffer_info[i].skb);
+ buffer_info->dma,
+ 2048, DMA_FROM_DEVICE);
+ if (buffer_info->skb)
+ dev_kfree_skb(buffer_info->skb);
}
}
@@ -1138,8 +1141,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->count = E1000_DEFAULT_TXD;
tx_ring->buffer_info = kcalloc(tx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
+ sizeof(struct e1000_buffer), GFP_KERNEL);
if (!tx_ring->buffer_info) {
ret_val = 1;
goto err_nomem;
@@ -1156,8 +1158,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
- ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
+ ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
+ ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
ew32(TDH(0), 0);
ew32(TDT(0), 0);
@@ -1179,8 +1181,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
tx_ring->buffer_info[i].skb = skb;
tx_ring->buffer_info[i].length = skb->len;
tx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
+ dma_map_single(&pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev,
tx_ring->buffer_info[i].dma)) {
ret_val = 4;
@@ -1200,8 +1202,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rx_ring->count = E1000_DEFAULT_RXD;
rx_ring->buffer_info = kcalloc(rx_ring->count,
- sizeof(struct e1000_buffer),
- GFP_KERNEL);
+ sizeof(struct e1000_buffer), GFP_KERNEL);
if (!rx_ring->buffer_info) {
ret_val = 5;
goto err_nomem;
@@ -1220,16 +1221,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rctl = er32(RCTL);
if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
ew32(RCTL, rctl & ~E1000_RCTL_EN);
- ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
- ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+ ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
+ ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
ew32(RDLEN(0), rx_ring->size);
ew32(RDH(0), 0);
ew32(RDT(0), 0);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
- E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
- E1000_RCTL_SBP | E1000_RCTL_SECRC |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
+ E1000_RCTL_SBP | E1000_RCTL_SECRC |
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
ew32(RCTL, rctl);
for (i = 0; i < rx_ring->count; i++) {
@@ -1244,8 +1245,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb_reserve(skb, NET_IP_ALIGN);
rx_ring->buffer_info[i].skb = skb;
rx_ring->buffer_info[i].dma =
- dma_map_single(&pdev->dev, skb->data, 2048,
- DMA_FROM_DEVICE);
+ dma_map_single(&pdev->dev, skb->data, 2048,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev,
rx_ring->buffer_info[i].dma)) {
ret_val = 8;
@@ -1296,7 +1297,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
e1e_flush();
- udelay(500);
+ usleep_range(500, 1000);
return 0;
}
@@ -1322,7 +1323,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
/* Assert SW reset for above settings to take effect */
hw->phy.ops.commit(hw);
- mdelay(1);
+ usleep_range(1000, 2000);
/* Force Full Duplex */
e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
@@ -1363,7 +1364,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
/* force 1000, set loopback */
e1e_wphy(hw, MII_BMCR, 0x4140);
- mdelay(250);
+ msleep(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@@ -1395,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
if (hw->phy.type == e1000_phy_m88)
e1000_phy_disable_receiver(adapter);
- udelay(500);
+ usleep_range(500, 1000);
return 0;
}
@@ -1431,8 +1432,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special write to serdes control register to enable SerDes analog
* loopback
*/
-#define E1000_SERDES_LB_ON 0x410
- ew32(SCTL, E1000_SERDES_LB_ON);
+ ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
@@ -1526,8 +1526,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_82572:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
- ew32(SCTL, E1000_SERDES_LB_OFF);
+ ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
e1e_flush();
usleep_range(10000, 20000);
break;
@@ -1564,7 +1563,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
frame_size &= ~1;
if (*(skb->data + 3) == 0xFF)
if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
- (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
return 0;
return 13;
}
@@ -1575,6 +1574,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_ring *rx_ring = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
+ struct e1000_buffer *buffer_info;
int i, j, k, l;
int lc;
int good_cnt;
@@ -1595,14 +1595,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
k = 0;
l = 0;
- for (j = 0; j <= lc; j++) { /* loop count loop */
- for (i = 0; i < 64; i++) { /* send the packets */
- e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
- 1024);
+ /* loop count loop */
+ for (j = 0; j <= lc; j++) {
+ /* send the packets */
+ for (i = 0; i < 64; i++) {
+ buffer_info = &tx_ring->buffer_info[k];
+
+ e1000_create_lbtest_frame(buffer_info->skb, 1024);
dma_sync_single_for_device(&pdev->dev,
- tx_ring->buffer_info[k].dma,
- tx_ring->buffer_info[k].length,
- DMA_TO_DEVICE);
+ buffer_info->dma,
+ buffer_info->length,
+ DMA_TO_DEVICE);
k++;
if (k == tx_ring->count)
k = 0;
@@ -1612,13 +1615,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
msleep(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
- do { /* receive the sent packets */
+ /* receive the sent packets */
+ do {
+ buffer_info = &rx_ring->buffer_info[l];
+
dma_sync_single_for_cpu(&pdev->dev,
- rx_ring->buffer_info[l].dma, 2048,
- DMA_FROM_DEVICE);
+ buffer_info->dma, 2048,
+ DMA_FROM_DEVICE);
- ret_val = e1000_check_lbtest_frame(
- rx_ring->buffer_info[l].skb, 1024);
+ ret_val = e1000_check_lbtest_frame(buffer_info->skb,
+ 1024);
if (!ret_val)
good_cnt++;
l++;
@@ -1637,7 +1643,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 14; /* error code for time out error */
break;
}
- } /* end loop count loop */
+ }
return ret_val;
}
@@ -1696,7 +1702,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
/* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
- msleep(5000);
+ msleep_interruptible(5000);
if (!(er32(STATUS) & E1000_STATUS_LU))
*data = 1;
@@ -1980,12 +1986,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
- p = (char *) &net_stats +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)&net_stats +
+ e1000_gstrings_stats[i].stat_offset;
break;
case E1000_STATS:
- p = (char *) adapter +
- e1000_gstrings_stats[i].stat_offset;
+ p = (char *)adapter +
+ e1000_gstrings_stats[i].stat_offset;
break;
default:
data[i] = 0;
@@ -1993,7 +1999,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
@@ -2069,23 +2075,20 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl;
- u32 status, ret_val;
+ u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
+ u32 ret_val;
- if (!(adapter->flags & FLAG_IS_ICH) ||
- !(adapter->flags2 & FLAG2_HAS_EEE))
+ if (!(adapter->flags2 & FLAG2_HAS_EEE))
return -EOPNOTSUPP;
switch (hw->phy.type) {
case e1000_phy_82579:
cap_addr = I82579_EEE_CAPABILITY;
- adv_addr = I82579_EEE_ADVERTISEMENT;
lpa_addr = I82579_EEE_LP_ABILITY;
pcs_stat_addr = I82579_EEE_PCS_STATUS;
break;
case e1000_phy_i217:
cap_addr = I217_EEE_CAPABILITY;
- adv_addr = I217_EEE_ADVERTISEMENT;
lpa_addr = I217_EEE_LP_ABILITY;
pcs_stat_addr = I217_EEE_PCS_STATUS;
break;
@@ -2104,10 +2107,7 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
/* EEE Advertised */
- ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data);
- if (ret_val)
- goto release;
- edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+ edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
/* EEE Link Partner Advertised */
ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
@@ -2125,25 +2125,11 @@ release:
if (ret_val)
return -ENODATA;
- e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
- status = er32(STATUS);
-
/* Result of the EEE auto negotiation - there is no register that
* has the status of the EEE negotiation so do a best-guess based
- * on whether both Tx and Rx LPI indications have been received or
- * base it on the link speed, the EEE advertised speeds on both ends
- * and the speeds on which EEE is enabled locally.
+ * on whether Tx or Rx LPI indications have been received.
*/
- if (((phy_data & E1000_EEE_TX_LPI_RCVD) &&
- (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
- ((status & E1000_STATUS_SPEED_100) &&
- (edata->advertised & ADVERTISED_100baseT_Full) &&
- (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
- (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
- ((status & E1000_STATUS_SPEED_1000) &&
- (edata->advertised & ADVERTISED_1000baseT_Full) &&
- (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
- (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
+ if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
edata->eee_active = true;
edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
@@ -2160,19 +2146,10 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
struct ethtool_eee eee_curr;
s32 ret_val;
- if (!(adapter->flags & FLAG_IS_ICH) ||
- !(adapter->flags2 & FLAG2_HAS_EEE))
- return -EOPNOTSUPP;
-
ret_val = e1000e_get_eee(netdev, &eee_curr);
if (ret_val)
return ret_val;
- if (eee_curr.advertised != edata->advertised) {
- e_err("Setting EEE advertisement is not supported\n");
- return -EINVAL;
- }
-
if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
e_err("Setting EEE tx-lpi is not supported\n");
return -EINVAL;
@@ -2183,16 +2160,21 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EINVAL;
}
- if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) {
- hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
-
- /* reset the link */
- if (netif_running(netdev))
- e1000e_reinit_locked(adapter);
- else
- e1000e_reset(adapter);
+ if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+ e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
+ return -EINVAL;
}
+ adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+ hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+ /* reset the link */
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+
return 0;
}
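
The ethtool EEE hunks above replace the speed/advertisement cross-check with a simpler rule: report EEE as active when either LPI indication has been latched in the PCS status register. A sketch of that test, with placeholder bit masks standing in for E1000_EEE_TX_LPI_RCVD / E1000_EEE_RX_LPI_RCVD (IEEE MMD 3.1 bits 11:10):

#include <linux/types.h>

#define LPI_TX_RCVD	0x0800	/* placeholder: Tx LPI received */
#define LPI_RX_RCVD	0x0400	/* placeholder: Rx LPI received */

static bool eee_link_active(u16 pcs_status)
{
	/* Active if the PHY has seen LPI in either direction. */
	return !!(pcs_status & (LPI_TX_RCVD | LPI_RX_RCVD));
}
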
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1e6b889aee8..84850f7a23e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
e1000_1000t_rx_status_undefined = 0xFF
};
-enum e1000_rev_polarity{
+enum e1000_rev_polarity {
e1000_rev_polarity_normal = 0,
e1000_rev_polarity_reversed,
e1000_rev_polarity_undefined = 0xFF
@@ -545,7 +545,7 @@ struct e1000_mac_info {
u16 mta_reg_count;
/* Maximum size of the MTA register table in all supported adapters */
- #define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 121a865c7fb..ad9d8f2dd86 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -61,15 +61,15 @@
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
struct ich8_hsfsts {
- u16 flcdone :1; /* bit 0 Flash Cycle Done */
- u16 flcerr :1; /* bit 1 Flash Cycle Error */
- u16 dael :1; /* bit 2 Direct Access error Log */
- u16 berasesz :2; /* bit 4:3 Sector Erase Size */
- u16 flcinprog :1; /* bit 5 flash cycle in Progress */
- u16 reserved1 :2; /* bit 13:6 Reserved */
- u16 reserved2 :6; /* bit 13:6 Reserved */
- u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
- u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
+ u16 flcdone:1; /* bit 0 Flash Cycle Done */
+ u16 flcerr:1; /* bit 1 Flash Cycle Error */
+ u16 dael:1; /* bit 2 Direct Access error Log */
+ u16 berasesz:2; /* bit 4:3 Sector Erase Size */
+ u16 flcinprog:1; /* bit 5 flash cycle in Progress */
+ u16 reserved1:2; /* bit 13:6 Reserved */
+ u16 reserved2:6; /* bit 13:6 Reserved */
+ u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+ u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
} hsf_status;
u16 regval;
};
@@ -78,11 +78,11 @@ union ich8_hws_flash_status {
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
struct ich8_hsflctl {
- u16 flcgo :1; /* 0 Flash Cycle Go */
- u16 flcycle :2; /* 2:1 Flash Cycle */
- u16 reserved :5; /* 7:3 Reserved */
- u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
- u16 flockdn :6; /* 15:10 Reserved */
+ u16 flcgo:1; /* 0 Flash Cycle Go */
+ u16 flcycle:2; /* 2:1 Flash Cycle */
+ u16 reserved:5; /* 7:3 Reserved */
+ u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+ u16 flockdn:6; /* 15:10 Reserved */
} hsf_ctrl;
u16 regval;
};
@@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl {
/* ICH Flash Region Access Permissions */
union ich8_hws_flash_regacc {
struct ich8_flracc {
- u32 grra :8; /* 0:7 GbE region Read Access */
- u32 grwa :8; /* 8:15 GbE region Write Access */
- u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
- u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+ u32 grra:8; /* 0:7 GbE region Read Access */
+ u32 grwa:8; /* 8:15 GbE region Write Access */
+ u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+ u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
} hsf_flregacc;
u16 regval;
};
@@ -142,6 +142,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
@@ -312,7 +313,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
ew32(CTRL, mac_reg);
e1e_flush();
- udelay(10);
+ usleep_range(10, 20);
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
ew32(CTRL, mac_reg);
e1e_flush();
@@ -548,8 +549,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
- nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT;
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
nvm->flash_bank_size /= 2;
/* Adjust to word count */
nvm->flash_bank_size /= sizeof(u16);
@@ -636,6 +637,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_pch_lpt) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
+ mac->ops.setup_physical_interface =
+ e1000_setup_copper_link_pch_lpt;
}
/* Enable PCS Lock-loss workaround for ICH8 */
@@ -692,7 +695,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
*
* Assumes the SW/FW/HW Semaphore is already acquired.
**/
-static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
@@ -709,11 +712,22 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
s32 ret_val;
- u16 lpi_ctrl;
+ u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
- if ((hw->phy.type != e1000_phy_82579) &&
- (hw->phy.type != e1000_phy_i217))
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ lpa = I82579_EEE_LP_ABILITY;
+ pcs_status = I82579_EEE_PCS_STATUS;
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ lpa = I217_EEE_LP_ABILITY;
+ pcs_status = I217_EEE_PCS_STATUS;
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
return 0;
+ }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
@@ -728,34 +742,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
/* Enable EEE if not disabled by user */
if (!dev_spec->eee_disable) {
- u16 lpa, pcs_status, data;
-
/* Save off link partner's EEE ability */
- switch (hw->phy.type) {
- case e1000_phy_82579:
- lpa = I82579_EEE_LP_ABILITY;
- pcs_status = I82579_EEE_PCS_STATUS;
- break;
- case e1000_phy_i217:
- lpa = I217_EEE_LP_ABILITY;
- pcs_status = I217_EEE_PCS_STATUS;
- break;
- default:
- ret_val = -E1000_ERR_PHY;
- goto release;
- }
ret_val = e1000_read_emi_reg_locked(hw, lpa,
&dev_spec->eee_lp_ability);
if (ret_val)
goto release;
+ /* Read EEE advertisement */
+ ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+ if (ret_val)
+ goto release;
+
/* Enable EEE only for speeds in which the link partner is
- * EEE capable.
+ * EEE capable and for which we advertise EEE.
*/
- if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
- if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+ if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
e1e_rphy_locked(hw, MII_LPA, &data);
if (data & LPA_100FULL)
lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
@@ -767,13 +771,13 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
dev_spec->eee_lp_ability &=
~I82579_EEE_100_SUPPORTED;
}
-
- /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
- ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
- if (ret_val)
- goto release;
}
+ /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+ if (ret_val)
+ goto release;
+
ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
hw->phy.ops.release(hw);
@@ -835,6 +839,94 @@ release:
}
/**
+ * e1000_platform_pm_pch_lpt - Set platform power management values
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
+ * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
+ * when link is up (which must not exceed the maximum latency supported
+ * by the platform), otherwise specify there is no LTR requirement.
+ * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
+ * latencies in the LTR Extended Capability Structure in the PCIe Extended
+ * Capability register set, on this device LTR is set by writing the
+ * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
+ * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
+ * message to the PMC.
+ **/
+static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
+{
+ u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
+ link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
+ u16 lat_enc = 0; /* latency encoded */
+
+ if (link) {
+ u16 speed, duplex, scale = 0;
+ u16 max_snoop, max_nosnoop;
+ u16 max_ltr_enc; /* max LTR latency encoded */
+ s64 lat_ns; /* latency (ns) */
+ s64 value;
+ u32 rxa;
+
+ if (!hw->adapter->max_frame_size) {
+ e_dbg("max_frame_size not set.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
+ if (!speed) {
+ e_dbg("Speed not set.\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ /* Rx Packet Buffer Allocation size (KB) */
+ rxa = er32(PBA) & E1000_PBA_RXA_MASK;
+
+ /* Determine the maximum latency tolerated by the device.
+ *
+ * Per the PCIe spec, the tolerated latencies are encoded as
+ * a 3-bit encoded scale (only 0-5 are valid) multiplied by
+ * a 10-bit value (0-1023) to provide a range from 1 ns to
+ * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
+ * 1=2^5ns, 2=2^10ns,...5=2^25ns.
+ */
+ lat_ns = ((s64)rxa * 1024 -
+ (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
+ if (lat_ns < 0)
+ lat_ns = 0;
+ else
+ do_div(lat_ns, speed);
+
+ value = lat_ns;
+ while (value > PCI_LTR_VALUE_MASK) {
+ scale++;
+ value = DIV_ROUND_UP(value, (1 << 5));
+ }
+ if (scale > E1000_LTRV_SCALE_MAX) {
+ e_dbg("Invalid LTR latency scale %d\n", scale);
+ return -E1000_ERR_CONFIG;
+ }
+ lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
+
+ /* Determine the maximum latency tolerated by the platform */
+ pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
+ &max_snoop);
+ pci_read_config_word(hw->adapter->pdev,
+ E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
+ max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
+
+ if (lat_enc > max_ltr_enc)
+ lat_enc = max_ltr_enc;
+ }
+
+ /* Set Snoop and No-Snoop latencies the same */
+ reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
+ ew32(LTRV, reg);
+
+ return 0;
+}
+
+/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
*
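
The new e1000_platform_pm_pch_lpt() above encodes a latency in nanoseconds as a 10-bit value scaled by 2^(5*scale), with scale codes 0..5. A self-contained sketch of that encoding step; LTR_VALUE_MAX and LTR_SCALE_MAX are local stand-ins for the PCI_LTR_*/E1000_LTRV_* macros used in the patch:

#define LTR_VALUE_MAX	1023	/* 10-bit encoded value */
#define LTR_SCALE_MAX	5	/* scale codes 0..5 are valid */

static int ltr_encode(unsigned long long lat_ns, unsigned int *enc)
{
	unsigned long long value = lat_ns;
	unsigned int scale = 0;

	/* Divide by 2^5 per scale step, rounding up, until the value fits. */
	while (value > LTR_VALUE_MAX) {
		scale++;
		value = (value + 31) / 32;
	}
	if (scale > LTR_SCALE_MAX)
		return -1;	/* latency too large to encode */

	*enc = (scale << 10) | (unsigned int)value;
	return 0;
}
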
@@ -871,6 +963,34 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ /* When connected at 10Mbps half-duplex, 82579 parts are excessively
+ * aggressive resulting in many collisions. To avoid this, increase
+ * the IPG and reduce Rx latency in the PHY.
+ */
+ if ((hw->mac.type == e1000_pch2lan) && link) {
+ u32 reg;
+ reg = er32(STATUS);
+ if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+ reg = er32(TIPG);
+ reg &= ~E1000_TIPG_IPGT_MASK;
+ reg |= 0xFF;
+ ew32(TIPG, reg);
+
+ /* Reduce Rx latency in analog PHY */
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val =
+ e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+
+ hw->phy.ops.release(hw);
+
+ if (ret_val)
+ return ret_val;
+ }
+ }
+
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
@@ -879,6 +999,15 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Set platform power management values for
+ * Latency Tolerance Reporting (LTR)
+ */
+ ret_val = e1000_platform_pm_pch_lpt(hw, link);
+ if (ret_val)
+ return ret_val;
+ }
+
/* Clear link partner's EEE ability */
hw->dev_spec.ich8lan.eee_lp_ability = 0;
@@ -1002,10 +1131,6 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
- /* Disable EEE by default until IEEE802.3az spec is finalized */
- if (adapter->flags2 & FLAG2_HAS_EEE)
- adapter->hw.dev_spec.ich8lan.eee_disable = true;
-
return 0;
}
@@ -1134,9 +1259,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
- ((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+ return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ ((fwsm & E1000_FWSM_MODE_MASK) ==
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
}
/**
@@ -1153,7 +1278,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
fwsm = er32(FWSM);
return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
- (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+ (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1440,8 +1565,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
word_addr = (u16)(cnf_base_addr << 1);
for (i = 0; i < cnf_size; i++) {
- ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
- &reg_data);
+ ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
if (ret_val)
goto release;
@@ -1501,13 +1625,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ status_reg &= (BM_CS_STATUS_LINK_UP |
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (status_reg == (BM_CS_STATUS_LINK_UP |
- BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_1000))
+ BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
k1_enable = false;
}
@@ -1516,13 +1640,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
if (ret_val)
goto release;
- status_reg &= HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_MASK;
+ status_reg &= (HV_M_STATUS_LINK_UP |
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_MASK);
if (status_reg == (HV_M_STATUS_LINK_UP |
- HV_M_STATUS_AUTONEG_COMPLETE |
- HV_M_STATUS_SPEED_1000))
+ HV_M_STATUS_AUTONEG_COMPLETE |
+ HV_M_STATUS_SPEED_1000))
k1_enable = false;
}
@@ -1579,7 +1703,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
if (ret_val)
return ret_val;
- udelay(20);
+ usleep_range(20, 40);
ctrl_ext = er32(CTRL_EXT);
ctrl_reg = er32(CTRL);
@@ -1589,11 +1713,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
e1e_flush();
- udelay(20);
+ usleep_range(20, 40);
ew32(CTRL, ctrl_reg);
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
- udelay(20);
+ usleep_range(20, 40);
return 0;
}
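
Several hunks in this series convert udelay() calls to usleep_range(). The pattern: udelay() busy-waits and is only appropriate in atomic context or for very short delays, while usleep_range(min, max) sleeps and gives the timer subsystem a window in which to coalesce wakeups, so it is the better fit for these tens-of-microseconds pauses taken from process context. A generic before/after illustration, not driver code:

#include <linux/delay.h>

/* Before: busy-wait, burns CPU even though the caller may sleep. */
static void settle_busy(void)
{
        udelay(100);
}

/* After: sleep at least 100us, allowing wakeups to be batched. */
static void settle_sleepy(void)
{
        usleep_range(100, 200);
}
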
@@ -1667,7 +1791,6 @@ release:
return ret_val;
}
-
/**
* e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
* @hw: pointer to the HW structure
@@ -1834,7 +1957,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
* SHRAL/H) and initial CRC values to the MAC
*/
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
- u8 mac_addr[ETH_ALEN] = {0};
+ u8 mac_addr[ETH_ALEN] = { 0 };
u32 addr_high, addr_low;
addr_high = er32(RAH(i));
@@ -1865,8 +1988,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_CTRL_OFFSET,
- &data);
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
if (ret_val)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
@@ -1875,8 +1998,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_HD_CTRL,
- &data);
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
if (ret_val)
return ret_val;
data &= ~(0xF << 8);
@@ -1923,8 +2046,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
ew32(RCTL, mac_reg);
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_CTRL_OFFSET,
- &data);
+ E1000_KMRNCTRLSTA_CTRL_OFFSET,
+ &data);
if (ret_val)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
@@ -1933,8 +2056,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
- E1000_KMRNCTRLSTA_HD_CTRL,
- &data);
+ E1000_KMRNCTRLSTA_HD_CTRL,
+ &data);
if (ret_val)
return ret_val;
data &= ~(0xF << 8);
@@ -2100,7 +2223,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
do {
data = er32(STATUS);
data &= E1000_STATUS_LAN_INIT_DONE;
- udelay(100);
+ usleep_range(100, 200);
} while ((!data) && --loop);
/* If basic configuration is incomplete before the above loop
@@ -2445,7 +2568,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 0 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
- &sig_byte);
+ &sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2456,8 +2579,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
/* Check bank 1 */
ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
- bank1_offset,
- &sig_byte);
+ bank1_offset,
+ &sig_byte);
if (ret_val)
return ret_val;
if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2510,8 +2633,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
ret_val = 0;
for (i = 0; i < words; i++) {
- if (dev_spec->shadow_ram[offset+i].modified) {
- data[i] = dev_spec->shadow_ram[offset+i].value;
+ if (dev_spec->shadow_ram[offset + i].modified) {
+ data[i] = dev_spec->shadow_ram[offset + i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw,
act_offset + i,
@@ -2696,8 +2819,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
udelay(1);
@@ -2714,8 +2837,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_READ_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1, if set to 1, clear it
* and try the whole sequence a few more times, else
@@ -2774,8 +2898,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
nvm->ops.acquire(hw);
for (i = 0; i < words; i++) {
- dev_spec->shadow_ram[offset+i].modified = true;
- dev_spec->shadow_ram[offset+i].value = data[i];
+ dev_spec->shadow_ram[offset + i].modified = true;
+ dev_spec->shadow_ram[offset + i].value = data[i];
}
nvm->ops.release(hw);
@@ -2844,8 +2968,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
data = dev_spec->shadow_ram[i].value;
} else {
ret_val = e1000_read_flash_word_ich8lan(hw, i +
- old_bank_offset,
- &data);
+ old_bank_offset,
+ &data);
if (ret_val)
break;
}
@@ -2863,7 +2987,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
/* Convert offset to bytes. */
act_offset = (i + new_bank_offset) << 1;
- udelay(100);
+ usleep_range(100, 200);
/* Write the bytes to the new bank. */
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
act_offset,
@@ -2871,10 +2995,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
break;
- udelay(100);
+ usleep_range(100, 200);
ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
- act_offset + 1,
- (u8)(data >> 8));
+ act_offset + 1,
+ (u8)(data >> 8));
if (ret_val)
break;
}
@@ -3050,8 +3174,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
offset > ICH_FLASH_LINEAR_ADDR_MASK)
return -E1000_ERR_NVM;
- flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
- hw->nvm.flash_base_addr;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
do {
udelay(1);
@@ -3062,7 +3186,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
- hsflctl.hsf_ctrl.fldbcount = size -1;
+ hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
@@ -3078,8 +3202,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
/* check if FCERR is set to 1 , if set to 1, clear it
* and try the whole sequence a few more times else done
*/
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+ ret_val =
+ e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
if (!ret_val)
break;
@@ -3138,7 +3263,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
for (program_retries = 0; program_retries < 100; program_retries++) {
e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
- udelay(100);
+ usleep_range(100, 200);
ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
if (!ret_val)
break;
@@ -3209,8 +3334,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr = hw->nvm.flash_base_addr;
flash_linear_addr += (bank) ? flash_bank_size : 0;
- for (j = 0; j < iteration ; j++) {
+ for (j = 0; j < iteration; j++) {
do {
+ u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
/* Steps */
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val)
@@ -3230,8 +3357,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
flash_linear_addr += (j * sector_size);
ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
- ret_val = e1000_flash_cycle_ich8lan(hw,
- ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+ ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
if (!ret_val)
break;
@@ -3270,8 +3396,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
return ret_val;
}
- if (*data == ID_LED_RESERVED_0000 ||
- *data == ID_LED_RESERVED_FFFF)
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT_ICH8LAN;
return 0;
@@ -3511,9 +3636,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Initialize identification LED */
ret_val = mac->ops.id_led_init(hw);
+ /* An error is not fatal and we should not stop init due to this */
if (ret_val)
e_dbg("Error initializing identification LED\n");
- /* This is not fatal and we should not stop init due to this */
/* Setup the receive address. */
e1000e_init_rx_addrs(hw, mac->rar_entry_count);
@@ -3541,16 +3666,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
/* Set the transmit descriptor write-back policy for both queues */
txdctl = er32(TXDCTL(0));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(0), txdctl);
txdctl = er32(TXDCTL(1));
- txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
- E1000_TXDCTL_FULL_TX_DESC_WB;
- txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
- E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+ txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+ E1000_TXDCTL_FULL_TX_DESC_WB);
+ txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+ E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
ew32(TXDCTL(1), txdctl);
/* ICH8 has opposite polarity of no_snoop bits.
@@ -3559,7 +3684,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
snoop = PCIE_ICH8_SNOOP_ALL;
else
- snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+ snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
ctrl_ext = er32(CTRL_EXT);
@@ -3575,6 +3700,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
return ret_val;
}
+
/**
* e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
* @hw: pointer to the HW structure
@@ -3686,8 +3812,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
*/
hw->fc.current_mode = hw->fc.requested_mode;
- e_dbg("After fix-ups FlowControl is now = %x\n",
- hw->fc.current_mode);
+ e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
/* Continue to configure the copper link. */
ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -3737,12 +3862,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &reg_data);
+ &reg_data);
if (ret_val)
return ret_val;
reg_data |= 0x3F;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- reg_data);
+ reg_data);
if (ret_val)
return ret_val;
@@ -3760,7 +3885,6 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
break;
case e1000_phy_82577:
case e1000_phy_82579:
- case e1000_phy_i217:
ret_val = e1000_copper_link_setup_82577(hw);
if (ret_val)
return ret_val;
@@ -3796,6 +3920,31 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
}
/**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices
+ **/
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ ret_val = e1000_copper_link_setup_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
* e1000_get_link_up_info_ich8lan - Get current link speed and duplex
* @hw: pointer to the HW structure
* @speed: pointer to store current link speed
@@ -3815,8 +3964,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
return ret_val;
if ((hw->mac.type == e1000_ich8lan) &&
- (hw->phy.type == e1000_phy_igp_3) &&
- (*speed == SPEED_1000)) {
+ (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
}
@@ -3899,7 +4047,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
* /disabled - false).
**/
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
- bool state)
+ bool state)
{
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
@@ -3981,12 +4129,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
return;
ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- &reg_data);
+ &reg_data);
if (ret_val)
return;
reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
- reg_data);
+ reg_data);
if (ret_val)
return;
reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8bf4655c2e1..80034a2b297 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -211,7 +211,8 @@
#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */
#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */
#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
-#define I82579_EEE_PCS_STATUS 0x182D /* IEEE MMD Register 3.1 >> 8 */
+#define I82579_RX_CONFIG 0x3412 /* Receive configuration */
+#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */
#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */
#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */
#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */
@@ -249,13 +250,6 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
-/* OBFF Control & Threshold Defines */
-#define E1000_SVCR_OFF_EN 0x00000001
-#define E1000_SVCR_OFF_MASKINT 0x00001000
-#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
-#define E1000_SVCR_OFF_TIMER_SHIFT 16
-#define E1000_SVT_OFF_HWM_MASK 0x0000001F
-
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
@@ -267,4 +261,5 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
#endif /* _E1000E_ICH8LAN_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b78e0217460..2480c109187 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
* serdes media type.
*/
/* SYNCH bit and IV bit are sticky. */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
@@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
/* SYNCH bit and IV bit are sticky, so reread rxcw. */
- udelay(10);
+ usleep_range(10, 20);
rxcw = er32(RXCW);
if (rxcw & E1000_RXCW_SYNCH) {
if (!(rxcw & E1000_RXCW_IV)) {
@@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (!(swsm & E1000_SWSM_SMBI))
break;
- udelay(50);
+ usleep_range(50, 100);
i++;
}
@@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
- udelay(50);
+ usleep_range(50, 100);
}
if (i == timeout) {
@@ -1600,15 +1600,28 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
- /* set the blink bit for each LED that's "on" (0x0E)
- * in ledctl_mode2
+ /* Set the blink bit for each LED that's "on" (0x0E)
+ * (or "off" if inverted) in ledctl_mode2. The blink
+ * logic in hardware only works when mode is set to "on"
+ * so it must be changed accordingly when the mode is
+ * "off" and inverted.
*/
ledctl_blink = hw->mac.ledctl_mode2;
- for (i = 0; i < 4; i++)
- if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
- E1000_LEDCTL_MODE_LED_ON)
- ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
- (i * 8));
+ for (i = 0; i < 32; i += 8) {
+ u32 mode = (hw->mac.ledctl_mode2 >> i) &
+ E1000_LEDCTL_LED0_MODE_MASK;
+ u32 led_default = hw->mac.ledctl_default >> i;
+
+ if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+ ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+ (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+ ledctl_blink &=
+ ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+ ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+ E1000_LEDCTL_MODE_LED_ON) << i;
+ }
+ }
}
ew32(LEDCTL, ledctl_blink);
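
The reworked blink logic above walks LEDCTL one 8-bit LED field at a time and forces blink-plus-ON only for LEDs that are actually lit in the current mode, which for an inverted output means a configured mode of "off". A compact userspace sketch of the same byte-wise walk; the per-LED field encodings are defined locally as assumptions.

#include <stdint.h>

/* Assumed per-LED field encoding (8 bits per LED in LEDCTL). */
#define LED_MODE_MASK  0x0Fu
#define LED_MODE_ON    0x0Eu
#define LED_MODE_OFF   0x0Fu
#define LED_IVRT       0x40u  /* output inverted */
#define LED_BLINK      0x80u

/* Build a blink word from the "on" mode word, honouring the inversion
 * bit recorded in the default word, one 8-bit LED field at a time.
 */
static uint32_t build_blink(uint32_t mode2, uint32_t ledctl_default)
{
        uint32_t blink = mode2;
        int i;

        for (i = 0; i < 32; i += 8) {
                uint32_t mode = (mode2 >> i) & LED_MODE_MASK;
                uint32_t def = ledctl_default >> i;
                int lit = (!(def & LED_IVRT) && mode == LED_MODE_ON) ||
                          ((def & LED_IVRT) && mode == LED_MODE_OFF);

                if (lit) {
                        blink &= ~((uint32_t)LED_MODE_MASK << i);
                        blink |= (uint32_t)(LED_BLINK | LED_MODE_ON) << i;
                }
        }
        return blink;
}
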
@@ -1712,7 +1725,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
while (timeout) {
if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
break;
- udelay(100);
+ usleep_range(100, 200);
timeout--;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7e615e2bf7e..b18fad5b579 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -55,7 +55,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION
+#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
@@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
cpu_to_le64(ps_page->dma);
}
- skb = __netdev_alloc_skb_ip_align(netdev,
- adapter->rx_ps_bsize0,
+ skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
gfp);
if (!skb) {
@@ -850,8 +848,8 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
- PAGE_SIZE,
+ buffer_info->page, 0,
+ PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++;
@@ -942,10 +940,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
cleaned = true;
cleaned_count++;
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ adapter->rx_buffer_len, DMA_FROM_DEVICE);
buffer_info->dma = 0;
length = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1073,8 +1069,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
static void e1000_print_hw_hang(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter,
- print_hang_task);
+ struct e1000_adapter,
+ print_hang_task);
struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
@@ -1087,8 +1083,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
- if (!adapter->tx_hang_recheck &&
- (adapter->flags2 & FLAG2_DMA_BURST)) {
+ if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
/* May be block on write-back, flush and detect again
* flush pending descriptor writebacks to memory
*/
@@ -1130,19 +1125,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
"PHY 1000BASE-T Status <%x>\n"
"PHY Extended Status <%x>\n"
"PCI Status <%x>\n",
- readl(tx_ring->head),
- readl(tx_ring->tail),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->upper.fields.status,
- er32(STATUS),
- phy_status,
- phy_1000t_status,
- phy_ext_status,
- pci_status);
+ readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
+ tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
+ eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
+ phy_status, phy_1000t_status, phy_ext_status, pci_status);
/* Suggest workaround for known h/w issue */
if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
@@ -1435,7 +1421,7 @@ copydone:
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
if (rx_desc->wb.upper.header_status &
- cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
+ cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
adapter->rx_hdr_split++;
e1000_receive_skb(adapter, netdev, skb, staterr,
@@ -1473,7 +1459,7 @@ next_desc:
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -1500,7 +1486,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
unsigned int i;
int cleaned_count = 0;
bool cleaned = false;
- unsigned int total_rx_bytes=0, total_rx_packets=0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct skb_shared_info *shinfo;
i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -1546,7 +1533,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
rx_ring->rx_skb_top = NULL;
goto next_desc;
}
-
#define rxtop (rx_ring->rx_skb_top)
if (!(staterr & E1000_RXD_STAT_EOP)) {
/* this descriptor is only the beginning (or middle) */
@@ -1554,12 +1540,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
/* re-use the skb, only consumed the page */
buffer_info->skb = skb;
}
@@ -1568,9 +1555,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
} else {
if (rxtop) {
/* end of the chain */
- skb_fill_page_desc(rxtop,
- skb_shinfo(rxtop)->nr_frags,
- buffer_info->page, 0, length);
+ shinfo = skb_shinfo(rxtop);
+ skb_fill_page_desc(rxtop, shinfo->nr_frags,
+ buffer_info->page, 0,
+ length);
/* re-use the current skb, we only consumed the
* page
*/
@@ -1595,10 +1583,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
@@ -1671,8 +1659,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
dma_unmap_page(&pdev->dev, buffer_info->dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ PAGE_SIZE, DMA_FROM_DEVICE);
else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0,
@@ -1725,7 +1712,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
static void e1000e_downshift_workaround(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, downshift_task);
+ struct e1000_adapter,
+ downshift_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -1918,7 +1906,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
struct e1000_hw *hw = &adapter->hw;
struct e1000_ring *tx_ring = adapter->tx_ring;
-
adapter->total_tx_bytes = 0;
adapter->total_tx_packets = 0;
@@ -1975,7 +1962,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
ew32(RFCTL, rfctl);
}
-#define E1000_IVAR_INT_ALLOC_VALID 0x8
/* Configure Rx vector */
rx_ring->ims_val = E1000_IMS_RXQ0;
adapter->eiac_mask |= rx_ring->ims_val;
@@ -2050,8 +2036,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_HAS_MSIX) {
adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
adapter->msix_entries = kcalloc(adapter->num_vectors,
- sizeof(struct msix_entry),
- GFP_KERNEL);
+ sizeof(struct
+ msix_entry),
+ GFP_KERNEL);
if (adapter->msix_entries) {
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
@@ -2495,7 +2482,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
switch (itr_setting) {
case lowest_latency:
/* handle TSO and jumbo frames */
- if (bytes/packets > 8000)
+ if (bytes / packets > 8000)
retval = bulk_latency;
else if ((packets < 5) && (bytes > 512))
retval = low_latency;
@@ -2503,13 +2490,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000)
+ if (bytes / packets > 8000)
retval = bulk_latency;
- else if ((packets < 10) || ((bytes/packets) > 1200))
+ else if ((packets < 10) || ((bytes / packets) > 1200))
retval = bulk_latency;
else if ((packets > 35))
retval = lowest_latency;
- } else if (bytes/packets > 2000) {
+ } else if (bytes / packets > 2000) {
retval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
retval = lowest_latency;
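
The spacing cleanups above land in e1000_update_itr(), which classifies recent traffic into a latency class from the bytes/packets ratio (a proxy for average frame size) plus the raw packet count. A distilled standalone sketch of the classification, limited to the thresholds visible in this hunk:

enum itr_latency { lowest_latency, low_latency, bulk_latency };

/* Classify traffic seen since the last interrupt: large average frames
 * push toward bulk latency, small bursts toward lowest latency.
 */
static enum itr_latency classify(enum itr_latency cur, int packets, int bytes)
{
        if (packets == 0)
                return cur;

        switch (cur) {
        case lowest_latency:
                if (bytes / packets > 8000)
                        return bulk_latency;
                if (packets < 5 && bytes > 512)
                        return low_latency;
                return cur;
        case low_latency:
                if (bytes > 10000) {
                        if (bytes / packets > 8000)
                                return bulk_latency;
                        if (packets < 10 || bytes / packets > 1200)
                                return bulk_latency;
                        if (packets > 35)
                                return lowest_latency;
                } else if (bytes / packets > 2000) {
                        return bulk_latency;
                } else if (packets <= 2 && bytes < 512) {
                        return lowest_latency;
                }
                return cur;
        default:
                return cur;
        }
}
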
@@ -2561,8 +2548,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
current_itr = max(adapter->rx_itr, adapter->tx_itr);
- switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
+ switch (current_itr) {
case lowest_latency:
new_itr = 70000;
break;
@@ -2583,8 +2570,7 @@ set_itr_now:
* increasing
*/
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
adapter->itr = new_itr;
adapter->rx_ring->itr_val = new_itr;
if (adapter->msix_entries)
@@ -2815,8 +2801,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
e1000_vlan_rx_add_vid(netdev, vid);
adapter->mng_vlan_id = vid;
}
@@ -2832,7 +2817,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_rx_add_vid(adapter->netdev, 0);
for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- e1000_vlan_rx_add_vid(adapter->netdev, vid);
+ e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
@@ -3007,8 +2992,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rctl = er32(RCTL);
rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
- E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
- (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+ E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+ (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
/* Do not Store bad packets */
rctl &= ~E1000_RCTL_SBP;
@@ -3094,19 +3079,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* Enable Packet split descriptors */
rctl |= E1000_RCTL_DTYP_PS;
- psrctl |= adapter->rx_ps_bsize0 >>
- E1000_PSRCTL_BSIZE0_SHIFT;
+ psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
switch (adapter->rx_ps_pages) {
case 3:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE3_SHIFT;
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
+ /* fall-through */
case 2:
- psrctl |= PAGE_SIZE <<
- E1000_PSRCTL_BSIZE2_SHIFT;
+ psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
+ /* fall-through */
case 1:
- psrctl |= PAGE_SIZE >>
- E1000_PSRCTL_BSIZE1_SHIFT;
+ psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
break;
}
@@ -3280,7 +3263,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
/* update_mc_addr_list expects a packed array of only addresses. */
i = 0;
netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
@@ -3757,8 +3740,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
- sizeof(struct e1000_tx_desc) -
- ETH_FCS_LEN) * 2;
+ sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
min_tx_space = ALIGN(min_tx_space, 1024);
min_tx_space >>= 10;
/* software strips receive CRC, so leave room for it */
@@ -3861,13 +3843,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
if ((adapter->max_frame_size * 2) > (pba << 10)) {
if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned off\n");
+ "Interrupt Throttle Rate off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev,
- "Interrupt Throttle Rate turned on\n");
+ "Interrupt Throttle Rate on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
e1000e_write_itr(adapter, adapter->itr);
@@ -3898,6 +3880,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* initialize systim and reset the ns time counter */
e1000e_config_hwtstamp(adapter);
+ /* Set EEE advertisement as appropriate */
+ if (adapter->flags2 & FLAG2_HAS_EEE) {
+ s32 ret_val;
+ u16 adv_addr;
+
+ switch (hw->phy.type) {
+ case e1000_phy_82579:
+ adv_addr = I82579_EEE_ADVERTISEMENT;
+ break;
+ case e1000_phy_i217:
+ adv_addr = I217_EEE_ADVERTISEMENT;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "Invalid PHY type setting EEE advertisement\n");
+ return;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val) {
+ dev_err(&adapter->pdev->dev,
+ "EEE advertisement - unable to acquire PHY\n");
+ return;
+ }
+
+ e1000_write_emi_reg_locked(hw, adv_addr,
+ hw->dev_spec.ich8lan.eee_disable ?
+ 0 : adapter->eee_advert);
+
+ hw->phy.ops.release(hw);
+ }
+
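
With the default EEE disable removed in the ich8lan hunk above, every reset now reprograms the PHY's EEE advertisement EMI register: the register address depends on the PHY generation, and the value is either 0 (EEE administratively off) or the modes cached in adapter->eee_advert, which the probe hunk further down seeds with 100BASE-TX and 1000BASE-T. A small sketch of the value selection; the advertisement bit values are assumptions matching MDIO register 7.60.

#include <stdbool.h>
#include <stdint.h>

/* Assumed ability bits from MDIO register 7.60 (EEE advertisement). */
#define EEE_ADV_100TX  0x0002u
#define EEE_ADV_1000T  0x0004u

/* Value to program into the PHY's EEE advertisement register. */
static uint16_t eee_adv_value(bool eee_disable, uint16_t cached_advert)
{
        return eee_disable ? 0 : cached_advert;
}

/* Default advertisement seeded at probe time. */
static uint16_t eee_adv_default(void)
{
        return EEE_ADV_100TX | EEE_ADV_1000T;
}
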
if (!netif_running(adapter->netdev) &&
!test_bit(__E1000_TESTING, &adapter->state)) {
e1000_power_down_phy(adapter);
@@ -4266,8 +4280,7 @@ static int e1000_open(struct net_device *netdev)
e1000e_power_up_phy(adapter);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+ if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
e1000_update_mng_vlan(adapter);
/* DMA latency requirement to workaround jumbo issue */
@@ -4370,8 +4383,7 @@ static int e1000_close(struct net_device *netdev)
/* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it)
*/
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+ if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
/* If AMT is enabled, let the firmware know that the network
@@ -4387,6 +4399,7 @@ static int e1000_close(struct net_device *netdev)
return 0;
}
+
/**
* e1000_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -4437,7 +4450,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
static void e1000e_update_phy_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, update_phy_task);
+ struct e1000_adapter,
+ update_phy_task);
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4454,7 +4468,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
**/
static void e1000_update_phy_info(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
if (test_bit(__E1000_DOWN, &adapter->state))
return;
@@ -4621,18 +4635,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
* our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
netdev->stats.rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
+ adapter->stats.roc;
netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
netdev->stats.rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
- netdev->stats.tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
+ netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
netdev->stats.tx_aborted_errors = adapter->stats.ecol;
netdev->stats.tx_window_errors = adapter->stats.latecol;
netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@@ -4790,7 +4802,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
**/
static void e1000_watchdog(unsigned long data)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = (struct e1000_adapter *)data;
/* Do the rest outside of interrupt context */
schedule_work(&adapter->watchdog_task);
@@ -4801,7 +4813,8 @@ static void e1000_watchdog(unsigned long data)
static void e1000_watchdog_task(struct work_struct *work)
{
struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, watchdog_task);
+ struct e1000_adapter,
+ watchdog_task);
struct net_device *netdev = adapter->netdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -4835,8 +4848,8 @@ static void e1000_watchdog_task(struct work_struct *work)
/* update snapshot of PHY registers on LSC */
e1000_phy_read_status(adapter);
mac->ops.get_link_up_info(&adapter->hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
e1000_print_link_info(adapter);
/* check if SmartSpeed worked */
@@ -4949,7 +4962,7 @@ static void e1000_watchdog_task(struct work_struct *work)
adapter->flags |= FLAG_RESTART_NOW;
else
pm_schedule_suspend(netdev->dev.parent,
- LINK_TIMEOUT);
+ LINK_TIMEOUT);
}
}
@@ -4984,8 +4997,8 @@ link_up:
*/
u32 goc = (adapter->gotc + adapter->gorc) / 10000;
u32 dif = (adapter->gotc > adapter->gorc ?
- adapter->gotc - adapter->gorc :
- adapter->gorc - adapter->gotc) / 10000;
+ adapter->gotc - adapter->gorc :
+ adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
e1000e_write_itr(adapter, itr);
@@ -5064,14 +5077,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
iph->tot_len = 0;
iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- 0, IPPROTO_TCP, 0);
+ 0, IPPROTO_TCP, 0);
cmd_length = E1000_TXD_CMD_IP;
ipcse = skb_transport_offset(skb) - 1;
} else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
ipcse = 0;
}
ipcss = skb_network_offset(skb);
@@ -5080,7 +5093,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
- E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+ E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
i = tx_ring->next_to_use;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
@@ -5150,8 +5163,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
context_desc->lower_setup.ip_config = 0;
context_desc->upper_setup.tcp_fields.tucss = css;
- context_desc->upper_setup.tcp_fields.tucso =
- css + skb->csum_offset;
+ context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
context_desc->upper_setup.tcp_fields.tucse = 0;
context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@@ -5224,7 +5236,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
buffer_info->time_stamp = jiffies;
buffer_info->next_to_watch = i;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
- offset, size, DMA_TO_DEVICE);
+ offset, size,
+ DMA_TO_DEVICE);
buffer_info->mapped_as_page = true;
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
@@ -5273,7 +5286,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (tx_flags & E1000_TX_FLAGS_TSO) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (tx_flags & E1000_TX_FLAGS_IPV4)
@@ -5304,8 +5317,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
buffer_info = &tx_ring->buffer_info[i];
tx_desc = E1000_TX_DESC(*tx_ring, i);
tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->lower.data =
- cpu_to_le32(txd_lower | buffer_info->length);
+ tx_desc->lower.data = cpu_to_le32(txd_lower |
+ buffer_info->length);
tx_desc->upper.data = cpu_to_le32(txd_upper);
i++;
@@ -5355,11 +5368,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
return 0;
- if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+ if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
return 0;
{
- const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+ const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
struct udphdr *udp;
if (ip->protocol != IPPROTO_UDP)
@@ -5584,7 +5597,7 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
**/
struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5605,18 +5618,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.cexterr;
- stats->rx_length_errors = adapter->stats.ruc +
- adapter->stats.roc;
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
+ stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
stats->rx_crc_errors = adapter->stats.crcerrs;
stats->rx_frame_errors = adapter->stats.algnerrc;
stats->rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */
- stats->tx_errors = adapter->stats.ecol +
- adapter->stats.latecol;
+ stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
stats->tx_aborted_errors = adapter->stats.ecol;
stats->tx_window_errors = adapter->stats.latecol;
stats->tx_carrier_errors = adapter->stats.tncrs;
@@ -5685,9 +5695,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
/* adjust allocation if LPE protects us, and we aren't using SBP */
if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
- (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+ (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
- + ETH_FCS_LEN;
+ + ETH_FCS_LEN;
if (netif_running(netdev))
e1000e_up(adapter);
@@ -5866,7 +5876,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
phy_reg &= ~(BM_RCTL_MO_MASK);
if (mac_reg & E1000_RCTL_MO_3)
phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
- << BM_RCTL_MO_SHIFT);
+ << BM_RCTL_MO_SHIFT);
if (mac_reg & E1000_RCTL_BAM)
phy_reg |= BM_RCTL_BAM;
if (mac_reg & E1000_RCTL_PMCF)
@@ -5935,10 +5945,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
}
ctrl = er32(CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
@@ -5982,8 +5988,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
*/
e1000e_release_hw_control(adapter);
- pci_clear_master(pdev);
-
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
@@ -6082,24 +6086,24 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
if (phy_data) {
e_info("PHY Wakeup cause - %s\n",
- phy_data & E1000_WUS_EX ? "Unicast Packet" :
- phy_data & E1000_WUS_MC ? "Multicast Packet" :
- phy_data & E1000_WUS_BC ? "Broadcast Packet" :
- phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ?
- "Link Status Change" : "other");
+ phy_data & E1000_WUS_EX ? "Unicast Packet" :
+ phy_data & E1000_WUS_MC ? "Multicast Packet" :
+ phy_data & E1000_WUS_BC ? "Broadcast Packet" :
+ phy_data & E1000_WUS_MAG ? "Magic Packet" :
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
if (wus) {
e_info("MAC Wakeup cause - %s\n",
- wus & E1000_WUS_EX ? "Unicast Packet" :
- wus & E1000_WUS_MC ? "Multicast Packet" :
- wus & E1000_WUS_BC ? "Broadcast Packet" :
- wus & E1000_WUS_MAG ? "Magic Packet" :
- wus & E1000_WUS_LNKC ? "Link Status Change" :
- "other");
+ wus & E1000_WUS_EX ? "Unicast Packet" :
+ wus & E1000_WUS_MC ? "Multicast Packet" :
+ wus & E1000_WUS_BC ? "Broadcast Packet" :
+ wus & E1000_WUS_MAG ? "Magic Packet" :
+ wus & E1000_WUS_LNKC ? "Link Status Change" :
+ "other");
}
ew32(WUS, ~0);
}
@@ -6374,7 +6378,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
e_info("(PCI Express:2.5GT/s:%s) %pM\n",
/* bus width */
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
- "Width x1"),
+ "Width x1"),
/* MAC address */
netdev->dev_addr);
e_info("Intel(R) PRO/%s Network Connection\n",
@@ -6484,7 +6488,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int i, err, pci_using_dac;
+ int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
@@ -6511,15 +6515,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
}
- err = pci_request_selected_regions_exclusive(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM),
- e1000e_driver_name);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ err = pci_request_selected_regions_exclusive(pdev, bars,
+ e1000e_driver_name);
if (err)
goto err_pci_reg;
@@ -6572,6 +6577,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_flashmap;
}
+ /* Set default EEE advertisement */
+ if (adapter->flags2 & FLAG2_HAS_EEE)
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
/* construct the net_device struct */
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
@@ -6688,11 +6697,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
init_timer(&adapter->watchdog_timer);
adapter->watchdog_timer.function = e1000_watchdog;
- adapter->watchdog_timer.data = (unsigned long) adapter;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
init_timer(&adapter->phy_info_timer);
adapter->phy_info_timer.function = e1000_update_phy_info;
- adapter->phy_info_timer.data = (unsigned long) adapter;
+ adapter->phy_info_timer.data = (unsigned long)adapter;
INIT_WORK(&adapter->reset_task, e1000_reset_task);
INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
@@ -6800,7 +6809,7 @@ err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -6870,7 +6879,7 @@ static void e1000_remove(struct pci_dev *pdev)
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_select_bars(pdev, IORESOURCE_MEM));
free_netdev(netdev);
@@ -6891,7 +6900,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
+ board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
@@ -6967,8 +6977,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
- SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
- e1000_runtime_resume, e1000_idle)
+ SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
+ e1000_idle)
};
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 84fecc26816..44ddc0a0ee0 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
{
u32 ctrl_ext;
- udelay(10);
+ usleep_range(10, 20);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 98da75dff93..c16bd75b6ca 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -45,7 +45,7 @@
unsigned int copybreak = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
+ "Maximum size of packet that is copied to a new buffer on receive");
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
@@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*
* Default Value: 1 (enabled)
*/
-E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+E1000_PARAM(WriteProtectNVM,
+ "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
/* Enable CRC Stripping
*
@@ -160,13 +161,18 @@ struct e1000_option {
const char *err;
int def;
union {
- struct { /* range_option info */
+ /* range_option info */
+ struct {
int min;
int max;
} r;
- struct { /* list_option info */
+ /* list_option info */
+ struct {
int nr;
- struct e1000_opt_list { int i; char *str; } *p;
+ struct e1000_opt_list {
+ int i;
+ char *str;
+ } *p;
} l;
} arg;
};
@@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
"Using defaults for all values\n");
}
- { /* Transmit Interrupt Delay */
+ /* Transmit Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
@@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->tx_int_delay = opt.def;
}
}
- { /* Transmit Absolute Interrupt Delay */
+ /* Transmit Absolute Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Transmit Absolute Interrupt Delay",
@@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->tx_abs_int_delay = opt.def;
}
}
- { /* Receive Interrupt Delay */
+ /* Receive Interrupt Delay */
+ {
static struct e1000_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
@@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->rx_int_delay = opt.def;
}
}
- { /* Receive Absolute Interrupt Delay */
+ /* Receive Absolute Interrupt Delay */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
@@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->rx_abs_int_delay = opt.def;
}
}
- { /* Interrupt Throttling Rate */
+ /* Interrupt Throttling Rate */
+ {
static const struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Throttling Rate (ints/sec)",
@@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
break;
}
}
- { /* Interrupt Mode */
+ /* Interrupt Mode */
+ {
static struct e1000_option opt = {
.type = range_option,
.name = "Interrupt Mode",
@@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
kfree(opt.err);
#endif
}
- { /* Smart Power Down */
+ /* Smart Power Down */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "PHY Smart Power Down",
@@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
- { /* CRC Stripping */
+ /* CRC Stripping */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "CRC Stripping",
@@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
}
}
- { /* Kumeran Lock Loss Workaround */
+ /* Kumeran Lock Loss Workaround */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "Kumeran Lock Loss Workaround",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
+ bool enabled = opt.def;
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
- if (hw->mac.type == e1000_ich8lan)
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
- kmrn_lock_loss);
- } else {
- if (hw->mac.type == e1000_ich8lan)
- e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
- opt.def);
+ enabled = kmrn_lock_loss;
}
+
+ if (hw->mac.type == e1000_ich8lan)
+ e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+ enabled);
}
- { /* Write-protect NVM */
+ /* Write-protect NVM */
+ {
static const struct e1000_option opt = {
.type = enable_option,
.name = "Write-protect NVM",
@@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
if (num_WriteProtectNVM > bd) {
- unsigned int write_protect_nvm = WriteProtectNVM[bd];
+ unsigned int write_protect_nvm =
+ WriteProtectNVM[bd];
e1000_validate_option(&write_protect_nvm, &opt,
adapter);
if (write_protect_nvm)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0930c136aa3..59c76a6815a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
/* Cable length tables */
static const u16 e1000_m88_cable_length_table[] = {
- 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
+};
+
#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_m88_cable_length_table)
@@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
- 124};
+ 124
+};
+
#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
ARRAY_SIZE(e1000_igp_2_cable_length_table)
@@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
manc = er32(MANC);
- return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
- E1000_BLK_PHY_RESET : 0;
+ return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
}
/**
@@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
return ret_val;
phy->id = (u32)(phy_id << 16);
- udelay(20);
+ usleep_range(20, 40);
ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
if (ret_val)
return ret_val;
@@ -175,7 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
- *data = (u16) mdic;
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ e_dbg("MDI Read offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
+ *data = (u16)mdic;
/* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
@@ -233,6 +242,12 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
e_dbg("MDI Error\n");
return -E1000_ERR_PHY;
}
+ if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
+ e_dbg("MDI Write offset error - requested %d, returned %d\n",
+ offset,
+ (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
+ return -E1000_ERR_PHY;
+ }
/* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
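
Both MDIC accessors now validate the completed transaction: the register-address field echoed back in the MDIC word must match the offset that was requested, otherwise the access is reported as a PHY error. A standalone sketch of that check; the MDIC field layout (data in bits 15:0, register address in bits 20:16) is stated here as an assumption.

#include <stdbool.h>
#include <stdint.h>

/* Assumed MDIC layout: data [15:0], register address [20:16]. */
#define MDIC_DATA_MASK  0x0000FFFFu
#define MDIC_REG_MASK   0x001F0000u
#define MDIC_REG_SHIFT  16

/* Returns true and extracts the data only if the completed MDIC word
 * echoes the register offset that was actually requested.
 */
static bool mdic_readback_ok(uint32_t mdic, uint32_t offset, uint16_t *data)
{
        if (((mdic & MDIC_REG_MASK) >> MDIC_REG_SHIFT) != offset)
                return false;
        *data = (uint16_t)(mdic & MDIC_DATA_MASK);
        return true;
}
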
@@ -324,7 +339,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
* semaphores before exiting.
**/
static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked)
{
s32 ret_val = 0;
@@ -391,7 +406,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* at the offset. Release any acquired semaphores before exiting.
**/
static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked)
{
s32 ret_val = 0;
@@ -410,8 +425,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
(u16)offset);
if (!ret_val)
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
- offset,
- data);
+ offset, data);
if (!locked)
hw->phy.ops.release(hw);
@@ -458,7 +472,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
* Release any acquired semaphores before exiting.
**/
static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
- bool locked)
+ bool locked)
{
u32 kmrnctrlsta;
@@ -531,7 +545,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
* before exiting.
**/
static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
- bool locked)
+ bool locked)
{
u32 kmrnctrlsta;
@@ -772,8 +786,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
phy_data |= M88E1000_EPSCR_TX_CLK_25;
- if ((phy->revision == 2) &&
- (phy->id == M88E1111_I_PHY_ID)) {
+ if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
/* 82573L PHY - set the downshift counter to 5x. */
phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
@@ -1296,7 +1309,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
@@ -1319,7 +1332,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
/* Try once more */
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
- 100000, &link);
+ 100000, &link);
if (ret_val)
return ret_val;
}
@@ -1609,9 +1622,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
if (!ret_val)
- phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1653,9 +1666,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &data);
if (!ret_val)
- phy->cable_polarity = (data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1685,9 +1698,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, offset, &phy_data);
if (!ret_val)
- phy->cable_polarity = (phy_data & mask)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((phy_data & mask)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -1733,7 +1746,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
* Polls the PHY status register for link, 'iterations' number of times.
**/
s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
- u32 usec_interval, bool *success)
+ u32 usec_interval, bool *success)
{
s32 ret_val = 0;
u16 i, phy_status;
@@ -1756,7 +1769,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval/1000);
+ mdelay(usec_interval / 1000);
else
udelay(usec_interval);
}
@@ -1791,8 +1804,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
- M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+ index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+ M88E1000_PSSR_CABLE_LENGTH_SHIFT);
if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
return -E1000_ERR_PHY;
@@ -1824,10 +1837,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
u16 cur_agc_index, max_agc_index = 0;
u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
- IGP02E1000_PHY_AGC_A,
- IGP02E1000_PHY_AGC_B,
- IGP02E1000_PHY_AGC_C,
- IGP02E1000_PHY_AGC_D
+ IGP02E1000_PHY_AGC_A,
+ IGP02E1000_PHY_AGC_B,
+ IGP02E1000_PHY_AGC_C,
+ IGP02E1000_PHY_AGC_D
};
/* Read the AGC registers for all channels */
@@ -1841,8 +1854,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
* that can be put into the lookup table to obtain the
* approximate cable length.
*/
- cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
- IGP02E1000_AGC_LENGTH_MASK;
+ cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+ IGP02E1000_AGC_LENGTH_MASK);
/* Array index bound check. */
if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@@ -1865,8 +1878,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
/* Calculate cable length with the error range of +/- 10 meters. */
- phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
- (agc_value - IGP02E1000_AGC_RANGE) : 0;
+ phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+ (agc_value - IGP02E1000_AGC_RANGE) : 0);
phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -2040,9 +2053,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
return ret_val;
} else {
/* Polarity is forced */
- phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
}
ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@@ -2119,7 +2132,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
ew32(CTRL, ctrl);
e1e_flush();
- udelay(150);
+ usleep_range(150, 300);
phy->ops.release(hw);
@@ -2375,13 +2388,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
- (page << page_shift));
+ (page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
release:
hw->phy.ops.release(hw);
@@ -2433,13 +2446,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
/* Page is shifted left, PHY expects (page x 32) */
ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
- (page << page_shift));
+ (page << page_shift));
if (ret_val)
goto release;
}
ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
- data);
+ data);
release:
hw->phy.ops.release(hw);
return ret_val;
@@ -2674,7 +2687,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
if (read) {
/* Read the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
- data);
+ data);
} else {
/* Write the Wakeup register page value using opcode 0x12 */
ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
@@ -2763,7 +2776,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
- data, true);
+ data, true);
goto out;
}
@@ -2786,8 +2799,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
page << IGP_PAGE_SHIFT, reg);
- ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
- data);
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
out:
if (!locked)
hw->phy.ops.release(hw);
@@ -2871,7 +2883,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (page > 0 && page < HV_INTC_FC_PAGE_START) {
ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
- &data, false);
+ &data, false);
goto out;
}
@@ -2910,7 +2922,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
page << IGP_PAGE_SHIFT, reg);
ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
- data);
+ data);
out:
if (!locked)
@@ -2988,15 +3000,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
* These accesses done with PHY address 2 and without using pages.
**/
static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
- u16 *data, bool read)
+ u16 *data, bool read)
{
s32 ret_val;
u32 addr_reg;
u32 data_reg;
/* This takes care of the difference with desktop vs mobile phy */
- addr_reg = (hw->phy.type == e1000_phy_82578) ?
- I82578_ADDR_REG : I82577_ADDR_REG;
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
data_reg = addr_reg + 1;
/* All operations in this function are phy address 2 */
@@ -3050,8 +3062,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
- BM_CS_STATUS_SPEED_MASK;
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
BM_CS_STATUS_SPEED_1000))
@@ -3086,9 +3098,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
if (!ret_val)
- phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
- ? e1000_rev_polarity_reversed
- : e1000_rev_polarity_normal;
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
return ret_val;
}
@@ -3215,8 +3227,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
- I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+ length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+ I82577_DSTATUS_CABLE_LENGTH_SHIFT);
if (length == E1000_CABLE_LENGTH_UNDEFINED)
return -E1000_ERR_PHY;
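
The hunk at the top of this file adds a sanity check that the register offset echoed back in the MDIC word matches the offset that was requested before the transaction result is trusted. A minimal stand-alone sketch of that check follows; the mask/shift values and the pr_debug/-EIO error handling are illustrative assumptions, not copied from the e1000e headers.

#include <linux/types.h>
#include <linux/printk.h>
#include <linux/errno.h>

/* Illustrative values: bits 20:16 of the MDIC word are assumed to echo
 * the register offset of the completed transaction. */
#define DEMO_MDIC_REG_MASK	0x001F0000
#define DEMO_MDIC_REG_SHIFT	16

static int demo_mdic_check_offset(u32 mdic, u32 requested)
{
	u32 returned = (mdic & DEMO_MDIC_REG_MASK) >> DEMO_MDIC_REG_SHIFT;

	if (returned != requested) {
		pr_debug("MDI offset error - requested %u, returned %u\n",
			 requested, returned);
		return -EIO;
	}
	return 0;
}

The same file also swaps udelay(150) for usleep_range(150, 300) in the PHY hardware reset path; for waits this long in non-atomic context, a ranged sleep avoids a pure busy-wait and lets the timer code coalesce wakeups.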
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d60cd439341..bea46bb2606 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
&tx_ring->dma, GFP_KERNEL);
-
if (!tx_ring->desc)
goto err;
@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
-
if (!rx_ring->desc)
goto err;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index b5f94abe3cf..5dc119fd95a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -717,14 +717,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = ALIGN(txdr->size, 4096);
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (!txdr->desc) {
vfree(txdr->buffer_info);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
- memset(txdr->desc, 0, txdr->size);
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
@@ -807,8 +804,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
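
This ixgb hunk folds the explicit memset() into the allocation by passing __GFP_ZERO to dma_alloc_coherent() and drops the out-of-memory message, since the allocator already warns on failure; the pxa168, pch_gbe, myri10ge and pasemi hunks further down make the same conversion. A minimal sketch of the pattern, using a made-up ring structure (newer kernels zero coherent allocations unconditionally, so the flag later became redundant):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct demo_ring {
	void *desc;
	dma_addr_t dma;
	unsigned int size;
};

/* Allocate a zeroed descriptor ring; __GFP_ZERO replaces the separate
 * memset(desc, 0, size) that used to follow the allocation. */
static int demo_ring_alloc(struct device *dev, struct demo_ring *ring)
{
	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
					GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc)
		return -ENOMEM;		/* no extra error message needed */
	return 0;
}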
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index db5611ae407..e56a3d169e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7007,7 +7007,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
int err;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
- return -EOPNOTSUPP;
+ return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
/* Hardware does not support aging addresses so if a
* ndm_state is given only allow permanent addresses
@@ -7038,44 +7038,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
}
-static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
-
- if (ndm->ndm_state & NUD_PERMANENT) {
- pr_info("%s: FDB only supports static addresses\n",
- ixgbe_driver_name);
- return -EINVAL;
- }
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev,
- int idx)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
-}
-
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh)
{
@@ -7171,8 +7133,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
.ndo_fdb_add = ixgbe_ndo_fdb_add,
- .ndo_fdb_del = ixgbe_ndo_fdb_del,
- .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
.ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
};
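
With its private fdb_del/fdb_dump handlers removed, ixgbe now relies on the core defaults, and when SR-IOV is off it forwards fdb_add to ndo_dflt_fdb_add() instead of failing with -EOPNOTSUPP. A sketch of that delegation, with the private structure and flag invented for the example and the ndo_dflt_fdb_add() signature taken from the call in this hunk:

#include <linux/netdevice.h>
#include <linux/errno.h>

struct demo_priv {
	bool sriov_enabled;
};

/* Defer to the software FDB unless hardware filters must be programmed. */
static int demo_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 flags)
{
	struct demo_priv *priv = netdev_priv(dev);

	if (!priv->sriov_enabled)
		return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);

	/* hardware-specific unicast/multicast handling would go here */
	return -EOPNOTSUPP;
}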
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268..b3e6530637e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -661,13 +661,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) {
- eth_random_addr(vf_mac_addr);
- e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
- vfn, vf_mac_addr);
- /*
- * Store away the VF "permananet" MAC address, it will ask
- * for it later.
- */
+ eth_zero_addr(vf_mac_addr);
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
}
@@ -688,7 +682,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_vf_reset_event(adapter, vf);
/* set vf mac address */
- ixgbe_set_vf_mac(adapter, vf, vf_mac);
+ if (!is_zero_ether_addr(vf_mac))
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32;
reg_offset = vf / 32;
@@ -729,8 +724,16 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/* reply to reset with ack and vf mac address */
- msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(addr, vf_mac, ETH_ALEN);
+ msgbuf[0] = IXGBE_VF_RESET;
+ if (!is_zero_ether_addr(vf_mac)) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+ } else {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ dev_warn(&adapter->pdev->dev,
+ "VF %d has no MAC address assigned, you may have to assign one manually\n",
+ vf);
+ }
/*
* Piggyback the multicast filter type so VF can compute the
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fc0af9a3bb3..fff0d986752 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
+ union ixgbe_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2b6cb5ca48e..eeae9349f78 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbevf_pci_tbl[] = {
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
- board_82599_vf},
- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
- board_X540_vf},
-
+static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
/* required last entry */
{0, }
};
@@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
if (test_bit(__IXGBEVF_DOWN, &adapter->state))
return true;
i = tx_ring->next_to_clean;
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ eop_desc = tx_buffer_info->next_to_watch;
- while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
+ do {
bool cleaned = false;
- rmb(); /* read buffer_info after eop_desc */
- /* eop could change between read and DD-check */
- if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
- goto cont_loop;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer_info->next_to_watch = NULL;
+
for ( ; !cleaned; count++) {
struct sk_buff *skb;
tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = tx_buffer_info->skb;
if (cleaned && skb) {
@@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
i++;
if (i == tx_ring->count)
i = 0;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
}
-cont_loop:
- eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
- }
+ eop_desc = tx_buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
@@ -2046,6 +2052,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
int err;
/* PCI config space info */
@@ -2065,18 +2072,26 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
- "PF still in reset state, assigning new address\n");
- eth_hw_addr_random(adapter->netdev);
- memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
- adapter->netdev->addr_len);
+ "PF still in reset state. Is the PF interface up?\n");
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
pr_err("init_shared_code failed: %d\n", err);
goto out;
}
- memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
- adapter->netdev->addr_len);
+ err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+ if (err)
+ dev_info(&pdev->dev, "Error reading MAC address\n");
+ else if (is_zero_ether_addr(adapter->hw.mac.addr))
+ dev_info(&pdev->dev,
+ "MAC address not assigned by administrator.\n");
+ memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ }
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ dev_info(&pdev->dev, "Assigning random MAC address\n");
+ eth_hw_addr_random(netdev);
+ memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
}
/* lock to protect mailbox accesses */
@@ -2425,9 +2440,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- hw_dbg(&adapter->hw,
- "Unable to allocate memory for "
- "the receive descriptor ring\n");
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
goto alloc_failed;
@@ -2822,8 +2834,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
}
static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ struct sk_buff *skb, u32 tx_flags)
{
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
@@ -2848,7 +2859,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
size, DMA_TO_DEVICE);
if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->next_to_watch = i;
len -= size;
total -= size;
@@ -2878,7 +2888,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma))
goto dma_error;
tx_buffer_info->mapped_as_page = true;
- tx_buffer_info->next_to_watch = i;
len -= size;
total -= size;
@@ -2897,8 +2906,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
else
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
- tx_ring->tx_buffer_info[first].next_to_watch = i;
- tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
@@ -2907,7 +2914,6 @@ dma_error:
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->next_to_watch = 0;
count--;
/* clear timestamp and dma mappings for remaining portion of packet */
@@ -2924,7 +2930,8 @@ dma_error:
}
static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
- int count, u32 paylen, u8 hdr_len)
+ int count, unsigned int first, u32 paylen,
+ u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -2975,6 +2982,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
}
@@ -3066,15 +3083,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_flags |= IXGBE_TX_FLAGS_CSUM;
ixgbevf_tx_queue(tx_ring, tx_flags,
- ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
- skb->len, hdr_len);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
+ ixgbevf_tx_map(tx_ring, skb, tx_flags),
+ first, skb->len, hdr_len);
writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
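
The ixgbevf rework above changes next_to_watch from a ring index to a pointer to the end-of-packet descriptor, clears it before the buffers are released so the watchdog cannot see stale work, and only publishes it after the wmb() so every descriptor write is globally visible first. A condensed sketch of the cleanup side, with the structures reduced to what the logic needs (one descriptor per packet for brevity; the real driver also inserts a read barrier between loading next_to_watch and testing the DD bit):

#include <linux/types.h>

#define DEMO_STAT_DD	0x00000001	/* "descriptor done" bit, placeholder */

struct demo_tx_desc {
	u32 wb_status;
};

struct demo_tx_buffer {
	struct demo_tx_desc *next_to_watch;	/* EOP descriptor, or NULL */
};

struct demo_tx_ring {
	struct demo_tx_buffer *buf;
	unsigned int count;
	unsigned int next_to_clean;
};

static void demo_clean_tx(struct demo_tx_ring *ring)
{
	unsigned int i = ring->next_to_clean, count = 0;

	do {
		struct demo_tx_buffer *buf = &ring->buf[i];
		struct demo_tx_desc *eop = buf->next_to_watch;

		if (!eop)				/* nothing pending */
			break;
		if (!(eop->wb_status & DEMO_STAT_DD))
			break;				/* hardware not done */

		buf->next_to_watch = NULL;		/* prevent false hangs */
		/* unmap and free this packet's buffers here */
		if (++i == ring->count)
			i = 0;
	} while (++count < ring->count);

	ring->next_to_clean = i;
}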
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c94557b53d..387b52635bc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -109,7 +109,12 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
if (ret_val)
return ret_val;
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ /* New versions of the PF may NACK the reset return message
+ * to indicate that no MAC address has yet been assigned for
+ * the VF.
+ */
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index edfba937092..5170ecb00ac 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -23,6 +23,7 @@ config MV643XX_ETH
depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
select INET_LRO
select PHYLIB
+ select MVMDIO
---help---
This driver supports the gigabit ethernet MACs in the
Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -38,9 +39,7 @@ config MVMDIO
interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
Dove, Armada 370 and Armada XP).
- For now, this driver is only needed for the MVNETA driver
- (used on Armada 370 and XP), but it could be used in the
- future by the MV643XX_ETH driver.
+ This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA
tristate "Marvell Armada 370/XP network interface support"
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 7f63b4aac43..5c4a7765ff0 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -2,8 +2,8 @@
# Makefile for the Marvell device drivers.
#
-obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_MVMDIO) += mvmdio.o
+obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
obj-$(CONFIG_MVNETA) += mvneta.o
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 6562c736a1d..aedbd8256ad 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -20,6 +20,8 @@
* Copyright (C) 2007-2008 Marvell Semiconductor
* Lennert Buytenhek <buytenh@marvell.com>
*
+ * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -67,14 +69,6 @@ static char mv643xx_eth_driver_version[] = "1.4";
* Registers shared between all ports.
*/
#define PHY_ADDR 0x0000
-#define SMI_REG 0x0004
-#define SMI_BUSY 0x10000000
-#define SMI_READ_VALID 0x08000000
-#define SMI_OPCODE_READ 0x04000000
-#define SMI_OPCODE_WRITE 0x00000000
-#define ERR_INT_CAUSE 0x0080
-#define ERR_INT_SMI_DONE 0x00000010
-#define ERR_INT_MASK 0x0084
#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
@@ -264,25 +258,6 @@ struct mv643xx_eth_shared_private {
void __iomem *base;
/*
- * Points at the right SMI instance to use.
- */
- struct mv643xx_eth_shared_private *smi;
-
- /*
- * Provides access to local SMI interface.
- */
- struct mii_bus *smi_bus;
-
- /*
- * If we have access to the error interrupt pin (which is
- * somewhat misnamed as it not only reflects internal errors
- * but also reflects SMI completion), use that to wait for
- * SMI access completion instead of polling the SMI busy bit.
- */
- int err_interrupt;
- wait_queue_head_t smi_busy_wait;
-
- /*
* Per-port MBUS window access register value.
*/
u32 win_protect;
@@ -1120,97 +1095,6 @@ out_write:
wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}
-static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
-{
- struct mv643xx_eth_shared_private *msp = dev_id;
-
- if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
- writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
- wake_up(&msp->smi_busy_wait);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static int smi_is_done(struct mv643xx_eth_shared_private *msp)
-{
- return !(readl(msp->base + SMI_REG) & SMI_BUSY);
-}
-
-static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
-{
- if (msp->err_interrupt == NO_IRQ) {
- int i;
-
- for (i = 0; !smi_is_done(msp); i++) {
- if (i == 10)
- return -ETIMEDOUT;
- msleep(10);
- }
-
- return 0;
- }
-
- if (!smi_is_done(msp)) {
- wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
- msecs_to_jiffies(100));
- if (!smi_is_done(msp))
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
-{
- struct mv643xx_eth_shared_private *msp = bus->priv;
- void __iomem *smi_reg = msp->base + SMI_REG;
- int ret;
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- ret = readl(smi_reg);
- if (!(ret & SMI_READ_VALID)) {
- pr_warn("SMI bus read not valid\n");
- return -ENODEV;
- }
-
- return ret & 0xffff;
-}
-
-static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
-{
- struct mv643xx_eth_shared_private *msp = bus->priv;
- void __iomem *smi_reg = msp->base + SMI_REG;
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- writel(SMI_OPCODE_WRITE | (reg << 21) |
- (addr << 16) | (val & 0xffff), smi_reg);
-
- if (smi_wait_ready(msp)) {
- pr_warn("SMI bus busy timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-
/* statistics ***************************************************************/
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
{
@@ -1523,6 +1407,34 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
return 0;
}
+static void
+mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ wol->supported = 0;
+ wol->wolopts = 0;
+ if (mp->phy)
+ phy_ethtool_get_wol(mp->phy, wol);
+}
+
+static int
+mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+ int err;
+
+ if (mp->phy == NULL)
+ return -EOPNOTSUPP;
+
+ err = phy_ethtool_set_wol(mp->phy, wol);
+ /* Given that mv643xx_eth works without the marvell-specific PHY driver,
+ * this debugging hint is useful to have.
+ */
+ if (err == -EOPNOTSUPP)
+ netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
+ return err;
+}
+
static int
mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
@@ -1708,6 +1620,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
.get_sset_count = mv643xx_eth_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_wol = mv643xx_eth_get_wol,
+ .set_wol = mv643xx_eth_set_wol,
};
@@ -2656,47 +2570,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
goto out_free;
/*
- * Set up and register SMI bus.
- */
- if (pd == NULL || pd->shared_smi == NULL) {
- msp->smi_bus = mdiobus_alloc();
- if (msp->smi_bus == NULL)
- goto out_unmap;
-
- msp->smi_bus->priv = msp;
- msp->smi_bus->name = "mv643xx_eth smi";
- msp->smi_bus->read = smi_bus_read;
- msp->smi_bus->write = smi_bus_write,
- snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
- pdev->name, pdev->id);
- msp->smi_bus->parent = &pdev->dev;
- msp->smi_bus->phy_mask = 0xffffffff;
- if (mdiobus_register(msp->smi_bus) < 0)
- goto out_free_mii_bus;
- msp->smi = msp;
- } else {
- msp->smi = platform_get_drvdata(pd->shared_smi);
- }
-
- msp->err_interrupt = NO_IRQ;
- init_waitqueue_head(&msp->smi_busy_wait);
-
- /*
- * Check whether the error interrupt is hooked up.
- */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res != NULL) {
- int err;
-
- err = request_irq(res->start, mv643xx_eth_err_irq,
- IRQF_SHARED, "mv643xx_eth", msp);
- if (!err) {
- writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
- msp->err_interrupt = res->start;
- }
- }
-
- /*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
@@ -2711,10 +2584,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
return 0;
-out_free_mii_bus:
- mdiobus_free(msp->smi_bus);
-out_unmap:
- iounmap(msp->base);
out_free:
kfree(msp);
out:
@@ -2724,14 +2593,7 @@ out:
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
{
struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
- struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
- if (pd == NULL || pd->shared_smi == NULL) {
- mdiobus_unregister(msp->smi_bus);
- mdiobus_free(msp->smi_bus);
- }
- if (msp->err_interrupt != NO_IRQ)
- free_irq(msp->err_interrupt, msp);
iounmap(msp->base);
kfree(msp);
@@ -2794,14 +2656,21 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->txq_count = pd->tx_queue_count ? : 1;
}
+static void mv643xx_eth_adjust_link(struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+ mv643xx_adjust_pscr(mp);
+}
+
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
- struct mii_bus *bus = mp->shared->smi->smi_bus;
struct phy_device *phydev;
int start;
int num;
int i;
+ char phy_id[MII_BUS_ID_SIZE + 3];
if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
start = phy_addr_get(mp) & 0x1f;
@@ -2811,17 +2680,19 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
num = 1;
}
- phydev = NULL;
+ /* Attempt to connect to the PHY using orion-mdio */
+ phydev = ERR_PTR(-ENODEV);
for (i = 0; i < num; i++) {
int addr = (start + i) & 0x1f;
- if (bus->phy_map[addr] == NULL)
- mdiobus_scan(bus, addr);
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ "orion-mdio-mii", addr);
- if (phydev == NULL) {
- phydev = bus->phy_map[addr];
- if (phydev != NULL)
- phy_addr_set(mp, addr);
+ phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
+ PHY_INTERFACE_MODE_GMII);
+ if (!IS_ERR(phydev)) {
+ phy_addr_set(mp, addr);
+ break;
}
}
@@ -2834,8 +2705,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy_reset(mp);
- phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
-
if (speed == 0) {
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
@@ -2943,11 +2812,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
- if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
+ if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
mp->phy = phy_scan(mp, pd->phy_addr);
- if (mp->phy != NULL)
+ if (IS_ERR(mp->phy)) {
+ err = PTR_ERR(mp->phy);
+ if (err == -ENODEV)
+ err = -EPROBE_DEFER;
+ goto out;
+ }
phy_init(mp, pd->speed, pd->duplex);
+ }
SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
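
With the private SMI code deleted, mv643xx_eth looks its PHY up on the bus registered by the separate mvmdio driver: it formats a phylib bus id with PHY_ID_FMT and calls phy_connect(), and the probe maps a failed lookup to -EPROBE_DEFER so it is retried once orion-mdio has bound. A reduced sketch of that lookup; the bus name is the one used in this hunk, everything else is illustrative:

#include <linux/kernel.h>
#include <linux/phy.h>

static void demo_adjust_link(struct net_device *dev)
{
	/* react to link/speed/duplex changes reported by phylib */
}

/* Try a single PHY address on the "orion-mdio-mii" bus; returns the
 * attached phy_device or an ERR_PTR() that the caller can turn into
 * -EPROBE_DEFER while the MDIO driver has not probed yet. */
static struct phy_device *demo_phy_connect(struct net_device *dev, int addr)
{
	char phy_id[MII_BUS_ID_SIZE + 3];

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "orion-mdio-mii", addr);
	return phy_connect(dev, phy_id, demo_adjust_link,
			   PHY_INTERFACE_MODE_GMII);
}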
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 77b7c80262f..7b5158f654c 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -24,10 +24,13 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/phy.h>
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_mdio.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#define MVMDIO_SMI_DATA_SHIFT 0
#define MVMDIO_SMI_PHY_ADDR_SHIFT 16
@@ -36,33 +39,58 @@
#define MVMDIO_SMI_WRITE_OPERATION 0
#define MVMDIO_SMI_READ_VALID BIT(27)
#define MVMDIO_SMI_BUSY BIT(28)
+#define MVMDIO_ERR_INT_CAUSE 0x007C
+#define MVMDIO_ERR_INT_SMI_DONE 0x00000010
+#define MVMDIO_ERR_INT_MASK 0x0080
struct orion_mdio_dev {
struct mutex lock;
- void __iomem *smireg;
+ void __iomem *regs;
+ /*
+ * If we have access to the error interrupt pin (which is
+ * somewhat misnamed as it not only reflects internal errors
+ * but also reflects SMI completion), use that to wait for
+ * SMI access completion instead of polling the SMI busy bit.
+ */
+ int err_interrupt;
+ wait_queue_head_t smi_busy_wait;
};
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+ return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
/* Wait for the SMI unit to be ready for another operation
*/
static int orion_mdio_wait_ready(struct mii_bus *bus)
{
struct orion_mdio_dev *dev = bus->priv;
int count;
- u32 val;
- count = 0;
- while (1) {
- val = readl(dev->smireg);
- if (!(val & MVMDIO_SMI_BUSY))
- break;
+ if (dev->err_interrupt <= 0) {
+ count = 0;
+ while (1) {
+ if (orion_mdio_smi_is_done(dev))
+ break;
- if (count > 100) {
- dev_err(bus->parent, "Timeout: SMI busy for too long\n");
- return -ETIMEDOUT;
- }
+ if (count > 100) {
+ dev_err(bus->parent,
+ "Timeout: SMI busy for too long\n");
+ return -ETIMEDOUT;
+ }
- udelay(10);
- count++;
+ udelay(10);
+ count++;
+ }
+ } else {
+ if (!orion_mdio_smi_is_done(dev)) {
+ wait_event_timeout(dev->smi_busy_wait,
+ orion_mdio_smi_is_done(dev),
+ msecs_to_jiffies(100));
+ if (!orion_mdio_smi_is_done(dev))
+ return -ETIMEDOUT;
+ }
}
return 0;
@@ -87,12 +115,12 @@ static int orion_mdio_read(struct mii_bus *bus, int mii_id,
writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_READ_OPERATION),
- dev->smireg);
+ dev->regs);
/* Wait for the value to become available */
count = 0;
while (1) {
- val = readl(dev->smireg);
+ val = readl(dev->regs);
if (val & MVMDIO_SMI_READ_VALID)
break;
@@ -129,7 +157,7 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
MVMDIO_SMI_WRITE_OPERATION |
(value << MVMDIO_SMI_DATA_SHIFT)),
- dev->smireg);
+ dev->regs);
mutex_unlock(&dev->lock);
@@ -141,13 +169,34 @@ static int orion_mdio_reset(struct mii_bus *bus)
return 0;
}
+static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
+{
+ struct orion_mdio_dev *dev = dev_id;
+
+ if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
+ MVMDIO_ERR_INT_SMI_DONE) {
+ writel(~MVMDIO_ERR_INT_SMI_DONE,
+ dev->regs + MVMDIO_ERR_INT_CAUSE);
+ wake_up(&dev->smi_busy_wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int orion_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
+ struct resource *r;
struct mii_bus *bus;
struct orion_mdio_dev *dev;
int i, ret;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No SMI register address given\n");
+ return -ENODEV;
+ }
+
bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
if (!bus) {
dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
@@ -172,33 +221,54 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->irq[i] = PHY_POLL;
dev = bus->priv;
- dev->smireg = of_iomap(pdev->dev.of_node, 0);
- if (!dev->smireg) {
- dev_err(&pdev->dev, "No SMI register address given in DT\n");
- kfree(bus->irq);
- mdiobus_free(bus);
- return -ENODEV;
+ dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!dev->regs) {
+ dev_err(&pdev->dev, "Unable to remap SMI register\n");
+ ret = -ENODEV;
+ goto out_mdio;
+ }
+
+ init_waitqueue_head(&dev->smi_busy_wait);
+
+ dev->err_interrupt = platform_get_irq(pdev, 0);
+ if (dev->err_interrupt != -ENXIO) {
+ ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
+ orion_mdio_err_irq,
+ IRQF_SHARED, pdev->name, dev);
+ if (ret)
+ goto out_mdio;
+
+ writel(MVMDIO_ERR_INT_SMI_DONE,
+ dev->regs + MVMDIO_ERR_INT_MASK);
}
mutex_init(&dev->lock);
- ret = of_mdiobus_register(bus, np);
+ if (pdev->dev.of_node)
+ ret = of_mdiobus_register(bus, pdev->dev.of_node);
+ else
+ ret = mdiobus_register(bus);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
- iounmap(dev->smireg);
- kfree(bus->irq);
- mdiobus_free(bus);
- return ret;
+ goto out_mdio;
}
platform_set_drvdata(pdev, bus);
return 0;
+
+out_mdio:
+ kfree(bus->irq);
+ mdiobus_free(bus);
+ return ret;
}
static int orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct orion_mdio_dev *dev = bus->priv;
+
+ writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
kfree(bus->irq);
mdiobus_free(bus);
@@ -225,3 +295,4 @@ module_platform_driver(orion_mdio_driver);
MODULE_DESCRIPTION("Marvell MDIO interface driver");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion-mdio");
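
The interrupt-assisted completion wait that used to live in mv643xx_eth moves here: when the (somewhat misnamed) error interrupt is wired up, the driver sleeps on a waitqueue that the ISR wakes on SMI completion, and falls back to polling the busy bit otherwise. A compact sketch of that wait, with the register layout and names as placeholders:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

#define DEMO_SMI_BUSY	BIT(28)		/* placeholder bit position */

struct demo_mdio {
	void __iomem *regs;
	int irq;			/* <= 0 means: poll instead */
	wait_queue_head_t done;		/* woken by the completion ISR */
};

static int demo_smi_is_done(struct demo_mdio *dev)
{
	return !(readl(dev->regs) & DEMO_SMI_BUSY);
}

static int demo_smi_wait_ready(struct demo_mdio *dev)
{
	if (dev->irq <= 0) {
		int i;

		for (i = 0; !demo_smi_is_done(dev); i++) {
			if (i > 100)
				return -ETIMEDOUT;
			udelay(10);
		}
		return 0;
	}

	if (!demo_smi_is_done(dev)) {
		wait_event_timeout(dev->done, demo_smi_is_done(dev),
				   msecs_to_jiffies(100));
		if (!demo_smi_is_done(dev))
			return -ETIMEDOUT;
	}
	return 0;
}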
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969b..e48261e468f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE,
&rxq->descs_phys, GFP_KERNEL);
- if (rxq->descs == NULL) {
- netdev_err(pp->dev,
- "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
- rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
- rxq->size);
+ if (rxq->descs == NULL)
return -ENOMEM;
- }
BUG_ON(rxq->descs !=
PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
&txq->descs_phys, GFP_KERNEL);
- if (txq->descs == NULL) {
- netdev_err(pp->dev,
- "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
- txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
- txq->size);
+ if (txq->descs == NULL)
return -ENOMEM;
- }
/* Make sure descriptor address is cache line size aligned */
BUG_ON(txq->descs !=
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 037ed866c22..339bb323cb0 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep)
*/
if (pep->htpr == NULL) {
pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma, GFP_KERNEL);
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (pep->htpr == NULL)
return -ENOMEM;
+ } else {
+ memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
}
- memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
wrl(pep, HTPR, pep->htpr_dma);
return 0;
}
@@ -1023,13 +1025,11 @@ static int rxq_init(struct net_device *dev)
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma, GFP_KERNEL);
- if (!pep->p_rx_desc_area) {
- printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
- dev->name, size);
+ &pep->rx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pep->p_rx_desc_area)
goto out;
- }
- memset((void *)pep->p_rx_desc_area, 0, size);
+
/* initialize the next_desc_ptr links in the Rx descriptors ring */
p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
@@ -1086,13 +1086,10 @@ static int txq_init(struct net_device *dev)
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma, GFP_KERNEL);
- if (!pep->p_tx_desc_area) {
- printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
- dev->name, size);
+ &pep->tx_desc_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!pep->p_tx_desc_area)
goto out;
- }
- memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index fdc5f23d8e9..05267d716e8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1837,10 +1837,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma,
GFP_KERNEL);
- if (!priv->mfunc.vhcr) {
- mlx4_err(dev, "Couldn't allocate VHCR.\n");
+ if (!priv->mfunc.vhcr)
goto err_hcr;
- }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f278b10ef71..61b56781f7a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1931,79 +1931,6 @@ static int mlx4_en_set_features(struct net_device *netdev,
}
-static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr, u16 flags)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
- int err;
-
- if (!mlx4_is_mfunc(mdev))
- return -EOPNOTSUPP;
-
- /* Hardware does not support aging addresses, allow only
- * permanent addresses if ndm_state is given
- */
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- en_info(priv, "Add FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
- else
- err = -EINVAL;
-
- /* Only return duplicate errors if NLM_F_EXCL is set */
- if (err == -EEXIST && !(flags & NLM_F_EXCL))
- err = 0;
-
- return err;
-}
-
-static int mlx4_en_fdb_del(struct ndmsg *ndm,
- struct nlattr *tb[],
- struct net_device *dev,
- const unsigned char *addr)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
- int err;
-
- if (!mlx4_is_mfunc(mdev))
- return -EOPNOTSUPP;
-
- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
- en_info(priv, "Del FDB only supports static addresses\n");
- return -EINVAL;
- }
-
- if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
- err = dev_uc_del(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_del(dev, addr);
- else
- err = -EINVAL;
-
- return err;
-}
-
-static int mlx4_en_fdb_dump(struct sk_buff *skb,
- struct netlink_callback *cb,
- struct net_device *dev, int idx)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_dev *mdev = priv->mdev->dev;
-
- if (mlx4_is_mfunc(mdev))
- idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
- return idx;
-}
-
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -2025,9 +1952,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
- .ndo_fdb_add = mlx4_en_fdb_add,
- .ndo_fdb_del = mlx4_en_fdb_del,
- .ndo_fdb_dump = mlx4_en_fdb_dump,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 3488c6d9e6b..2448f0d669e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
/* build the pkt before xmit */
skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
- if (!skb) {
- en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+ if (!skb)
return -ENOMEM;
- }
+
skb_reserve(skb, NET_IP_ALIGN);
ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 07a6ebc47c9..b6c60fdef4f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1622,25 +1622,7 @@ static struct platform_driver ks8695_driver = {
.resume = ks8695_drv_resume,
};
-/* Module interface */
-
-static int __init
-ks8695_init(void)
-{
- printk(KERN_INFO "%s Ethernet driver, V%s\n",
- MODULENAME, MODULEVERSION);
-
- return platform_driver_register(&ks8695_driver);
-}
-
-static void __exit
-ks8695_cleanup(void)
-{
- platform_driver_unregister(&ks8695_driver);
-}
-
-module_init(ks8695_init);
-module_exit(ks8695_cleanup);
+module_platform_driver(ks8695_driver);
MODULE_AUTHOR("Simtec Electronics");
MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index a343066f7b4..ddaf138ce0d 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -792,20 +792,35 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
frame_hdr = ks->frame_head_info;
while (ks->frame_cnt--) {
+ if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
+ frame_hdr->len >= RX_BUF_SIZE ||
+ frame_hdr->len <= 0)) {
+
+ /* discard an invalid packet */
+ ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+ netdev->stats.rx_dropped++;
+ if (!(frame_hdr->sts & RXFSHR_RXFV))
+ netdev->stats.rx_frame_errors++;
+ else
+ netdev->stats.rx_length_errors++;
+ frame_hdr++;
+ continue;
+ }
+
skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
- if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
- (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+ if (likely(skb)) {
skb_reserve(skb, 2);
/* read data block including CRC 4 bytes */
ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
- skb_put(skb, frame_hdr->len);
+ skb_put(skb, frame_hdr->len - 4);
skb->protocol = eth_type_trans(skb, netdev);
netif_rx(skb);
+ /* exclude CRC size */
+ netdev->stats.rx_bytes += frame_hdr->len - 4;
+ netdev->stats.rx_packets++;
} else {
- pr_err("%s: err:skb alloc\n", __func__);
ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
- if (skb)
- dev_kfree_skb_irq(skb);
+ netdev->stats.rx_dropped++;
}
frame_hdr++;
}
@@ -877,6 +892,8 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
}
+ if (unlikely(status & IRQ_RXOI))
+ ks->netdev->stats.rx_over_errors++;
/* this should be the last in IRQ handler*/
ks_restore_cmd_reg(ks);
return IRQ_HANDLED;
@@ -1015,6 +1032,9 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
ks_write_qmu(ks, skb->data, skb->len);
+ /* add tx statistics */
+ netdev->stats.tx_bytes += skb->len;
+ netdev->stats.tx_packets++;
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4f9937e026e..d5ffdc8264e 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3592,10 +3592,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
&ss->rx_done.bus,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_ZERO);
if (ss->rx_done.entry == NULL)
goto abort;
- memset(ss->rx_done.entry, 0, bytes);
bytes = sizeof(*ss->fw_stats);
ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
&ss->fw_stats_bus,
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index b0b36154636..c20766c2f65 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- if ((lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
- dev_name(lp->device));
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
+ if (lp->descriptors == NULL)
goto out;
- }
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0ffde69c8d0..346a4e025c3 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */
- if ((lp->descriptors = dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
- dev_name(lp->device));
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
+ if (lp->descriptors == NULL)
return -ENOMEM;
- }
/* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 46795e40346..1bd419dbda6 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev)
/* Malloc up new buffer. */
new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
if (new_skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
lp->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 5e4748e855f..c2e0256fe3d 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev)
* We also allocate extra space for a pointer to allow freeing
* this structure later on (in xtsonic_cleanup_module()).
*/
- lp->descriptors =
- dma_alloc_coherent(lp->device,
- SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
- &lp->descriptors_laddr, GFP_KERNEL);
-
+ lp->descriptors = dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC *
+ SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr,
+ GFP_KERNEL);
if (lp->descriptors == NULL) {
- printk(KERN_ERR "%s: couldn't alloc DMA memory for "
- " descriptors.\n", dev_name(lp->device));
err = -ENOMEM;
goto out;
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bfd887382e1..3371ff41bb3 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -80,6 +80,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/tcp.h>
+#include <net/checksum.h>
#include <asm/div64.h>
#include <asm/irq.h>
@@ -8337,16 +8338,13 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
struct iphdr *ip = lro->iph;
struct tcphdr *tcp = lro->tcph;
- __sum16 nchk;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
/* Update L3 header */
+ csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
ip->tot_len = htons(lro->total_len);
- ip->check = 0;
- nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
- ip->check = nchk;
/* Update L4 header */
tcp->ack_seq = lro->tcp_ack;
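
Here and in the netxen LRO hunk below, the IP header checksum is no longer zeroed and recomputed from scratch; csum_replace2() patches it incrementally when tot_len changes. The arithmetic behind that helper is the RFC 1624 update, and the small userspace program below (checksum routines written out by hand, header words made up) verifies that the incremental update matches a full recompute:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over 16-bit words (len in bytes, even). */
static uint16_t csum_full(const uint16_t *w, int len)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < len / 2; i++)
		sum += w[i];
	return (uint16_t)~csum_fold(sum);
}

/* RFC 1624 update for a single 16-bit field change - the arithmetic
 * csum_replace2() performs for the tot_len fixup in these drivers. */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	/* 20-byte IPv4 header as 16-bit words; checksum word starts as 0. */
	uint16_t hdr[10] = { 0x4500, 0x0054, 0x1c46, 0x4000, 0x4006,
			     0x0000, 0xac10, 0x0a63, 0xac10, 0x0a0c };
	uint16_t new_len = 0x05dc, incremental;

	hdr[5] = csum_full(hdr, 20);		/* seed a valid checksum */

	incremental = csum_update16(hdr[5], hdr[1], new_len);
	hdr[1] = new_len;
	hdr[5] = 0;

	printf("incremental 0x%04x, full recompute 0x%04x\n",
	       incremental, csum_full(hdr, 20));
	return 0;
}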
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 63e7af44366..cb9e6383150 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev)
skb = netdev_alloc_skb(ndev, len);
if (unlikely(skb == NULL)) {
- printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
- ndev->name);
ndev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 162da8975b0..3df8287b745 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -287,23 +287,16 @@ static int w90p910_init_desc(struct net_device *dev)
ether = netdev_priv(dev);
pdev = ether->pdev;
- ether->tdesc = (struct tran_pdesc *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- &ether->tdesc_phys, GFP_KERNEL);
-
- if (!ether->tdesc) {
- dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+ ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+ &ether->tdesc_phys, GFP_KERNEL);
+ if (!ether->tdesc)
return -ENOMEM;
- }
-
- ether->rdesc = (struct recv_pdesc *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
- &ether->rdesc_phys, GFP_KERNEL);
+ ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+ &ether->rdesc_phys, GFP_KERNEL);
if (!ether->rdesc) {
- dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
- ether->tdesc, ether->tdesc_phys);
+ ether->tdesc, ether->tdesc_phys);
return -ENOMEM;
}
@@ -737,7 +730,6 @@ static void netdev_rx(struct net_device *dev)
data = ether->rdesc->recv_buf[ether->cur_rx];
skb = netdev_alloc_skb(dev, length + 2);
if (!skb) {
- dev_err(&pdev->dev, "get skb buffer error\n");
ether->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0b8de12bcbc..b62262cfe4d 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5025,7 +5025,6 @@ static int nv_loopback_test(struct net_device *dev)
pkt_len = ETH_DATA_LEN;
tx_skb = netdev_alloc_skb(dev, pkt_len);
if (!tx_skb) {
- netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
ret = 0;
goto out;
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index efa29b712d5..89d1b0eadf3 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
dma_alloc_coherent(&pldat->pdev->dev,
pldat->dma_buff_size, &dma_handle,
GFP_KERNEL);
-
if (pldat->dma_buff_base_v == NULL) {
- dev_err(&pdev->dev, "error getting DMA region.\n");
ret = -ENOMEM;
goto err_out_free_irq;
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 73ce7dd6b95..60eb890800e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1469,13 +1469,11 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic,
- GFP_KERNEL);
- if (!rx_ring->rx_buff_pool) {
- pr_err("Unable to allocate memory for the receive pool buffer\n");
+ &rx_ring->rx_buff_pool_logic,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!rx_ring->rx_buff_pool)
return -ENOMEM;
- }
- memset(rx_ring->rx_buff_pool, 0, size);
+
rx_ring->rx_buff_pool_size = size;
for (i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
@@ -1774,13 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma, GFP_KERNEL);
+ &tx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
- pr_err("Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM;
}
- memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -1820,14 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma, GFP_KERNEL);
-
+ &rx_ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!rx_ring->desc) {
- pr_err("Unable to allocate memory for the receive descriptor ring\n");
vfree(rx_ring->buffer_info);
return -ENOMEM;
}
- memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
for (desNo = 0; desNo < rx_ring->count; desNo++) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index b1cfbb75ff1..a5f0b5da614 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma, GFP_KERNEL);
+ &ring->buf_dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!ring->buffers)
goto out_ring_desc;
- memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
-
write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
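The pch_gbe and pasemi hunks above (and several qlcnic hunks later in this diff) all make the same conversion: the explicit memset() after dma_alloc_coherent() is dropped in favour of passing __GFP_ZERO alongside the GFP flags. A minimal sketch of the pattern, using a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a zero-filled coherent ring buffer in one call; __GFP_ZERO
 * makes the separate memset(ptr, 0, size) unnecessary. */
static void *example_alloc_zeroed_ring(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle)
{
        return dma_alloc_coherent(dev, size, dma_handle,
                                  GFP_KERNEL | __GFP_ZERO);
}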
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a8669adecc9..0e1797295a4 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -35,6 +35,16 @@ config QLCNIC
This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
devices.
+config QLCNIC_SRIOV
+ bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+ depends on QLCNIC && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QLE83XX Converged Ethernet devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index eb3dfdbb642..322a36b7672 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -955,9 +955,10 @@ typedef struct nx_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
} nx_mac_list_t;
-struct nx_vlan_ip_list {
+struct nx_ip_list {
struct list_head list;
__be32 ip_addr;
+ bool master;
};
/*
@@ -1605,7 +1606,7 @@ struct netxen_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct list_head mac_list;
- struct list_head vlan_ip_list;
+ struct list_head ip_list;
spinlock_t tx_clean_lock;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 4782dcfde73..7692dfd4f26 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <net/checksum.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
@@ -1641,9 +1642,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
th->psh = push;
th->seq = htonl(seq_number);
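The netxen LRO change above swaps a full IPv4 header checksum recomputation for an incremental update: only tot_len changes, so csum_replace2() can fold the 16-bit delta into the existing checksum. A small sketch of the same call, with an illustrative helper name:

#include <net/checksum.h>
#include <linux/ip.h>

/* Patch iph->check in place when only the total-length field changes. */
static void example_set_tot_len(struct iphdr *iph, u16 new_len)
{
        __be16 new_tot_len = htons(new_len);

        csum_replace2(&iph->check, iph->tot_len, new_tot_len);
        iph->tot_len = new_tot_len;
}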
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 501f49207da..7867aebc05f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
static irqreturn_t netxen_msix_intr(int irq, void *data);
-static void netxen_free_vlan_ip_list(struct netxen_adapter *);
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -1450,7 +1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
- INIT_LIST_HEAD(&adapter->vlan_ip_list);
+ INIT_LIST_HEAD(&adapter->ip_list);
err = netxen_setup_pci_map(adapter);
if (err)
@@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->tx_timeout_task);
- netxen_free_vlan_ip_list(adapter);
+ netxen_free_ip_list(adapter, false);
netxen_nic_detach(adapter);
nx_decr_dev_ref_cnt(adapter);
@@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter)
}
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{
- struct nx_vlan_ip_list *cur;
- struct list_head *head = &adapter->vlan_ip_list;
+ struct nx_ip_list *cur, *tmp_cur;
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct nx_vlan_ip_list, list);
- netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
- list_del(&cur->list);
- kfree(cur);
+ list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+ if (master) {
+ if (cur->master) {
+ netxen_config_ipaddr(adapter, cur->ip_addr,
+ NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ } else {
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
}
-
}
-static void
-netxen_list_config_vlan_ip(struct netxen_adapter *adapter,
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
struct in_ifaddr *ifa, unsigned long event)
{
struct net_device *dev;
- struct nx_vlan_ip_list *cur, *tmp_cur;
+ struct nx_ip_list *cur, *tmp_cur;
struct list_head *head;
+ bool ret = false;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
if (dev == NULL)
- return;
-
- if (!is_vlan_dev(dev))
- return;
+ goto out;
switch (event) {
case NX_IP_UP:
- list_for_each(head, &adapter->vlan_ip_list) {
- cur = list_entry(head, struct nx_vlan_ip_list, list);
+ list_for_each(head, &adapter->ip_list) {
+ cur = list_entry(head, struct nx_ip_list, list);
if (cur->ip_addr == ifa->ifa_address)
- return;
+ goto out;
}
- cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC);
+ cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
if (cur == NULL)
- return;
-
+ goto out;
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ dev = vlan_dev_real_dev(dev);
+ cur->master = !!netif_is_bond_master(dev);
cur->ip_addr = ifa->ifa_address;
- list_add_tail(&cur->list, &adapter->vlan_ip_list);
+ list_add_tail(&cur->list, &adapter->ip_list);
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ ret = true;
break;
case NX_IP_DOWN:
list_for_each_entry_safe(cur, tmp_cur,
- &adapter->vlan_ip_list, list) {
+ &adapter->ip_list, list) {
if (cur->ip_addr == ifa->ifa_address) {
list_del(&cur->list);
kfree(cur);
+ netxen_config_ipaddr(adapter, ifa->ifa_address,
+ NX_IP_DOWN);
+ ret = true;
break;
}
}
}
+out:
+ return ret;
}
+
static void
netxen_config_indev_addr(struct netxen_adapter *adapter,
struct net_device *dev, unsigned long event)
@@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
for_ifa(indev) {
switch (event) {
case NETDEV_UP:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
+ netxen_list_config_ip(adapter, ifa, NX_IP_UP);
break;
case NETDEV_DOWN:
- netxen_config_ipaddr(adapter,
- ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
+ netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
break;
default:
break;
@@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
- struct nx_vlan_ip_list *pos, *tmp_pos;
+ struct nx_ip_list *pos, *tmp_pos;
unsigned long ip_event;
ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
netxen_config_indev_addr(adapter, netdev, event);
- list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) {
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
}
}
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+ struct netxen_adapter *adapter;
+
+ if (!is_netxen_netdev(dev))
+ return false;
+ adapter = netdev_priv(dev);
+ if (!adapter)
+ return false;
+ if (!netxen_destip_supported(adapter))
+ return false;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return false;
+
+ return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+ struct net_device *master, *slave;
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ /*
+ * This is the case where the netxen nic is being
+ * enslaved and is dev_open()ed in bond_enslave()
+ * Now we should program the bond's (and its vlans')
+ * addresses in the netxen NIC.
+ */
+ if (master && netif_is_bond_master(master) &&
+ !netif_is_bond_slave(dev)) {
+ netxen_config_indev_addr(adapter, master, event);
+ for_each_netdev_rcu(&init_net, slave)
+ if (slave->priv_flags & IFF_802_1Q_VLAN &&
+ vlan_dev_real_dev(slave) == master)
+ netxen_config_indev_addr(adapter, slave, event);
+ }
+ rcu_read_unlock();
+ /*
+ * This is the case where the netxen nic is being
+ * released and is dev_close()ed in bond_release()
+ * just before IFF_BONDING is stripped.
+ */
+ if (!master && dev->priv_flags & IFF_BONDING)
+ netxen_free_ip_list(adapter, true);
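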
+}
+
static int netxen_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
struct net_device *dev = (struct net_device *)ptr;
struct net_device *orig_dev = dev;
+ struct net_device *slave;
recheck:
if (dev == NULL)
@@ -3257,19 +3323,28 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter)
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- netxen_config_indev_addr(adapter, orig_dev, event);
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_config_indev_addr(adapter,
+ orig_dev, event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ /* Act only if the actual netxen is the target */
+ if (orig_dev == dev)
+ netxen_config_master(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
+ }
+ }
done:
return NOTIFY_DONE;
}
@@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netxen_adapter *adapter;
- struct net_device *dev;
-
+ struct net_device *dev, *slave;
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ unsigned long ip_event;
dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
-
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
recheck:
if (dev == NULL)
goto done;
@@ -3293,31 +3368,24 @@ recheck:
dev = vlan_dev_real_dev(dev);
goto recheck;
}
-
- if (!is_netxen_netdev(dev))
- goto done;
-
- adapter = netdev_priv(dev);
-
- if (!adapter || !netxen_destip_supported(adapter))
- goto done;
-
- if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
- goto done;
-
- switch (event) {
- case NETDEV_UP:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
- break;
- case NETDEV_DOWN:
- netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
- netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
- break;
- default:
- break;
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
}
-
done:
return NOTIFY_DONE;
}
@@ -3334,7 +3402,7 @@ static void
netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
{ }
static void
-netxen_free_vlan_ip_list(struct netxen_adapter *adapter)
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
{ }
#endif
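The netxen_nic_main.c changes above generalise the per-VLAN IP list into a single ip_list whose entries can be torn down either wholesale or only for bond-master addresses. The deletion loop relies on list_for_each_entry_safe(), which keeps a lookahead pointer so entries can be unlinked while iterating. A sketch of that pattern with illustrative names (the driver additionally calls netxen_config_ipaddr() before freeing each entry):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

struct example_ip_entry {
        struct list_head list;
        __be32 ip_addr;
        bool master;
};

static void example_free_ip_list(struct list_head *head, bool master_only)
{
        struct example_ip_entry *cur, *tmp;

        list_for_each_entry_safe(cur, tmp, head, list) {
                if (master_only && !cur->master)
                        continue;
                /* hardware notification (NX_IP_DOWN) would happen here */
                list_del(&cur->list);
                kfree(cur);
        }
}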
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 8fd38cb6d26..91a8fcd6c24 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
qdev->lrg_buffer_len);
if (unlikely(!lrg_buf_cb->skb)) {
- netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
qdev->lrg_buf_skb_check++;
} else {
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 7722a203e38..4b1fb3faa3b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -8,4 +8,6 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
- qlcnic_minidump.o
+ qlcnic_minidump.o qlcnic_sriov_common.o
+
+qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ba3c72fce1f..e5af69df36e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,9 +37,9 @@
#include "qlcnic_83xx_hw.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 1
-#define _QLCNIC_LINUX_SUBVERSION 35
-#define QLCNIC_LINUX_VERSIONID "5.1.35"
+#define _QLCNIC_LINUX_MINOR 2
+#define _QLCNIC_LINUX_SUBVERSION 39
+#define QLCNIC_LINUX_VERSIONID "5.2.39"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -449,6 +449,7 @@ struct qlcnic_hardware_context {
struct qlc_83xx_idc idc;
struct qlc_83xx_fw_info fw_info;
struct qlcnic_intrpt_config *intr_tbl;
+ struct qlcnic_sriov *sriov;
u32 *reg_tbl;
u32 *ext_reg_tbl;
u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
@@ -896,6 +897,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
+#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -914,7 +916,9 @@ struct qlcnic_ipaddr {
#define __QLCNIC_AER 5
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
-#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_SRIOV_ENABLE 10
+#define __QLCNIC_SRIOV_CAPABLE 11
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -1009,6 +1013,7 @@ struct qlcnic_adapter {
struct qlcnic_filter_hash fhash;
struct qlcnic_filter_hash rx_fhash;
+ struct list_head vf_mc_list;
spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
@@ -1051,7 +1056,11 @@ struct qlcnic_info_le {
u8 total_pf;
u8 total_rss_engines;
__le16 max_vports;
- u8 reserved2[64];
+ __le16 linkstate_reg_offset;
+ __le16 bit_offsets;
+ __le16 max_local_ipv6_addrs;
+ __le16 max_remote_ipv6_addrs;
+ u8 reserved2[56];
} __packed;
struct qlcnic_info {
@@ -1083,6 +1092,10 @@ struct qlcnic_info {
u8 total_pf;
u8 total_rss_engines;
u16 max_vports;
+ u16 linkstate_reg_offset;
+ u16 bit_offsets;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
};
struct qlcnic_pci_info_le {
@@ -1348,6 +1361,7 @@ struct _cdrp_cmd {
struct qlcnic_cmd_args {
struct _cdrp_cmd req;
struct _cdrp_cmd rsp;
+ int op_type;
};
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1430,6 +1444,7 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
+void __qlcnic_set_multi(struct net_device *netdev);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
@@ -1511,6 +1526,12 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *);
int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
__le16);
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+int qlcnic_read_mac_addr(struct qlcnic_adapter *);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
+void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_vf_add_mc_list(struct net_device *);
+
/*
* QLOGIC Board information
*/
@@ -1567,6 +1588,9 @@ struct qlcnic_hardware_ops {
int (*create_rx_ctx) (struct qlcnic_adapter *);
int (*create_tx_ctx) (struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *, int);
+ void (*del_rx_ctx) (struct qlcnic_adapter *);
+ void (*del_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int (*setup_link_event) (struct qlcnic_adapter *, int);
int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
@@ -1635,7 +1659,10 @@ static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+ if (adapter->ahw->hw_ops->mbx_cmd)
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+
+ return -EIO;
}
static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
@@ -1655,12 +1682,14 @@ static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
{
- adapter->ahw->hw_ops->add_sysfs(adapter);
+ if (adapter->ahw->hw_ops->add_sysfs)
+ adapter->ahw->hw_ops->add_sysfs(adapter);
}
static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
{
- adapter->ahw->hw_ops->remove_sysfs(adapter);
+ if (adapter->ahw->hw_ops->remove_sysfs)
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
}
static inline void
@@ -1681,6 +1710,17 @@ static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
}
+static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->del_rx_ctx(adapter);
+}
+
+static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr)
+{
+ return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
+}
+
static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
int enable)
{
@@ -1778,12 +1818,14 @@ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
- adapter->nic_ops->request_reset(adapter, key);
+ if (adapter->nic_ops->request_reset)
+ adapter->nic_ops->request_reset(adapter, key);
}
static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
{
- adapter->nic_ops->cancel_idc_work(adapter);
+ if (adapter->nic_ops->cancel_idc_work)
+ adapter->nic_ops->cancel_idc_work(adapter);
}
static inline irqreturn_t
@@ -1830,7 +1872,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
} while (0)
#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
@@ -1840,8 +1884,23 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
- return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+
+ return status;
+}
+
+static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
+{
+ return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
}
+static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+}
#endif /* __QLCNIC_H_ */
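Several of the qlcnic.h inline wrappers above gain NULL checks because, with the VF function type added, not every hardware-ops hook is populated. The guarded-dispatch shape, with illustrative struct and hook names:

#include <linux/errno.h>

struct example_hw_ops {
        int (*mbx_cmd)(void *adapter, void *cmd);
};

/* Call the hook only if this function type provides it; otherwise fail
 * cleanly instead of dereferencing a NULL pointer. */
static inline int example_issue_cmd(const struct example_hw_ops *ops,
                                    void *adapter, void *cmd)
{
        if (ops->mbx_cmd)
                return ops->mbx_cmd(adapter, cmd);

        return -EIO;
}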
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index cd5ae8813cb..374fa8a3791 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -6,6 +6,7 @@
*/
#include "qlcnic.h"
+#include "qlcnic_sriov.h"
#include <linux/if_vlan.h>
#include <linux/ipv6.h>
#include <linux/ethtool.h>
@@ -13,100 +14,7 @@
#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
-
-/* status descriptor mailbox data
- * @phy_addr: physical address of buffer
- * @sds_ring_size: buffer size
- * @intrpt_id: interrupt id
- * @intrpt_val: source of interrupt
- */
-struct qlcnic_sds_mbx {
- u64 phy_addr;
- u8 rsvd1[16];
- u16 sds_ring_size;
- u16 rsvd2[3];
- u16 intrpt_id;
- u8 intrpt_val;
- u8 rsvd3[5];
-} __packed;
-
-/* receive descriptor buffer data
- * phy_addr_reg: physical address of regular buffer
- * phy_addr_jmb: physical address of jumbo buffer
- * reg_ring_sz: size of regular buffer
- * reg_ring_len: no. of entries in regular buffer
- * jmb_ring_len: no. of entries in jumbo buffer
- * jmb_ring_sz: size of jumbo buffer
- */
-struct qlcnic_rds_mbx {
- u64 phy_addr_reg;
- u64 phy_addr_jmb;
- u16 reg_ring_sz;
- u16 reg_ring_len;
- u16 jmb_ring_sz;
- u16 jmb_ring_len;
-} __packed;
-
-/* host producers for regular and jumbo rings */
-struct __host_producer_mbx {
- u32 reg_buf;
- u32 jmb_buf;
-} __packed;
-
-/* Receive context mailbox data outbox registers
- * @state: state of the context
- * @vport_id: virtual port id
- * @context_id: receive context id
- * @num_pci_func: number of pci functions of the port
- * @phy_port: physical port id
- */
-struct qlcnic_rcv_mbx_out {
- u8 rcv_num;
- u8 sts_num;
- u16 ctx_id;
- u8 state;
- u8 num_pci_func;
- u8 phy_port;
- u8 vport_id;
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
-} __packed;
-
-struct qlcnic_add_rings_mbx_out {
- u8 rcv_num;
- u8 sts_num;
- u16 ctx_id;
- u32 host_csmr[QLCNIC_MAX_RING_SETS];
- struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
-} __packed;
-
-/* Transmit context mailbox inbox registers
- * @phys_addr: DMA address of the transmit buffer
- * @cnsmr_index: host consumer index
- * @size: legth of transmit buffer ring
- * @intr_id: interrput id
- * @src: src of interrupt
- */
-struct qlcnic_tx_mbx {
- u64 phys_addr;
- u64 cnsmr_index;
- u16 size;
- u16 intr_id;
- u8 src;
- u8 rsvd[3];
-} __packed;
-
-/* Transmit context mailbox outbox registers
- * @host_prod: host producer index
- * @ctx_id: transmit context id
- * @state: state of the transmit context
- */
-struct qlcnic_tx_mbx_out {
- u32 host_prod;
- u16 ctx_id;
- u8 state;
- u8 rsvd;
-} __packed;
+#define QLC_83XX_FW_MBX_CMD 0
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -156,9 +64,11 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
{QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
{QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+ {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
+ {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
};
-static const u32 qlcnic_83xx_ext_reg_tbl[] = {
+const u32 qlcnic_83xx_ext_reg_tbl[] = {
0x38CC, /* Global Reset */
0x38F0, /* Wildcard */
0x38FC, /* Informant */
@@ -204,7 +114,7 @@ static const u32 qlcnic_83xx_ext_reg_tbl[] = {
0x34A4, /* QLC_83XX_ASIC_TEMP */
};
-static const u32 qlcnic_83xx_reg_tbl[] = {
+const u32 qlcnic_83xx_reg_tbl[] = {
0x34A8, /* PEG_HALT_STAT1 */
0x34AC, /* PEG_HALT_STAT2 */
0x34B0, /* FW_HEARTBEAT */
@@ -247,6 +157,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
.create_rx_ctx = qlcnic_83xx_create_rx_ctx,
.create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
.setup_link_event = qlcnic_83xx_setup_link_event,
.get_nic_info = qlcnic_83xx_get_nic_info,
.get_pci_info = qlcnic_83xx_get_pci_info,
@@ -355,14 +267,20 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
num_intr));
/* account for AEN interrupt MSI-X based interrupts */
num_msix += 1;
- num_msix += adapter->max_drv_tx_rings;
+
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ num_msix += adapter->max_drv_tx_rings;
+
err = qlcnic_enable_msix(adapter, num_msix);
if (err == -ENOMEM)
return err;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
num_msix = adapter->ahw->num_msix;
- else
+ else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
num_msix = 1;
+ }
/* setup interrupt mapping table for fw */
ahw->intr_tbl = vzalloc(num_msix *
sizeof(struct qlcnic_intrpt_config));
@@ -595,7 +513,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
{
u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
- adapter->ahw->pci_func = val & 0xf;
+ adapter->ahw->pci_func = (val >> 24) & 0xff;
}
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
@@ -707,6 +625,11 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
ahw->fw_hal_version = 2;
qlcnic_get_func_no(adapter);
+ if (qlcnic_sriov_vf_check(adapter)) {
+ qlcnic_sriov_vf_set_ops(adapter);
+ return;
+ }
+
/* Determine function privilege level */
op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
if (op_mode == QLC_83XX_DEFAULT_OPMODE)
@@ -722,6 +645,9 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
ahw->fw_hal_version);
adapter->nic_ops = &qlcnic_vf_ops;
} else {
+ if (pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV))
+ set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
adapter->nic_ops = &qlcnic_83xx_ops;
}
}
@@ -755,7 +681,7 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
}
/* Mailbox response for mac rcode */
-static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
{
u32 fw_data;
u8 mac_cmd_rcode;
@@ -769,7 +695,7 @@ static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
return 1;
}
-static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
{
u32 data;
unsigned long wait_time = 0;
@@ -884,6 +810,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
for (i = 0; i < size; i++) {
if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_83XX_FW_MBX_CMD;
mbx->req.num = mbx_tbl[i].in_args;
mbx->rsp.num = mbx_tbl[i].out_args;
mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
@@ -901,10 +828,10 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
- break;
+ return 0;
}
}
- return 0;
+ return -EINVAL;
}
void qlcnic_83xx_idc_aen_work(struct work_struct *work)
@@ -960,6 +887,9 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
break;
case QLCNIC_MBX_TIME_EXTEND_EVENT:
break;
+ case QLCNIC_MBX_BC_EVENT:
+ qlcnic_sriov_handle_bc_event(adapter, event[1]);
+ break;
case QLCNIC_MBX_SFP_INSERT_EVENT:
dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
@@ -1004,7 +934,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
- sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1050,6 +981,32 @@ out:
return err;
}
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = recv_ctx->context_id | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
{
int i, err, index, sds_mbx_size, rds_mbx_size;
@@ -1080,9 +1037,17 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
/* set mailbox hdr and capabilities */
qlcnic_alloc_mbx_args(&cmd, adapter,
QLCNIC_CMD_CREATE_RX_CTX);
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
cmd.req.arg[1] = cap;
cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
(QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
+ &cmd.req.arg[6]);
/* set up status rings, mbx 8-57/87 */
index = QLC_83XX_HOST_SDS_MBX_IDX;
for (i = 0; i < num_sds; i++) {
@@ -1090,7 +1055,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
sds = &recv_ctx->sds_rings[i];
sds->consumer = 0;
memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
- sds_mbx.phy_addr = sds->phys_addr;
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds_mbx.sds_ring_size = sds->num_desc;
if (adapter->flags & QLCNIC_MSIX_ENABLED)
intrpt_id = ahw->intr_tbl[i].id;
@@ -1110,13 +1076,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
rds = &recv_ctx->rds_rings[0];
rds->producer = 0;
memset(&rds_mbx, 0, rds_mbx_size);
- rds_mbx.phy_addr_reg = rds->phys_addr;
+ rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
rds_mbx.reg_ring_sz = rds->dma_size;
rds_mbx.reg_ring_len = rds->num_desc;
/* Jumbo ring */
rds = &recv_ctx->rds_rings[1];
rds->producer = 0;
- rds_mbx.phy_addr_jmb = rds->phys_addr;
+ rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
rds_mbx.jmb_ring_sz = rds->dma_size;
rds_mbx.jmb_ring_len = rds->num_desc;
buf = &cmd.req.arg[index];
@@ -1163,16 +1131,39 @@ out:
return err;
}
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 temp = 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = tx_ring->ctx_id | temp;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx, int ring)
{
int err;
u16 msix_id;
- u32 *buf, intr_mask;
+ u32 *buf, intr_mask, temp = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_tx_mbx mbx;
struct qlcnic_tx_mbx_out *mbx_out;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 msix_vector;
/* Reset host resources */
tx->producer = 0;
@@ -1182,13 +1173,21 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
/* setup mailbox inbox registerss */
- mbx.phys_addr = tx->phys_addr;
- mbx.cnsmr_index = tx->hw_cons_phys_addr;
+ mbx.phys_addr_low = LSD(tx->phys_addr);
+ mbx.phys_addr_high = MSD(tx->phys_addr);
+ mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+ mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
mbx.size = tx->num_desc;
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id;
- else
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ msix_vector = adapter->max_sds_rings + ring;
+ else
+ msix_vector = adapter->max_sds_rings - 1;
+ msix_id = ahw->intr_tbl[msix_vector].id;
+ } else {
msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ }
+
if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
mbx.intr_id = msix_id;
else
@@ -1196,8 +1195,15 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx.src = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
+
cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
- cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES;
+ cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
buf = &cmd.req.arg[6];
memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
/* send the mailbox command*/
@@ -1210,7 +1216,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
tx->ctx_id = mbx_out->ctx_id;
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
}
@@ -1373,12 +1380,60 @@ mbx_err:
}
}
+int qlcnic_83xx_set_led(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EIO, active = 1;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "LED test is not supported in non-privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to set LED blink state\n");
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to reset LED blink state\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
int enable)
{
struct qlcnic_cmd_args cmd;
int status;
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
if (enable) {
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
cmd.req.arg[1] = BIT_0 | BIT_31;
@@ -1441,24 +1496,35 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
return err;
}
+static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
int err;
- u32 temp;
+ u32 temp = 0;
struct qlcnic_cmd_args cmd;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
- temp = adapter->recv_ctx->context_id << 16;
+ qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
cmd.req.arg[1] = (mode ? 1 : 0) | temp;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
dev_info(&adapter->pdev->dev,
"Promiscous mode config failed\n");
- qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(&cmd);
return err;
}
@@ -1598,21 +1664,31 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return status;
}
+static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
int mode)
{
int err;
- u32 temp, temp_ip;
+ u32 temp = 0, temp_ip;
struct qlcnic_cmd_args cmd;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
- if (mode == QLCNIC_IP_UP) {
- temp = adapter->recv_ctx->context_id << 16;
+ qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
+
+ if (mode == QLCNIC_IP_UP)
cmd.req.arg[1] = 1 | temp;
- } else {
- temp = adapter->recv_ctx->context_id << 16;
+ else
cmd.req.arg[1] = 2 | temp;
- }
/*
* Adapter needs IP address in network byte order.
@@ -1629,6 +1705,7 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
dev_err(&adapter->netdev->dev,
"could not notify %s IP 0x%x request\n",
(mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
qlcnic_free_mbx_args(&cmd);
}
@@ -1695,11 +1772,22 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
}
+static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
__le16 vlan_id, u8 op)
{
int err;
- u32 *buf;
+ u32 *buf, temp = 0;
struct qlcnic_cmd_args cmd;
struct qlcnic_macvlan_mbx mv;
@@ -1709,11 +1797,17 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
if (err)
return err;
- cmd.req.arg[1] = op | (1 << 8) |
- (adapter->recv_ctx->context_id << 16);
+ cmd.req.arg[1] = op | (1 << 8);
+ qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
+ cmd.req.arg[1] |= temp;
mv.vlan = le16_to_cpu(vlan_id);
- memcpy(&mv.mac, addr, ETH_ALEN);
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
buf = &cmd.req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -2002,14 +2096,17 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
{
int i, index, err;
- bool type;
u8 max_ints;
- u32 val, temp;
+ u32 val, temp, type;
struct qlcnic_cmd_args cmd;
max_ints = adapter->ahw->num_msix - 1;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
cmd.req.arg[1] = max_ints;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
+
for (i = 0, index = 2; i < max_ints; i++) {
type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
val = type | (adapter->ahw->intr_tbl[i].type << 4);
@@ -2163,7 +2260,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
{
int ret;
u32 cmd;
@@ -2181,7 +2278,7 @@ static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
{
int ret;
@@ -2255,7 +2352,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
return -EIO;
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
- ret = qlcnic_83xx_enable_flash_write_op(adapter);
+ ret = qlcnic_83xx_enable_flash_write(adapter);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
dev_err(&adapter->pdev->dev,
@@ -2297,7 +2394,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
}
if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
- ret = qlcnic_83xx_disable_flash_write_op(adapter);
+ ret = qlcnic_83xx_disable_flash_write(adapter);
if (ret) {
qlcnic_83xx_unlock_flash(adapter);
dev_err(&adapter->pdev->dev,
@@ -2337,8 +2434,8 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
u32 temp;
int ret = -EIO;
- if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) ||
- (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) {
+ if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_WRITE_MAX)) {
dev_err(&adapter->pdev->dev,
"%s: Invalid word count\n", __func__);
return -EIO;
@@ -2616,13 +2713,19 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
{
+ u8 pci_func;
int err;
u32 config = 0, state;
struct qlcnic_cmd_args cmd;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func));
- if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) {
+ if (qlcnic_sriov_vf_check(adapter))
+ pci_func = adapter->portnum;
+ else
+ pci_func = ahw->pci_func;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
dev_info(&adapter->pdev->dev, "link state down\n");
return config;
}
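A recurring change in qlcnic_83xx_hw.c above is the replacement of single u64 address fields in the mailbox structures with explicit low/high 32-bit words filled via LSD()/MSD(). A minimal sketch of that split, using illustrative macro and struct names:

#include <linux/types.h>
#include <linux/dma-mapping.h>

#define EXAMPLE_LSD(x)  ((u32)((u64)(x)))
#define EXAMPLE_MSD(x)  ((u32)(((u64)(x)) >> 32))

struct example_ring_mbx {
        u32 phy_addr_low;
        u32 phy_addr_high;
};

static void example_fill_mbx(struct example_ring_mbx *mbx, dma_addr_t addr)
{
        mbx->phy_addr_low  = EXAMPLE_LSD(addr);
        mbx->phy_addr_high = EXAMPLE_MSD(addr);
}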
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 61f81f6c84a..32ed4b4c497 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -12,6 +12,8 @@
#include <linux/etherdevice.h>
#include "qlcnic_hw.h"
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
/* Directly mapped registers */
#define QLC_83XX_CRB_WIN_BASE 0x3800
#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
@@ -86,6 +88,153 @@
#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+/* status descriptor mailbox data
+ * @phy_addr_{low|high}: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u32 phy_addr_low;
+ u32 phy_addr_high;
+ u32 rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
+ u16 sds_ring_size;
+ u16 rsvd2;
+ u16 rsvd3[2];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd4;
+#elif defined(__BIG_ENDIAN)
+ u16 rsvd2;
+ u16 sds_ring_size;
+ u16 rsvd3[2];
+ u8 rsvd4;
+ u8 intrpt_val;
+ u16 intrpt_id;
+#endif
+ u32 rsvd5;
+} __packed;
+
+/* receive descriptor buffer data
+ * phy_addr_reg_{low|high}: physical address of regular buffer
+ * phy_addr_jmb_{low|high}: physical address of jumbo buffer
+ * reg_ring_sz: size of regular buffer
+ * reg_ring_len: no. of entries in regular buffer
+ * jmb_ring_len: no. of entries in jumbo buffer
+ * jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u32 phy_addr_reg_low;
+ u32 phy_addr_reg_high;
+ u32 phy_addr_jmb_low;
+ u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+ u16 reg_ring_len;
+ u16 reg_ring_sz;
+ u16 jmb_ring_len;
+ u16 jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @context_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+ u8 vport_id;
+ u8 phy_port;
+ u8 num_pci_func;
+ u8 state;
+#endif
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+#endif
+ u32 host_csmr[QLCNIC_MAX_RING_SETS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: src of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u32 phys_addr_low;
+ u32 phys_addr_high;
+ u32 cnsmr_index_low;
+ u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+ u16 intr_id;
+ u16 size;
+ u8 rsvd[3];
+ u8 src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 state;
+ u16 ctx_id;
+#endif
+} __packed;
+
struct qlcnic_intrpt_config {
u8 type;
u8 enabled;
@@ -94,8 +243,23 @@ struct qlcnic_intrpt_config {
};
struct qlcnic_macvlan_mbx {
- u8 mac[ETH_ALEN];
+#if defined(__LITTLE_ENDIAN)
+ u8 mac_addr0;
+ u8 mac_addr1;
+ u8 mac_addr2;
+ u8 mac_addr3;
+ u8 mac_addr4;
+ u8 mac_addr5;
u16 vlan;
+#elif defined(__BIG_ENDIAN)
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
+ u16 vlan;
+ u8 mac_addr5;
+ u8 mac_addr4;
+#endif
};
struct qlc_83xx_fw_info {
@@ -226,6 +390,7 @@ struct qlc_83xx_idc {
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
#define QLC_83XX_DEFAULT_MODE 0x0
+#define QLC_83XX_SRIOV_MODE 0x1
#define QLCNIC_BRDTYPE_83XX_10G 0x0083
#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
@@ -242,8 +407,8 @@ struct qlc_83xx_idc {
#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
#define QLC_83XX_FLASH_STATUS_READY 0x6
-#define QLC_83XX_FLASH_BULK_WRITE_MIN 2
-#define QLC_83XX_FLASH_BULK_WRITE_MAX 64
+#define QLC_83XX_FLASH_WRITE_MIN 2
+#define QLC_83XX_FLASH_WRITE_MAX 64
#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
#define QLC_83XX_ERASE_MODE 1
#define QLC_83XX_WRITE_MODE 2
@@ -351,6 +516,9 @@ int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
@@ -401,7 +569,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
u32, u8 *, int);
-int qlcnic_83xx_init(struct qlcnic_adapter *);
+int qlcnic_83xx_init(struct qlcnic_adapter *, int);
int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
@@ -434,5 +602,10 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
int qlcnic_83xx_loopback_test(struct net_device *, u8);
int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
+u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
#endif
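The mailbox structures moved into qlcnic_83xx_hw.h above are declared twice, once per host byte order, so that the 16- and 8-bit fields land in the word positions the firmware expects without per-field byte swapping. A stripped-down sketch of the technique, with illustrative field names:

#include <asm/byteorder.h>
#include <linux/types.h>

struct example_mbx_word {
#if defined(__LITTLE_ENDIAN)
        u16 ring_size;
        u16 intr_id;
#elif defined(__BIG_ENDIAN)
        u16 intr_id;
        u16 ring_size;
#endif
} __packed;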
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 5c033f268ca..c302d118a0d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -5,6 +5,7 @@
* See LICENSE.qlcnic for copyright and licensing details.
*/
+#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_hw.h"
@@ -25,12 +26,12 @@
#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
-static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
/* Template header */
struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
u16 version;
u16 signature;
u16 size;
@@ -39,14 +40,31 @@ struct qlc_83xx_reset_hdr {
u16 checksum;
u16 init_offset;
u16 start_offset;
+#elif defined(__BIG_ENDIAN)
+ u16 signature;
+ u16 version;
+ u16 entries;
+ u16 size;
+ u16 checksum;
+ u16 hdr_size;
+ u16 start_offset;
+ u16 init_offset;
+#endif
} __packed;
/* Command entry header. */
struct qlc_83xx_entry_hdr {
- u16 cmd;
- u16 size;
- u16 count;
- u16 delay;
+#if defined(__LITTLE_ENDIAN)
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+#elif defined(__BIG_ENDIAN)
+ u16 size;
+ u16 cmd;
+ u16 delay;
+ u16 count;
+#endif
} __packed;
/* Generic poll command */
@@ -60,10 +78,17 @@ struct qlc_83xx_rmw {
u32 mask;
u32 xor_value;
u32 or_value;
+#if defined(__LITTLE_ENDIAN)
u8 shl;
u8 shr;
u8 index_a;
u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 index_a;
+ u8 shr;
+ u8 shl;
+#endif
} __packed;
/* Generic command with 2 DWORD */
@@ -1893,6 +1918,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
qlcnic_get_func_no(adapter);
op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ op_mode = QLC_83XX_DEFAULT_OPMODE;
+
if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
@@ -1922,6 +1950,16 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
+ /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
+ * set when the device is SR-IOV capable. VNIC and SR-IOV are mutually
+ * exclusive, so for an SR-IOV capable device the driver is loaded in
+ * default mode.
+ */
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ return ahw->nic_mode;
+ }
+
if (ahw->capabilities & BIT_23)
ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
else
@@ -1930,7 +1968,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
return ahw->nic_mode;
}
-static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
int ret;
@@ -2008,10 +2046,13 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
}
}
-int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (qlcnic_sriov_vf_check(adapter))
+ return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+
if (qlcnic_83xx_check_hw_status(adapter))
return -EIO;
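The comment added in qlcnic_83xx_get_nic_configuration above captures the mode-selection rule: BIT_23 flags VNIC mode but is also set on SR-IOV capable parts, and since the two modes are mutually exclusive, SR-IOV capability wins and the driver stays in default mode. A sketch of that decision, with illustrative names and return encoding:

#include <linux/types.h>
#include <linux/bitops.h>

/* 0 == default mode, 1 == virtual-NIC mode (illustrative encoding) */
static int example_select_nic_mode(u32 capabilities, bool sriov_capable)
{
        if (sriov_capable)
                return 0;

        return (capabilities & BIT(23)) ? 1 : 0;
}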
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index a69097c6b84..43562c25637 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -382,8 +382,7 @@ out_free_rq:
return err;
}
-static void
-qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
int err;
struct qlcnic_cmd_args cmd;
@@ -422,22 +421,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL);
+ &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL);
+ &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
- memset(rq_addr, 0, rq_size);
prq = rq_addr;
- memset(rsp_addr, 0, rsp_size);
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
@@ -486,13 +483,13 @@ out_free_rq:
return err;
}
-static void
-qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter,
- struct qlcnic_host_tx_ring *tx_ring)
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_args cmd;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+
cmd.req.arg[1] = tx_ring->ctx_id;
if (qlcnic_issue_cmd(adapter, &cmd))
dev_err(&adapter->pdev->dev,
@@ -532,20 +529,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr,
GFP_KERNEL);
-
- if (ptr == NULL) {
- dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ if (ptr == NULL)
return -ENOMEM;
- }
+
tx_ring->hw_consumer = ptr;
/* cmd desc ring */
addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
&tx_ring->phys_addr,
GFP_KERNEL);
-
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate tx desc ring\n");
err = -ENOMEM;
goto err_out_free;
}
@@ -556,11 +548,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr, GFP_KERNEL);
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate rds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -572,11 +562,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
addr = dma_alloc_coherent(&adapter->pdev->dev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr, GFP_KERNEL);
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
- dev_err(&pdev->dev,
- "failed to allocate sds ring [%d]\n", ring);
err = -ENOMEM;
goto err_out_free;
}
@@ -616,13 +604,12 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
&dev->tx_ring[ring],
ring);
if (err) {
- qlcnic_fw_cmd_destroy_rx_ctx(dev);
+ qlcnic_fw_cmd_del_rx_ctx(dev);
if (ring == 0)
goto err_out;
for (i = 0; i < ring; i++)
- qlcnic_fw_cmd_destroy_tx_ctx(dev,
- &dev->tx_ring[i]);
+ qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
goto err_out;
}
@@ -644,10 +631,10 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
int ring;
if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
- qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_del_rx_ctx(adapter);
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
- qlcnic_fw_cmd_destroy_tx_ctx(adapter,
- &adapter->tx_ring[ring]);
+ qlcnic_fw_cmd_del_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
if (qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
@@ -655,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_83xx_config_intrpt(adapter, 0);
}
/* Allow dma queues to drain after context reset */
- mdelay(20);
+ msleep(20);
}
}
@@ -753,10 +740,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
size_t nic_size = sizeof(struct qlcnic_info_le);
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
@@ -804,11 +790,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
return err;
nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL);
+ &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
if (!nic_info_addr)
return -ENOMEM;
- memset(nic_info_addr, 0, nic_size);
nic_info = nic_info_addr;
nic_info->pci_func = cpu_to_le16(nic->pci_func);
@@ -854,10 +839,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t, GFP_KERNEL);
+ &pci_info_dma_t,
+ GFP_KERNEL | __GFP_ZERO);
if (!pci_info_addr)
return -ENOMEM;
- memset(pci_info_addr, 0, pci_size);
npar = pci_info_addr;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
@@ -949,12 +934,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
}
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
+ &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
arg1 |= rx_tx << 15 | stats_size << 16;
@@ -1003,13 +985,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
return -ENOMEM;
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL);
- if (!stats_addr) {
- dev_err(&adapter->pdev->dev,
- "%s: Unable to allocate memory.\n", __func__);
+ &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ if (!stats_addr)
return -ENOMEM;
- }
- memset(stats_addr, 0, stats_size);
+
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
cmd.req.arg[1] = stats_size << 16;
cmd.req.arg[2] = MSD(stats_dma_t);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5641f8ec49a..f4f279d5cba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -149,7 +149,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
static inline int qlcnic_82xx_statistics(void)
{
- return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
}
static inline int qlcnic_83xx_statistics(void)
@@ -1070,8 +1071,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static void
-qlcnic_fill_stats(u64 *data, void *stats, int type)
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
{
if (type == QLCNIC_MAC_STATS) {
struct qlcnic_mac_statistics *mac_stats =
@@ -1120,6 +1120,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type)
*data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
*data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
}
+ return data;
}
static void qlcnic_get_ethtool_stats(struct net_device *dev,
@@ -1147,7 +1148,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
/* Retrieve MAC statistics from firmware */
memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
qlcnic_get_mac_stats(adapter, &mac_stats);
- qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
}
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -1159,7 +1160,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
if (ret)
return;
- qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
+ data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
@@ -1176,7 +1177,8 @@ static int qlcnic_set_led(struct net_device *dev,
int err = -EIO, active = 1;
if (qlcnic_83xx_check(adapter))
- return -EOPNOTSUPP;
+ return qlcnic_83xx_set_led(dev, state);
+
if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(dev, "LED test not supported for non "
"privilege function\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 44197ca1456..1cebd8900cf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -714,7 +714,9 @@ enum {
QLCNIC_MGMT_FUNC = 0,
QLCNIC_PRIV_FUNC = 1,
QLCNIC_NON_PRIV_FUNC = 2,
- QLCNIC_UNKNOWN_FUNC_MODE = 3
+ QLCNIC_SRIOV_PF_FUNC = 3,
+ QLCNIC_SRIOV_VF_FUNC = 4,
+ QLCNIC_UNKNOWN_FUNC_MODE = 5
};
enum {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f89cc7a3fe6..ddc130b2337 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -496,7 +496,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
return 0;
}
-void qlcnic_set_multi(struct net_device *netdev)
+void __qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
@@ -508,7 +508,8 @@ void qlcnic_set_multi(struct net_device *netdev)
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- qlcnic_nic_add_mac(adapter, adapter->mac_addr);
+ if (!qlcnic_sriov_vf_check(adapter))
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr);
qlcnic_nic_add_mac(adapter, bcast_addr);
if (netdev->flags & IFF_PROMISC) {
@@ -523,23 +524,53 @@ void qlcnic_set_multi(struct net_device *netdev)
goto send_fw_cmd;
}
- if (!netdev_mc_empty(netdev)) {
+ if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) {
netdev_for_each_mc_addr(ha, netdev) {
qlcnic_nic_add_mac(adapter, ha->addr);
}
}
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev);
+
send_fw_cmd:
- if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) {
- qlcnic_alloc_lb_filters_mem(adapter);
- adapter->drv_mac_learn = true;
- } else {
- adapter->drv_mac_learn = false;
+ if (!qlcnic_sriov_vf_check(adapter)) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = false;
+ }
}
qlcnic_nic_set_promisc(adapter, mode);
}
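+/* SR-IOV VFs defer multicast programming: the addresses are queued on
+ * adapter->vf_mc_list and qlcnic_sriov_vf_schedule_multi() processes
+ * them asynchronously; every other function type goes straight through
+ * __qlcnic_set_multi().
+ */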
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ struct qlcnic_mac_list_s *cur;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+ if (qlcnic_sriov_vf_check(adapter)) {
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev) {
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
+ GFP_ATOMIC);
+ if (cur == NULL)
+ break;
+ memcpy(cur->mac_addr, ha->addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->vf_mc_list);
+ }
+ }
+ qlcnic_sriov_vf_schedule_multi(adapter->netdev);
+ return;
+ }
+ __qlcnic_set_multi(netdev);
+}
+
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
struct qlcnic_nic_req req;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 5b8749eda11..e862a77a626 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -83,6 +83,8 @@ enum qlcnic_regs {
#define QLCNIC_CMD_CONFIG_PORT 0x2e
#define QLCNIC_CMD_TEMP_SIZE 0x2f
#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
+#define QLCNIC_CMD_CONFIG_VPORT 0x32
#define QLCNIC_CMD_GET_MAC_STATS 0x37
#define QLCNIC_CMD_SET_DRV_VER 0x38
#define QLCNIC_CMD_CONFIGURE_RSS 0x41
@@ -114,6 +116,7 @@ enum qlcnic_regs {
#define QLCNIC_SET_FAC_DEF_MAC 5
#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_BC_EVENT 0x8002
#define QLCNIC_MBX_COMP_EVENT 0x8100
#define QLCNIC_MBX_REQUEST_EVENT 0x8101
#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
@@ -175,6 +178,9 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *tx_ring, int);
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0e630061bff..a85ca63a2c9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -9,6 +9,7 @@
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
+#include <net/checksum.h>
#include "qlcnic.h"
@@ -146,7 +147,10 @@ static inline u8 qlcnic_mac_hash(u64 mac)
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
u16 handle, u8 ring_id)
{
- if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X)
+ unsigned short device = adapter->pdev->device;
+
+ if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
return handle | (ring_id << 15);
else
return handle;
@@ -1132,9 +1136,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
iph = (struct iphdr *)skb->data;
th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
th->psh = push;
@@ -1595,9 +1598,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
iph = (struct iphdr *)skb->data;
th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
iph->tot_len = htons(length);
- iph->check = 0;
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
th->psh = push;
@@ -1692,6 +1694,29 @@ skip:
return count;
}
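+/* NAPI poll used when QLCNIC_TX_INTR_SHARED is set and Tx/Rx share one
+ * MSI-X vector: complete the single Tx ring first, then the status
+ * ring, and re-arm the interrupt only when both finish within budget.
+ */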
+static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ qlcnic_83xx_enable_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
{
int tx_complete;
@@ -1769,7 +1794,8 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
qlcnic_83xx_enable_intr(adapter, sds_ring);
}
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
napi_enable(&tx_ring->napi);
@@ -1796,7 +1822,8 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
napi_disable(&sds_ring->napi);
}
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
@@ -1809,7 +1836,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings;
+ int ring, max_sds_rings, temp;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1820,14 +1847,23 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
max_sds_rings = adapter->max_sds_rings;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- if (adapter->flags & QLCNIC_MSIX_ENABLED)
- netif_napi_add(netdev, &sds_ring->napi,
- qlcnic_83xx_rx_poll,
- QLCNIC_NETDEV_WEIGHT * 2);
- else
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll,
+ QLCNIC_NETDEV_WEIGHT * 2);
+ } else {
+ temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_msix_sriov_vf_poll,
+ temp);
+ }
+
+ } else {
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_poll,
QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ }
}
if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1835,7 +1871,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
@@ -1861,7 +1898,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
qlcnic_free_sds_rings(adapter->recv_ctx);
- if ((adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
netif_napi_del(&tx_ring->napi);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 28a6d483836..3ee593ee13c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include "qlcnic.h"
+#include "qlcnic_sriov.h"
#include "qlcnic_hw.h"
#include <linux/swab.h>
@@ -109,6 +110,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
{0,}
};
@@ -198,8 +200,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
recv_ctx->sds_rings = NULL;
}
-static int
-qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
@@ -225,6 +226,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+
if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
return -EOPNOTSUPP;
@@ -253,11 +257,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = -EOPNOTSUPP;
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return err;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
if (is_unicast_ether_addr(addr))
@@ -277,11 +278,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int err = 0;
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return -EOPNOTSUPP;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
@@ -306,11 +304,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if (!adapter->fdb_mac_learn) {
- pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n",
- __func__);
- return -EOPNOTSUPP;
- }
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
@@ -387,6 +382,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
.create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
.create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
+ .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
.setup_link_event = qlcnic_82xx_linkevent_request,
.get_nic_info = qlcnic_82xx_get_nic_info,
.get_pci_info = qlcnic_82xx_get_pci_info,
@@ -408,7 +405,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
int err = -1, i;
- int max_tx_rings;
+ int max_tx_rings, tx_vector;
+
+ if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
+ max_tx_rings = 0;
+ tx_vector = 0;
+ } else {
+ max_tx_rings = adapter->max_drv_tx_rings;
+ tx_vector = 1;
+ }
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
@@ -431,7 +436,6 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
if (qlcnic_83xx_check(adapter)) {
adapter->ahw->num_msix = num_msix;
/* subtract mail box and tx ring vectors */
- max_tx_rings = adapter->max_drv_tx_rings;
adapter->max_sds_rings = num_msix -
max_tx_rings - 1;
} else {
@@ -444,11 +448,11 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
"Unable to allocate %d MSI-X interrupt vectors\n",
num_msix);
if (qlcnic_83xx_check(adapter)) {
- if (err < QLC_83XX_MINIMUM_VECTOR)
+ if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
return err;
- err -= (adapter->max_drv_tx_rings + 1);
+ err -= (max_tx_rings + 1);
num_msix = rounddown_pow_of_two(err);
- num_msix += (adapter->max_drv_tx_rings + 1);
+ num_msix += (max_tx_rings + 1);
} else {
num_msix = rounddown_pow_of_two(err);
}
@@ -721,6 +725,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -751,7 +756,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return -EIO;
}
- dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+ dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
ahw->pci_base0 = mem_ptr0;
ahw->pci_len0 = pci_len0;
@@ -1292,7 +1297,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
}
}
if (qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
handler = qlcnic_msix_tx_intr;
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
@@ -1328,7 +1334,8 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
free_irq(sds_ring->irq, sds_ring);
}
}
- if (qlcnic_83xx_check(adapter)) {
+ if (qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
@@ -1418,9 +1425,12 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
+ adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
qlcnic_free_mac_list(adapter);
@@ -1685,7 +1695,7 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
return err;
}
-static int
+int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
int pci_using_dac)
{
@@ -1820,6 +1830,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 capab2;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ if (pdev->is_virtfn)
+ return -ENODEV;
+
err = pci_enable_device(pdev);
if (err)
return err;
@@ -1844,12 +1857,18 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!ahw)
goto err_out_free_res;
- if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) {
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
ahw->hw_ops = &qlcnic_hw_ops;
- ahw->reg_tbl = (u32 *)qlcnic_reg_tbl;
- } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
qlcnic_83xx_register_map(ahw);
- } else {
+ break;
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ qlcnic_sriov_vf_register_map(ahw);
+ break;
+ default:
goto err_out_free_hw_res;
}
@@ -1911,11 +1930,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
- err = qlcnic_83xx_init(adapter);
+ err = qlcnic_83xx_init(adapter, pci_using_dac);
if (err) {
dev_err(&pdev->dev, "%s: failed\n", __func__);
goto err_out_free_hw;
}
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
} else {
dev_err(&pdev->dev,
"%s: failed. Please Reboot\n", __func__);
@@ -1932,6 +1953,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
module_name(THIS_MODULE),
board_name, adapter->ahw->revision_id);
}
+
+ if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
+ !!qlcnic_use_msi)
+ dev_warn(&pdev->dev,
+ "83xx adapter do not support MSI interrupts\n");
+
err = qlcnic_setup_intr(adapter, 0);
if (err) {
dev_err(&pdev->dev, "Failed to setup interrupt\n");
@@ -2024,11 +2051,13 @@ static void qlcnic_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
+ qlcnic_sriov_pf_disable(adapter);
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
unregister_netdev(netdev);
+ qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
@@ -3432,7 +3461,10 @@ static struct pci_driver qlcnic_driver = {
.resume = qlcnic_resume,
#endif
.shutdown = qlcnic_shutdown,
- .err_handler = &qlcnic_err_handler
+ .err_handler = &qlcnic_err_handler,
+#ifdef CONFIG_QLCNIC_SRIOV
+ .sriov_configure = qlcnic_pci_sriov_configure,
+#endif
};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index abbd22c814a..4b9bab18ebd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
&tmp_addr_t, GFP_KERNEL);
- if (!tmp_addr) {
- dev_err(&adapter->pdev->dev,
- "Can't get memory for FW dump template\n");
+ if (!tmp_addr)
return -ENOMEM;
- }
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 00000000000..b476ebac243
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,214 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef _QLCNIC_83XX_SRIOV_H_
+#define _QLCNIC_83XX_SRIOV_H_
+
+#include "qlcnic.h"
+#include <linux/types.h>
+#include <linux/pci.h>
+
+extern const u32 qlcnic_83xx_reg_tbl[];
+extern const u32 qlcnic_83xx_ext_reg_tbl[];
+
+struct qlcnic_bc_payload {
+ u64 payload[126];
+};
+
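+/* Per-fragment header of a PF<->VF back-channel message.  A logical
+ * command/response is split into QLC_BC_PAYLOAD_SZ sized fragments;
+ * frag_num/num_frags give the position within the message and seq_id
+ * ties all fragments of one transaction together.  The field order is
+ * swapped for big-endian hosts.
+ */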
+struct qlcnic_bc_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u8 version;
+ u8 msg_type:4;
+ u8 rsvd1:3;
+ u8 op_type:1;
+ u8 num_cmds;
+ u8 num_frags;
+ u8 frag_num;
+ u8 cmd_op;
+ u16 seq_id;
+ u64 rsvd3;
+#elif defined(__BIG_ENDIAN)
+ u8 num_frags;
+ u8 num_cmds;
+ u8 op_type:1;
+ u8 rsvd1:3;
+ u8 msg_type:4;
+ u8 version;
+ u16 seq_id;
+ u8 cmd_op;
+ u8 frag_num;
+ u64 rsvd3;
+#endif
+};
+
+enum qlcnic_bc_commands {
+ QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
+ QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
+};
+
+#define QLC_BC_CMD 1
+
+struct qlcnic_trans_list {
+ /* Lock for manipulating list */
+ spinlock_t lock;
+ struct list_head wait_list;
+ int count;
+};
+
+enum qlcnic_trans_state {
+ QLC_INIT = 0,
+ QLC_WAIT_FOR_CHANNEL_FREE,
+ QLC_WAIT_FOR_RESP,
+ QLC_ABORT,
+ QLC_END,
+};
+
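+/* State of one in-flight back-channel transaction: the fragmented
+ * request/response headers and payloads, the current fragment indices
+ * and the completion used to wait for the peer's response.
+ */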
+struct qlcnic_bc_trans {
+ u8 func_id;
+ u8 active;
+ u8 curr_rsp_frag;
+ u8 curr_req_frag;
+ u16 cmd_id;
+ u16 req_pay_size;
+ u16 rsp_pay_size;
+ u32 trans_id;
+ enum qlcnic_trans_state trans_state;
+ struct list_head list;
+ struct qlcnic_bc_hdr *req_hdr;
+ struct qlcnic_bc_hdr *rsp_hdr;
+ struct qlcnic_bc_payload *req_pay;
+ struct qlcnic_bc_payload *rsp_pay;
+ struct completion resp_cmpl;
+ struct qlcnic_vf_info *vf;
+};
+
+enum qlcnic_vf_state {
+ QLC_BC_VF_SEND = 0,
+ QLC_BC_VF_RECV,
+ QLC_BC_VF_CHANNEL,
+ QLC_BC_VF_STATE,
+};
+
+struct qlcnic_resources {
+ u16 num_tx_mac_filters;
+ u16 num_rx_ucast_mac_filters;
+ u16 num_rx_mcast_mac_filters;
+
+ u16 num_txvlan_keys;
+
+ u16 num_rx_queues;
+ u16 num_tx_queues;
+
+ u16 num_rx_buf_rings;
+ u16 num_rx_status_rings;
+
+ u16 num_destip;
+ u32 num_lro_flows_supported;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_vport {
+ u16 handle;
+ u8 mac[6];
+};
+
+struct qlcnic_vf_info {
+ u8 pci_func;
+ u16 rx_ctx_id;
+ u16 tx_ctx_id;
+ unsigned long state;
+ struct completion ch_free_cmpl;
+ struct work_struct trans_work;
+ /* It synchronizes commands sent from VF */
+ struct mutex send_cmd_lock;
+ struct qlcnic_bc_trans *send_cmd;
+ struct qlcnic_trans_list rcv_act;
+ struct qlcnic_trans_list rcv_pend;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_vport *vp;
+};
+
+struct qlcnic_async_work_list {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+};
+
+struct qlcnic_back_channel {
+ u16 trans_counter;
+ struct workqueue_struct *bc_trans_wq;
+ struct workqueue_struct *bc_async_wq;
+ struct list_head async_list;
+};
+
+struct qlcnic_sriov {
+ u16 vp_handle;
+ u8 num_vfs;
+ struct qlcnic_resources ff_max;
+ struct qlcnic_back_channel bc;
+ struct qlcnic_vf_info *vf_info;
+};
+
+int qlcnic_sriov_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
+int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
+
+static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
+}
+
+#ifdef CONFIG_QLCNIC_SRIOV
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_cmd_args *);
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
+int qlcnic_pci_sriov_configure(struct pci_dev *, int);
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
+#else
+static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
+static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
+static inline void
+qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 00000000000..14e9ebd3b73
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,1297 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_83xx_hw.h"
+#include <linux/types.h>
+
+#define QLC_BC_COMMAND 0
+#define QLC_BC_RESPONSE 1
+
+#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
+#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
+
+#define QLC_BC_MSG 0
+#define QLC_BC_CFREE 1
+#define QLC_BC_HDR_SZ 16
+#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
+
+#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
+#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
+
+static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+
+static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_sriov_vf_mbx_op,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+};
+
+static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
+ {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
+ {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+};
+
+static inline bool qlcnic_sriov_bc_msg_check(u32 val)
+{
+ return (val & (1 << QLC_BC_MSG)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_channel_free_check(u32 val)
+{
+ return (val & (1 << QLC_BC_CFREE)) ? true : false;
+}
+
+static inline u8 qlcnic_sriov_target_func_id(u32 val)
+{
+ return (val >> 4) & 0xff;
+}
+
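+/* PF only: derive the virtual PCI function number of VF @vf_id from the
+ * SR-IOV capability's VF offset and stride (VFs simply return 0).
+ */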
+static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int pos;
+ u16 stride, offset;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
+ return (dev->devfn + offset + stride * vf_id) & 0xff;
+}
+
+int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct qlcnic_sriov *sriov;
+ struct qlcnic_back_channel *bc;
+ struct workqueue_struct *wq;
+ struct qlcnic_vport *vp;
+ struct qlcnic_vf_info *vf;
+ int err, i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return -EIO;
+
+ sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
+ if (!sriov)
+ return -ENOMEM;
+
+ adapter->ahw->sriov = sriov;
+ sriov->num_vfs = num_vfs;
+ bc = &sriov->bc;
+ sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
+ num_vfs, GFP_KERNEL);
+ if (!sriov->vf_info) {
+ err = -ENOMEM;
+ goto qlcnic_free_sriov;
+ }
+
+ wq = create_singlethread_workqueue("bc-trans");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Cannot create bc-trans workqueue\n");
+ goto qlcnic_free_vf_info;
+ }
+
+ bc->bc_trans_wq = wq;
+
+ wq = create_singlethread_workqueue("async");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+ goto qlcnic_destroy_trans_wq;
+ }
+
+ bc->bc_async_wq = wq;
+ INIT_LIST_HEAD(&bc->async_list);
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->adapter = adapter;
+ vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
+ mutex_init(&vf->send_cmd_lock);
+ INIT_LIST_HEAD(&vf->rcv_act.wait_list);
+ INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
+ spin_lock_init(&vf->rcv_act.lock);
+ spin_lock_init(&vf->rcv_pend.lock);
+ init_completion(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_pf_check(adapter)) {
+ vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
+ if (!vp) {
+ err = -ENOMEM;
+ goto qlcnic_destroy_async_wq;
+ }
+ sriov->vf_info[i].vp = vp;
+ random_ether_addr(vp->mac);
+ dev_info(&adapter->pdev->dev,
+ "MAC Address %pM is configured for VF %d\n",
+ vp->mac, i);
+ }
+ }
+
+ return 0;
+
+qlcnic_destroy_async_wq:
+ destroy_workqueue(bc->bc_async_wq);
+
+qlcnic_destroy_trans_wq:
+ destroy_workqueue(bc->bc_trans_wq);
+
+qlcnic_free_vf_info:
+ kfree(sriov->vf_info);
+
+qlcnic_free_sriov:
+ kfree(adapter->ahw->sriov);
+ return err;
+}
+
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ int i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_cleanup_async_list(bc);
+ destroy_workqueue(bc->bc_async_wq);
+ destroy_workqueue(bc->bc_trans_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ kfree(sriov->vf_info[i].vp);
+
+ kfree(sriov->vf_info);
+ kfree(adapter->ahw->sriov);
+}
+
+static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
+{
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+}
+
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_cleanup(adapter);
+}
+
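+/* Write one back-channel fragment (header plus @size payload dwords)
+ * into the host mailbox registers, hand ownership to the firmware and
+ * poll for completion, processing any async events that arrive while
+ * waiting.
+ */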
+static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
+ u32 *pay, u8 pci_func, u8 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ unsigned long flags;
+ u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
+ u16 opcode;
+ u8 mbx_err_code;
+ int i, j;
+
+ opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ dev_info(&adapter->pdev->dev,
+ "Mailbox cmd attempted, 0x%x\n", opcode);
+ dev_info(&adapter->pdev->dev, "Mailbox detached\n");
+ return 0;
+ }
+
+ spin_lock_irqsave(&ahw->mbx_lock, flags);
+
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val) {
+ QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
+ spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+ return QLCNIC_RCODE_TIMEOUT;
+ }
+ /* Fill in mailbox registers */
+ val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ mbx_cmd = 0x1 | (1 << 4);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= (pci_func << 5);
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+ for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ i++, j++) {
+ writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+ }
+ for (j = 0; j < size; j++, i++)
+ writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
+
+ /* Signal FW about the impending command */
+ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+
+ /* Waiting for the mailbox cmd to complete and while waiting here
+ * some AEN might arrive. If more than 5 seconds expire we can
+ * assume something is wrong.
+ */
+poll:
+ rsp = qlcnic_83xx_mbx_poll(adapter);
+ if (rsp != QLCNIC_RCODE_TIMEOUT) {
+ /* Get the FW response data */
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
+ qlcnic_83xx_process_aen(adapter);
+ mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (mbx_val)
+ goto poll;
+ }
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
+ opcode = QLCNIC_MBX_RSP(fw_data);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ rsp = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ rsp = qlcnic_83xx_mac_rcode(adapter);
+ if (!rsp)
+ goto out;
+ }
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x\n",
+ opcode, mbx_err_code);
+ rsp = mbx_err_code;
+ break;
+ }
+ goto out;
+ }
+
+ dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
+ QLCNIC_MBX_RSP(mbx_cmd));
+ rsp = QLCNIC_RCODE_TIMEOUT;
+out:
+ /* clear fw mbx control register */
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+ spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ return rsp;
+}
+
+static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
+{
+ adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return -EIO;
+
+ qlcnic_sriov_vf_cfg_buff_desc(adapter);
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->op_mode = nic_info.op_mode;
+ ahw->capabilities = nic_info.capabilities;
+ return 0;
+}
+
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
+ int pci_using_dac)
+{
+ int err;
+
+ INIT_LIST_HEAD(&adapter->vf_mc_list);
+ if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
+ dev_warn(&adapter->pdev->dev,
+ "83xx adapter do not support MSI interrupts\n");
+
+ err = qlcnic_setup_intr(adapter, 1);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+
+ err = qlcnic_sriov_init(adapter, 1);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto err_out_cleanup_sriov;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_disable_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
+ if (err)
+ goto err_out_send_channel_term;
+
+ pci_set_drvdata(adapter->pdev, adapter);
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ return 0;
+
+err_out_send_channel_term:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_disable_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+
+err_out_cleanup_sriov:
+ __qlcnic_sriov_cleanup(adapter);
+
+err_out_disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ return err;
+}
+
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ spin_lock_init(&ahw->mbx_lock);
+ set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ ahw->msix_supported = 1;
+ adapter->flags |= QLCNIC_TX_INTR_SHARED;
+
+ if (qlcnic_sriov_setup_vf(adapter, pci_using_dac))
+ return -EIO;
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
+
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->reset_context = 0;
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->need_fw_reset = 0;
+ return 0;
+}
+
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged SRIOV function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_sriov_vf_ops;
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ return;
+}
+
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
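+/* Size of fragment @curr_frag of a message of @real_pay_size bytes:
+ * QLC_BC_PAYLOAD_SZ for every full fragment, the remainder for the
+ * final partial one.
+ */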
+static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
+{
+ u32 pay_size;
+
+ pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
+
+ if (pay_size)
+ pay_size = QLC_BC_PAYLOAD_SZ;
+ else
+ pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
+
+ return pay_size;
+}
+
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
+ u8 i;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
+ if (vf_info[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
+{
+ *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
+ if (!*trans)
+ return -ENOMEM;
+
+ init_completion(&(*trans)->resp_cmpl);
+ return 0;
+}
+
+static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
+ u32 size)
+{
+ *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
+ if (!*hdr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
+{
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+ int i, size;
+
+ mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
+
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_BC_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
+ memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) |
+ (3 << 29));
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
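+/* Build the per-fragment headers for a transaction.  For a command
+ * (sender side) the request/response payloads come from the mailbox
+ * args and header arrays are allocated for both directions; for a
+ * response (receiver side) the mailbox args are pointed back at the
+ * payload buffers already attached to the transaction.
+ */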
+static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd,
+ u16 seq, u8 msg_type)
+{
+ struct qlcnic_bc_hdr *hdr;
+ int i;
+ u32 num_regs, bc_pay_sz;
+ u16 remainder;
+ u8 cmd_op, num_frags, t_num_frags;
+
+ bc_pay_sz = QLC_BC_PAYLOAD_SZ;
+ if (msg_type == QLC_BC_COMMAND) {
+ trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
+ trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
+ num_regs = cmd->req.num;
+ trans->req_pay_size = (num_regs * 4);
+ num_regs = cmd->rsp.num;
+ trans->rsp_pay_size = (num_regs * 4);
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->req_pay_size) % (bc_pay_sz);
+ num_frags = (trans->req_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ t_num_frags = num_frags;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
+ return -ENOMEM;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
+ return -ENOMEM;
+ num_frags = t_num_frags;
+ hdr = trans->req_hdr;
+ } else {
+ cmd->req.arg = (u32 *)trans->req_pay;
+ cmd->rsp.arg = (u32 *)trans->rsp_pay;
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ cmd->req.num = trans->req_pay_size / 4;
+ cmd->rsp.num = trans->rsp_pay_size / 4;
+ hdr = trans->rsp_hdr;
+ }
+
+ trans->trans_id = seq;
+ trans->cmd_id = cmd_op;
+ for (i = 0; i < num_frags; i++) {
+ hdr[i].version = 2;
+ hdr[i].msg_type = msg_type;
+ hdr[i].op_type = cmd->op_type;
+ hdr[i].num_cmds = 1;
+ hdr[i].num_frags = num_frags;
+ hdr[i].frag_num = i + 1;
+ hdr[i].cmd_op = cmd_op;
+ hdr[i].seq_id = seq;
+ }
+ return 0;
+}
+
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
+{
+ if (!trans)
+ return;
+ kfree(trans->req_hdr);
+ kfree(trans->rsp_hdr);
+ kfree(trans);
+}
+
+static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_trans_list *t_list;
+ unsigned long flags;
+ int ret = 0;
+
+ if (type == QLC_BC_RESPONSE) {
+ t_list = &vf->rcv_act;
+ spin_lock_irqsave(&t_list->lock, flags);
+ t_list->count--;
+ list_del(&trans->list);
+ if (t_list->count > 0)
+ ret = 1;
+ spin_unlock_irqrestore(&t_list->lock, flags);
+ }
+ if (type == QLC_BC_COMMAND) {
+ while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ msleep(100);
+ vf->send_cmd = NULL;
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+ }
+ return ret;
+}
+
+static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ INIT_WORK(&vf->trans_work, func);
+ queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
+}
+
+static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
+{
+ struct completion *cmpl = &trans->resp_cmpl;
+
+ if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
+ trans->trans_state = QLC_END;
+ else
+ trans->trans_state = QLC_ABORT;
+
+ return;
+}
+
+static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ if (type == QLC_BC_RESPONSE) {
+ trans->curr_rsp_frag++;
+ if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_END;
+ } else {
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag < trans->req_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_WAIT_FOR_RESP;
+ }
+}
+
+static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct completion *cmpl = &vf->ch_free_cmpl;
+
+ if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
+ trans->trans_state = QLC_ABORT;
+ return;
+ }
+
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ qlcnic_sriov_handle_multi_frags(trans, type);
+}
+
+static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
+ u32 *hdr, u32 *pay, u32 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_mbx;
+ u8 i, max = 2, hdr_size, j;
+
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ max = (size / sizeof(u32)) + hdr_size;
+
+ fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
+ for (; j < max; i++, j++)
+ *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
+}
+
+static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
+{
+ int ret = -EBUSY;
+ u32 timeout = 10000;
+
+ do {
+ if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
+ ret = 0;
+ break;
+ }
+ mdelay(1);
+ } while (--timeout);
+
+ return ret;
+}
+
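+/* Grab the per-VF channel bit (busy-waiting up to ~10s) and post the
+ * current request or response fragment to the firmware mailbox.
+ */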
+static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u32 pay_size, hdr_size;
+ u32 *hdr, *pay;
+ int ret;
+ u8 pci_func = trans->func_id;
+
+ if (__qlcnic_sriov_issue_bc_post(vf))
+ return -EBUSY;
+
+ if (type == QLC_BC_COMMAND) {
+ hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
+ pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ pay_size = (pay_size / sizeof(u32));
+ } else {
+ hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
+ pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ pay_size = (pay_size / sizeof(u32));
+ }
+
+ ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
+ pci_func, pay_size);
+ return ret;
+}
+
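+/* Transaction state machine: post each fragment, wait for the peer to
+ * signal the channel free, and after the last request fragment wait for
+ * the full response (commands only) before reporting success or abort.
+ */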
+static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf, u8 type)
+{
+ int err;
+ bool flag = true;
+
+ while (flag) {
+ switch (trans->trans_state) {
+ case QLC_INIT:
+ trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
+ if (qlcnic_sriov_issue_bc_post(trans, type))
+ trans->trans_state = QLC_ABORT;
+ break;
+ case QLC_WAIT_FOR_CHANNEL_FREE:
+ qlcnic_sriov_wait_for_channel_free(trans, type);
+ break;
+ case QLC_WAIT_FOR_RESP:
+ qlcnic_sriov_wait_for_resp(trans);
+ break;
+ case QLC_END:
+ err = 0;
+ flag = false;
+ break;
+ case QLC_ABORT:
+ err = -EIO;
+ flag = false;
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ break;
+ default:
+ err = -EIO;
+ flag = false;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans, int pci_func)
+{
+ struct qlcnic_vf_info *vf;
+ int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return -EIO;
+
+ vf = &adapter->ahw->sriov->vf_info[index];
+ trans->vf = vf;
+ trans->func_id = pci_func;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ if (qlcnic_sriov_pf_check(adapter))
+ return -EIO;
+ if (qlcnic_sriov_vf_check(adapter) &&
+ trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return -EIO;
+ }
+
+ mutex_lock(&vf->send_cmd_lock);
+ vf->send_cmd = trans;
+ err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
+ qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
+ mutex_unlock(&vf->send_cmd_lock);
+ return err;
+}
+
+static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+#ifdef CONFIG_QLCNIC_SRIOV
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
+ return;
+ }
+#endif
+ cmd->rsp.arg[0] |= (0x9 << 25);
+ return;
+}
+
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
+ trans_work);
+ struct qlcnic_bc_trans *trans = NULL;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u8 req;
+
+ trans = list_first_entry(&vf->rcv_act.wait_list,
+ struct qlcnic_bc_trans, list);
+ adapter = vf->adapter;
+
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
+ QLC_BC_RESPONSE))
+ goto cleanup_trans;
+
+ __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
+ trans->trans_state = QLC_INIT;
+ __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
+
+cleanup_trans:
+ qlcnic_free_mbx_args(&cmd);
+ req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
+ qlcnic_sriov_cleanup_transaction(trans);
+ if (req)
+ qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+}
+
+static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ u32 pay_size;
+
+ if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ return;
+
+ trans = vf->send_cmd;
+
+ if (trans == NULL)
+ goto clear_send;
+
+ if (trans->trans_id != hdr->seq_id)
+ goto clear_send;
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
+ (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
+ pay_size);
+ if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ goto clear_send;
+
+ complete(&trans->resp_cmpl);
+
+clear_send:
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ spin_lock(&t_list->lock);
+ t_list->count++;
+ list_add_tail(&trans->list, &t_list->wait_list);
+ if (t_list->count == 1)
+ qlcnic_sriov_schedule_bc_cmd(sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+ spin_unlock(&t_list->lock);
+ return 0;
+}
+
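+/* A follow-on fragment (frag_num > 1) arrived: find the matching
+ * transaction on the pending list by sequence id, copy the fragment in
+ * and, once all fragments are present, move it to the active list for
+ * processing.
+ */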
+static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_hdr *hdr)
+{
+ struct qlcnic_bc_trans *trans = NULL;
+ struct list_head *node;
+ u32 pay_size, curr_frag;
+ u8 found = 0, active = 0;
+
+ spin_lock(&vf->rcv_pend.lock);
+ if (vf->rcv_pend.count > 0) {
+ list_for_each(node, &vf->rcv_pend.wait_list) {
+ trans = list_entry(node, struct qlcnic_bc_trans, list);
+ if (trans->trans_id == hdr->seq_id) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ curr_frag = trans->curr_req_frag;
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ curr_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + curr_frag),
+ (u32 *)(trans->req_pay + curr_frag),
+ pay_size);
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag >= hdr->num_frags) {
+ vf->rcv_pend.count--;
+ list_del(&trans->list);
+ active = 1;
+ }
+ }
+ spin_unlock(&vf->rcv_pend.lock);
+
+ if (active)
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans))
+ qlcnic_sriov_cleanup_transaction(trans);
+
+ return;
+}
+
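+/* First fragment of an incoming command: allocate a transaction, pull
+ * the fragment from the mailbox and either queue the completed command
+ * on the active list or park it on the pending list until the remaining
+ * fragments arrive.
+ */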
+static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 pay_size;
+ int err;
+ u8 cmd_op;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
+ hdr->op_type != QLC_BC_CMD &&
+ hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return;
+
+ if (hdr->frag_num > 1) {
+ qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
+ return;
+ }
+
+ cmd_op = hdr->cmd_op;
+ if (qlcnic_sriov_alloc_bc_trans(&trans))
+ return;
+
+ if (hdr->op_type == QLC_BC_CMD)
+ err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
+ else
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
+
+ if (err) {
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ cmd.op_type = hdr->op_type;
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
+ QLC_BC_COMMAND)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + trans->curr_req_frag),
+ (u32 *)(trans->req_pay + trans->curr_req_frag),
+ pay_size);
+ trans->func_id = vf->pci_func;
+ trans->vf = vf;
+ trans->trans_id = hdr->seq_id;
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag == trans->req_hdr->num_frags) {
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+ } else {
+ spin_lock(&vf->rcv_pend.lock);
+ list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
+ vf->rcv_pend.count++;
+ spin_unlock(&vf->rcv_pend.lock);
+ }
+}
+
+static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr hdr;
+ u32 *ptr = (u32 *)&hdr;
+ u8 msg_type, i;
+
+ for (i = 2; i < 6; i++)
+ ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
+ msg_type = hdr.msg_type;
+
+ switch (msg_type) {
+ case QLC_BC_COMMAND:
+ qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
+ break;
+ case QLC_BC_RESPONSE:
+ qlcnic_sriov_handle_bc_resp(&hdr, vf);
+ break;
+ }
+}
+
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
+{
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_sriov *sriov;
+ int index;
+ u8 pci_func;
+
+ sriov = adapter->ahw->sriov;
+ pci_func = qlcnic_sriov_target_func_id(event);
+ index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return;
+
+ vf = &sriov->vf_info[index];
+ vf->pci_func = pci_func;
+
+ if (qlcnic_sriov_channel_free_check(event))
+ complete(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_bc_msg_check(event))
+ qlcnic_sriov_handle_msg_event(sriov, vf);
+}
+
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
+ return -ENOMEM;
+
+ if (enable)
+ cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+
+ err = qlcnic_83xx_mbx_op(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s bc events, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_bc_trans *trans;
+ int err;
+ u32 rsp_data, opcode, mbx_err_code, rsp;
+ u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+
+ if (qlcnic_sriov_alloc_bc_trans(&trans))
+ return -ENOMEM;
+
+ if (qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND)) {
+ rsp = -ENOMEM;
+ goto err_out;
+ }
+
+ if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ rsp = -EIO;
+ QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
+ QLCNIC_MBX_RSP(cmd->req.arg[0]), adapter->ahw->pci_func);
+ goto err_out;
+ }
+
+ err = qlcnic_sriov_send_bc_cmd(adapter, trans, adapter->ahw->pci_func);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x timed out for VF %d\n",
+ (cmd->req.arg[0] & 0xffff), adapter->ahw->pci_func);
+ rsp = QLCNIC_RCODE_TIMEOUT;
+ goto err_out;
+ }
+
+ rsp_data = cmd->rsp.arg[0];
+ mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
+ opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
+
+ if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
+ (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+ dev_err(&adapter->pdev->dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, adapter->ahw->pci_func);
+ }
+
+err_out:
+ qlcnic_sriov_cleanup_transaction(trans);
+ return rsp;
+}
+
+int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
+ int ret;
+
+ if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
+ return -ENOMEM;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
+ ret);
+ goto out;
+ }
+
+ cmd_op = (cmd.rsp.arg[0] & 0xff);
+ if ((cmd.rsp.arg[0] >> 25) == 2) {
+ ret = 2;
+ goto out;
+ }
+ if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+void qlcnic_vf_add_mc_list(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head, tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+ head = &adapter->vf_mc_list;
+ netif_addr_lock_bh(netdev);
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ list_move(&cur->list, &tmp_list);
+ }
+
+ netif_addr_unlock_bh(netdev);
+
+ while (!list_empty(&tmp_list)) {
+ cur = list_entry((&tmp_list)->next,
+ struct qlcnic_mac_list_s, list);
+ qlcnic_nic_add_mac(adapter, cur->mac_addr);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+ struct list_head *head = &bc->async_list;
+ struct qlcnic_async_work_list *entry;
+
+ while (!list_empty(head)) {
+ entry = list_entry(head->next, struct qlcnic_async_work_list,
+ list);
+ cancel_work_sync(&entry->work);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ __qlcnic_set_multi(netdev);
+}
+
+static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+{
+ struct qlcnic_async_work_list *entry;
+ struct net_device *netdev;
+
+ entry = container_of(work, struct qlcnic_async_work_list, work);
+ netdev = (struct net_device *)entry->ptr;
+
+ qlcnic_sriov_vf_set_multi(netdev);
+}
+
+static struct qlcnic_async_work_list *
+qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+{
+ struct list_head *node;
+ struct qlcnic_async_work_list *entry = NULL;
+ u8 empty = 0;
+
+ list_for_each(node, &bc->async_list) {
+ entry = list_entry(node, struct qlcnic_async_work_list, list);
+ if (!work_pending(&entry->work)) {
+ empty = 1;
+ break;
+ }
+ }
+
+ if (!empty) {
+ entry = kzalloc(sizeof(struct qlcnic_async_work_list),
+ GFP_ATOMIC);
+ if (entry == NULL)
+ return NULL;
+ list_add_tail(&entry->list, &bc->async_list);
+ }
+
+ return entry;
+}
+
+static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data)
+{
+ struct qlcnic_async_work_list *entry = NULL;
+
+ entry = qlcnic_sriov_get_free_node_async_work(bc);
+ if (!entry)
+ return;
+
+ entry->ptr = data;
+ INIT_WORK(&entry->work, func);
+ queue_work(bc->bc_async_wq, &entry->work);
+}
+
+void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+ qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
+ netdev);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 00000000000..d6ac7dcef1e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,1176 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include <linux/types.h>
+
+#define QLCNIC_SRIOV_VF_MAX_MAC 1
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
+
+struct qlcnic_sriov_cmd_handler {
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+struct qlcnic_sriov_fw_cmd_handler {
+ u32 cmd;
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info,
+ u16 vport_id)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = (vport_id << 16) | 0x1;
+ cmd.req.arg[2] = npar_info->bit_offsets;
+ cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
+ cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
+ cmd.req.arg[4] = npar_info->max_tx_mac_filters;
+ cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
+ cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
+ (npar_info->max_rx_ip_addr << 16);
+ cmd.req.arg[6] = npar_info->max_rx_lro_flow |
+ (npar_info->max_rx_status_rings << 16);
+ cmd.req.arg[7] = npar_info->max_rx_buf_rings |
+ (npar_info->max_rx_ques << 16);
+ cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
+ cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
+ cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set vport info, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u16 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_resources *res = &sriov->ff_max;
+ int ret = -EIO, vpid;
+ u32 temp, num_vf_macs, num_vfs, max;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0)
+ return -EINVAL;
+
+ num_vfs = sriov->num_vfs;
+ max = num_vfs + 1;
+ info->bit_offsets = 0xffff;
+ info->min_tx_bw = 0;
+ info->max_tx_bw = MAX_BW;
+ info->max_tx_ques = res->num_tx_queues / max;
+ info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
+ num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
+
+ if (adapter->ahw->pci_func == func) {
+ temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
+ info->max_tx_mac_filters = temp;
+ } else {
+ info->max_rx_ucast_mac_filters = num_vf_macs;
+ info->max_tx_mac_filters = num_vf_macs;
+ }
+
+ info->max_rx_ip_addr = res->num_destip / max;
+ info->max_rx_status_rings = res->num_rx_status_rings / max;
+ info->max_rx_buf_rings = res->num_rx_buf_rings / max;
+ info->max_rx_ques = res->num_rx_queues / max;
+ info->max_rx_lro_flow = res->num_lro_flows_supported / max;
+ info->max_tx_vlan_keys = res->num_txvlan_keys;
+ info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
+ info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
+
+ ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
+
+ ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
+ ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
+ ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
+ ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
+ ff_max->num_rx_queues = info->max_rx_ques;
+ ff_max->num_tx_queues = info->max_tx_ques;
+ ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
+ ff_max->num_destip = info->max_rx_ip_addr;
+ ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
+ ff_max->num_rx_status_rings = info->max_rx_status_rings;
+ ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
+ ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
+}
+
+static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = 0x2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PF info, err=%d\n", err);
+ goto out;
+ }
+
+ npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
+ npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
+ npar_info->max_vports = MSW(cmd.rsp.arg[2]);
+ npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
+ npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
+ npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
+ npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+
+ dev_info(&adapter->pdev->dev,
+ "\n\ttotal_pf: %d,\n"
+ "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
+ npar_info->total_pf, npar_info->total_rss_engines,
+ npar_info->max_vports, npar_info->max_tx_ques,
+ npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = 0;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = 0;
+ }
+}
+
+static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
+ u16 vport_handle, u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = vport_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = vport_handle;
+ }
+}
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ return sriov->vp_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index >= 0) {
+ vf_info = &sriov->vf_info[index];
+ return vf_info->vp->handle;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
+ u8 flag, u16 func)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
+ return -ENOMEM;
+
+ if (flag) {
+ cmd.req.arg[3] = func << 8;
+ } else {
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
+ }
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed %s vport, err %d for func 0x%x\n",
+ (flag ? "enable" : "disable"), ret, func);
+ goto out;
+ }
+
+ if (flag) {
+ vpid = cmd.rsp.arg[2] & 0xffff;
+ qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
+ } else {
+ qlcnic_sriov_pf_reset_vport_handle(adapter, func);
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
+ u8 func, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
+ return -ENOMEM;
+
+ cmd.req.arg[0] |= (3 << 29);
+ cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
+ if (enable)
+ cmd.req.arg[1] |= BIT_0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to enable sriov eswitch%d\n", err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
+{
+ u8 func = adapter->ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+ __qlcnic_sriov_cleanup(adapter);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
+{
+ if (!qlcnic_sriov_pf_check(adapter))
+ return;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ pci_disable_sriov(adapter->pdev);
+ netdev_info(adapter->netdev,
+ "SR-IOV is disabled successfully on port %d\n",
+ adapter->portnum);
+}
+
+static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_pf_disable(adapter);
+
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ /* After disabling SR-IOV, re-initialize the driver in default mode;
+ * configure the opmode based on the op_mode of the function.
+ */
+ if (qlcnic_83xx_configure_opmode(adapter))
+ return -EIO;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info, pf_info, vp_info;
+ int err;
+ u8 func = ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
+ if (err)
+ goto clear_sriov_enable;
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (err)
+ goto disable_eswitch;
+
+ err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
+ if (err)
+ goto delete_vport;
+
+ qlcnic_sriov_pf_set_ff_max_res(adapter, &pf_info);
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto delete_vport;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->nic_mode = QLC_83XX_SRIOV_MODE;
+ return err;
+
+delete_vport:
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+
+disable_eswitch:
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+
+clear_sriov_enable:
+ __qlcnic_sriov_cleanup(adapter);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ return err;
+}
+
+static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err)
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ return err;
+}
+
+static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+ int num_vfs)
+{
+ int err = 0;
+
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ if (qlcnic_sriov_init(adapter, num_vfs)) {
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ return -EIO;
+ }
+
+ if (qlcnic_sriov_pf_init(adapter))
+ return -EIO;
+
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ return err;
+}
+
+static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(netdev,
+ "SR-IOV cannot be enabled, when legacy interrupts are enabled\n");
+ return -EIO;
+ }
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
+ if (err) {
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
+ if (qlcnic_83xx_configure_opmode(adapter))
+ goto error;
+ } else {
+ netdev_info(adapter->netdev,
+ "SR-IOV is enabled successfully on port %d\n",
+ adapter->portnum);
+ }
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+error:
+ return err;
+}
+
+int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (num_vfs == 0)
+ err = qlcnic_pci_sriov_disable(adapter);
+ else
+ err = qlcnic_pci_sriov_enable(adapter, num_vfs);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
+ u16 func)
+{
+ struct qlcnic_info defvp_info;
+ int err;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
+ if (err)
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+ u16 func = vf->pci_func;
+
+ cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
+ cmd->rsp.arg[0] |= (1 << 16);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (!err) {
+ err = qlcnic_sriov_set_vf_vport_info(adapter, func);
+ if (err)
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+ } else {
+ err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+
+ if (err)
+ goto err_out;
+
+ cmd->rsp.arg[0] |= (1 << 25);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+ return err;
+
+err_out:
+ cmd->rsp.arg[0] |= (2 << 25);
+ return err;
+}
+
+static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp,
+ u16 func, __le16 vlan, u8 op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_macvlan_mbx mv;
+ u8 *addr;
+ int err;
+ u32 *buf;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+ return -ENOMEM;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (vlan)
+ op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
+
+ cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+
+ addr = vp->mac;
+ mv.vlan = le16_to_cpu(vlan);
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd.req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "MAC-VLAN %s to CAM failed, err=%d.\n",
+ ((op == 1) ? "add " : "delete "), err);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_rx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[6] = vf->vp->handle;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err) {
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
+ vf->rx_ctx_id = mbx_out->ctx_id;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
+ 0, QLCNIC_MAC_ADD);
+ } else {
+ vf->rx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u8 type, *mac;
+
+ type = cmd->req.arg[1];
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ cmd->rsp.arg[0] = (2 << 25);
+ break;
+ case QLCNIC_GET_CURRENT_MAC:
+ cmd->rsp.arg[0] = (1 << 25);
+ mac = vf->vp->mac;
+ cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
+ cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
+ ((mac[3]) << 16 & 0xff0000) |
+ ((mac[2]) << 24 & 0xff000000);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_tx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[5] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err) {
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
+ vf->tx_ctx_id = mbx_out->ctx_id;
+ } else {
+ vf->tx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
+ 0, QLCNIC_MAC_DEL);
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->rx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->tx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err = -EIO;
+ u8 op;
+
+ op = cmd->req.arg[1] & 0xff;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_16))
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xff) != 0x1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (cmd->req.arg[2] > adapter->ahw->max_mtu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] & BIT_31) {
+ if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
+ return -EINVAL;
+ } else {
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u16 ctx_id, pkts, time;
+
+ ctx_id = cmd->req.arg[1] >> 16;
+ pkts = cmd->req.arg[2] & 0xffff;
+ time = cmd->req.arg[2] >> 16;
+
+ if (ctx_id != vf->rx_ctx_id)
+ return -EINVAL;
+ if (pkts > coal->rx_packets)
+ return -EINVAL;
+ if (time < coal->rx_time_us)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_macvlan_mbx *macvlan;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ cmd->req.arg[1] |= (vf->vp->handle << 16);
+ cmd->req.arg[1] |= BIT_31;
+
+ macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
+ if (!(macvlan->mac_addr0 & BIT_0)) {
+ dev_err(&adapter->pdev->dev,
+ "MAC address change is not allowed from VF %d",
+ vf->pci_func);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_linkevent(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static const int qlcnic_pf_passthru_supp_cmds[] = {
+ QLCNIC_CMD_GET_STATISTICS,
+ QLCNIC_CMD_GET_PORT_CONFIG,
+ QLCNIC_CMD_GET_LINK_STATUS,
+};
+
+static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
+ [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+};
+
+static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
+ {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
+ {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
+ {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
+ {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
+ {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
+ {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
+ {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
+ {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
+ {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
+};
+
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 size, cmd_op;
+
+ cmd_op = trans->req_hdr->cmd_op;
+
+ if (trans->req_hdr->op_type == QLC_BC_CMD) {
+ size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
+ if (cmd_op < size) {
+ qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
+ return;
+ }
+ } else {
+ int i;
+ size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
+ qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
+ return;
+ }
+ }
+
+ size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
+ qlcnic_issue_cmd(adapter, cmd);
+ return;
+ }
+ }
+ }
+
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid;
+}
+
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ int vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 987fb6f8adc..c77675da671 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -21,8 +21,6 @@
#include <linux/aer.h>
#include <linux/log2.h>
-#include <linux/sysfs.h>
-
#define QLC_STATUS_UNSUPPORTED_CMD -2
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
@@ -886,6 +884,244 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
return size;
}
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ unsigned char *p_read_buf;
+ int ret, count;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!size)
+ return QL_STATUS_INVALID_PARAM;
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ count = size / sizeof(u32);
+
+ if (size % sizeof(u32))
+ count++;
+
+ p_read_buf = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!p_read_buf)
+ return -ENOMEM;
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_read_buf);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+ count);
+
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ kfree(p_read_buf);
+ return ret;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+ memcpy(buf, p_read_buf, size);
+ kfree(p_read_buf);
+
+ return size;
+}
+
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+ (u32 *)p_src,
+ QLC_83XX_FLASH_WRITE_MAX);
+
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+ offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset, size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32);
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int ret;
+ static int flash_mode;
+ unsigned long data = 0;
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!buf)
+ return QL_STATUS_INVALID_PARAM;
+
+ ret = kstrtoul(buf, 16, &data);
+
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+ flash_mode = QLC_83XX_ERASE_MODE;
+ ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ break;
+
+ case QLC_83XX_FLASH_BULK_WRITE_CMD:
+ flash_mode = QLC_83XX_BULK_WRITE_MODE;
+ break;
+
+ case QLC_83XX_FLASH_WRITE_CMD:
+ flash_mode = QLC_83XX_WRITE_MODE;
+ break;
+ default:
+ if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+
+ if (flash_mode == QLC_83XX_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__,
+ __LINE__);
+ return -EIO;
+ }
+ }
+ }
+
+ return size;
+}
+
static struct device_attribute dev_attr_bridged_mode = {
.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
.show = qlcnic_show_bridged_mode,
@@ -960,6 +1196,13 @@ static struct bin_attribute bin_attr_pm_config = {
.write = qlcnic_sysfs_write_pm_config,
};
+static struct bin_attribute bin_attr_flash = {
+ .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
@@ -1048,10 +1291,18 @@ void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
+
qlcnic_create_diag_entries(adapter);
+
+ if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+ dev_info(dev, "failed to create flash sysfs entry\n");
}
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
+
qlcnic_remove_diag_entries(adapter);
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b13ab544a7e..1dd778a6f01 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1519,8 +1517,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
- netif_err(qdev, drv, qdev->ndev,
- "Couldn't get an skb, need to unwind!.\n");
rx_ring->rx_dropped++;
put_page(lbq_desc->p.pg_chunk.page);
return;
@@ -1605,8 +1601,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Allocate new_skb and copy */
new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
if (new_skb == NULL) {
- netif_err(qdev, probe, qdev->ndev,
- "No skb available, drop the packet.\n");
rx_ring->rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 5b4103db70f..e9dc84943cf 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
break;
}
+ if (limit < 0)
+ return -ETIMEDOUT;
+
return ioread16(ioaddr + MMRD);
}
/* Write a word data from PHY Chip */
-static void r6040_phy_write(void __iomem *ioaddr,
+static int r6040_phy_write(void __iomem *ioaddr,
int phy_addr, int reg, u16 val)
{
int limit = MAC_DEF_TIMEOUT;
@@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr,
if (!(cmd & MDIO_WRITE))
break;
}
+
+ return (limit < 0) ? -ETIMEDOUT : 0;
}
static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
@@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- r6040_phy_write(ioaddr, phy_addr, reg, value);
-
- return 0;
+ return r6040_phy_write(ioaddr, phy_addr, reg, value);
}
static int r6040_mdiobus_reset(struct mii_bus *bus)
@@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
do {
skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
if (!skb) {
- netdev_err(dev, "failed to alloc skb for rx\n");
rc = -ENOMEM;
goto err_exit;
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1276ac71353..3ccedeb8aba 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2041,8 +2041,6 @@ keep_pkt:
netif_receive_skb (skb);
} else {
- if (net_ratelimit())
- netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
received++;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 9f2d416de75..d77d60ea820 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev)
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
- dev->name);
dev->stats.rx_dropped++;
goto done;
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6ed333fe5c0..a7499cbf450 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2,7 +2,8 @@
* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2012 Renesas Solutions Corp.
+ * Copyright (C) 2008-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -49,6 +50,269 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDSR] = 0x0000,
+ [EDMR] = 0x0400,
+ [EDTRR] = 0x0408,
+ [EDRRR] = 0x0410,
+ [EESR] = 0x0428,
+ [EESIPR] = 0x0430,
+ [TDLAR] = 0x0010,
+ [TDFAR] = 0x0014,
+ [TDFXR] = 0x0018,
+ [TDFFR] = 0x001c,
+ [RDLAR] = 0x0030,
+ [RDFAR] = 0x0034,
+ [RDFXR] = 0x0038,
+ [RDFFR] = 0x003c,
+ [TRSCER] = 0x0438,
+ [RMFCR] = 0x0440,
+ [TFTR] = 0x0448,
+ [FDR] = 0x0450,
+ [RMCR] = 0x0458,
+ [RPADIR] = 0x0460,
+ [FCFTR] = 0x0468,
+ [CSMR] = 0x04E4,
+
+ [ECMR] = 0x0500,
+ [ECSR] = 0x0510,
+ [ECSIPR] = 0x0518,
+ [PIR] = 0x0520,
+ [PSR] = 0x0528,
+ [PIPR] = 0x052c,
+ [RFLR] = 0x0508,
+ [APR] = 0x0554,
+ [MPR] = 0x0558,
+ [PFTCR] = 0x055c,
+ [PFRCR] = 0x0560,
+ [TPAUSER] = 0x0564,
+ [GECMR] = 0x05b0,
+ [BCULR] = 0x05b4,
+ [MAHR] = 0x05c0,
+ [MALR] = 0x05c8,
+ [TROCR] = 0x0700,
+ [CDCR] = 0x0708,
+ [LCCR] = 0x0710,
+ [CEFCR] = 0x0740,
+ [FRECR] = 0x0748,
+ [TSFRCR] = 0x0750,
+ [TLFRCR] = 0x0758,
+ [RFCR] = 0x0760,
+ [CERCR] = 0x0768,
+ [CEECR] = 0x0770,
+ [MAFCR] = 0x0778,
+ [RMII_MII] = 0x0790,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAG0] = 0x0040,
+ [TSU_QTAG1] = 0x0044,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_VTAG0] = 0x0058,
+ [TSU_VTAG1] = 0x005c,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRH31] = 0x01f8,
+ [TSU_ADRL31] = 0x01fc,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+};
+
+static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0300,
+ [RFLR] = 0x0308,
+ [ECSR] = 0x0310,
+ [ECSIPR] = 0x0318,
+ [PIR] = 0x0320,
+ [PSR] = 0x0328,
+ [RDMLR] = 0x0340,
+ [IPGR] = 0x0350,
+ [APR] = 0x0354,
+ [MPR] = 0x0358,
+ [RFCF] = 0x0360,
+ [TPAUSER] = 0x0364,
+ [TPAUSECR] = 0x0368,
+ [MAHR] = 0x03c0,
+ [MALR] = 0x03c8,
+ [TROCR] = 0x03d0,
+ [CDCR] = 0x03d4,
+ [LCCR] = 0x03d8,
+ [CNDCR] = 0x03dc,
+ [CEFCR] = 0x03e4,
+ [FRECR] = 0x03e8,
+ [TSFRCR] = 0x03ec,
+ [TLFRCR] = 0x03f0,
+ [RFCR] = 0x03f4,
+ [MAFCR] = 0x03f8,
+
+ [EDMR] = 0x0200,
+ [EDTRR] = 0x0208,
+ [EDRRR] = 0x0210,
+ [TDLAR] = 0x0218,
+ [RDLAR] = 0x0220,
+ [EESR] = 0x0228,
+ [EESIPR] = 0x0230,
+ [TRSCER] = 0x0238,
+ [RMFCR] = 0x0240,
+ [TFTR] = 0x0248,
+ [FDR] = 0x0250,
+ [RMCR] = 0x0258,
+ [TFUCR] = 0x0264,
+ [RFOCR] = 0x0268,
+ [FCFTR] = 0x0270,
+ [TRIMD] = 0x027c,
+};
+
+static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0100,
+ [RFLR] = 0x0108,
+ [ECSR] = 0x0110,
+ [ECSIPR] = 0x0118,
+ [PIR] = 0x0120,
+ [PSR] = 0x0128,
+ [RDMLR] = 0x0140,
+ [IPGR] = 0x0150,
+ [APR] = 0x0154,
+ [MPR] = 0x0158,
+ [TPAUSER] = 0x0164,
+ [RFCF] = 0x0160,
+ [TPAUSECR] = 0x0168,
+ [BCFRR] = 0x016c,
+ [MAHR] = 0x01c0,
+ [MALR] = 0x01c8,
+ [TROCR] = 0x01d0,
+ [CDCR] = 0x01d4,
+ [LCCR] = 0x01d8,
+ [CNDCR] = 0x01dc,
+ [CEFCR] = 0x01e4,
+ [FRECR] = 0x01e8,
+ [TSFRCR] = 0x01ec,
+ [TLFRCR] = 0x01f0,
+ [RFCR] = 0x01f4,
+ [MAFCR] = 0x01f8,
+ [RTRATE] = 0x01fc,
+
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0008,
+ [EDRRR] = 0x0010,
+ [TDLAR] = 0x0018,
+ [RDLAR] = 0x0020,
+ [EESR] = 0x0028,
+ [EESIPR] = 0x0030,
+ [TRSCER] = 0x0038,
+ [RMFCR] = 0x0040,
+ [TFTR] = 0x0048,
+ [FDR] = 0x0050,
+ [RMCR] = 0x0058,
+ [TFUCR] = 0x0064,
+ [RFOCR] = 0x0068,
+ [FCFTR] = 0x0070,
+ [RPADIR] = 0x0078,
+ [TRIMD] = 0x007c,
+ [RBWAR] = 0x00c8,
+ [RDFAR] = 0x00cc,
+ [TBRAR] = 0x00d4,
+ [TDFAR] = 0x00d8,
+};
+
+static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [ECMR] = 0x0160,
+ [ECSR] = 0x0164,
+ [ECSIPR] = 0x0168,
+ [PIR] = 0x016c,
+ [MAHR] = 0x0170,
+ [MALR] = 0x0174,
+ [RFLR] = 0x0178,
+ [PSR] = 0x017c,
+ [TROCR] = 0x0180,
+ [CDCR] = 0x0184,
+ [LCCR] = 0x0188,
+ [CNDCR] = 0x018c,
+ [CEFCR] = 0x0194,
+ [FRECR] = 0x0198,
+ [TSFRCR] = 0x019c,
+ [TLFRCR] = 0x01a0,
+ [RFCR] = 0x01a4,
+ [MAFCR] = 0x01a8,
+ [IPGR] = 0x01b4,
+ [APR] = 0x01b8,
+ [MPR] = 0x01bc,
+ [TPAUSER] = 0x01c4,
+ [BCFR] = 0x01cc,
+
+ [ARSTR] = 0x0000,
+ [TSU_CTRST] = 0x0004,
+ [TSU_FWEN0] = 0x0010,
+ [TSU_FWEN1] = 0x0014,
+ [TSU_FCM] = 0x0018,
+ [TSU_BSYSL0] = 0x0020,
+ [TSU_BSYSL1] = 0x0024,
+ [TSU_PRISL0] = 0x0028,
+ [TSU_PRISL1] = 0x002c,
+ [TSU_FWSL0] = 0x0030,
+ [TSU_FWSL1] = 0x0034,
+ [TSU_FWSLC] = 0x0038,
+ [TSU_QTAGM0] = 0x0040,
+ [TSU_QTAGM1] = 0x0044,
+ [TSU_ADQT0] = 0x0048,
+ [TSU_ADQT1] = 0x004c,
+ [TSU_FWSR] = 0x0050,
+ [TSU_FWINMK] = 0x0054,
+ [TSU_ADSBSY] = 0x0060,
+ [TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
+
+ [TXNLCR0] = 0x0080,
+ [TXALCR0] = 0x0084,
+ [RXNLCR0] = 0x0088,
+ [RXALCR0] = 0x008c,
+ [FWNLCR0] = 0x0090,
+ [FWALCR0] = 0x0094,
+ [TXNLCR1] = 0x00a0,
+ [TXALCR1] = 0x00a0,
+ [RXNLCR1] = 0x00a8,
+ [RXALCR1] = 0x00ac,
+ [FWNLCR1] = 0x00b0,
+ [FWALCR1] = 0x00b4,
+
+ [TSU_ADRH0] = 0x0100,
+ [TSU_ADRL0] = 0x0104,
+ [TSU_ADRL31] = 0x01fc,
+};
+
#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
defined(CONFIG_CPU_SUBTYPE_SH7763) || \
defined(CONFIG_ARCH_R8A7740)
@@ -78,7 +342,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
#endif
/* There is CPU dependent code */
-#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779)
+#if defined(CONFIG_ARCH_R8A7779)
#define SH_ETH_RESET_DEFAULT 1
static void sh_eth_set_duplex(struct net_device *ndev)
{
@@ -93,18 +357,60 @@ static void sh_eth_set_duplex(struct net_device *ndev)
static void sh_eth_set_rate(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned int bits = ECMR_RTM;
-#if defined(CONFIG_ARCH_R8A7779)
- bits |= ECMR_ELB;
-#endif
+ switch (mdp->speed) {
+ case 10: /* 10BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
+ break;
+ case 100:/* 100BASE */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
+ break;
+ default:
+ break;
+ }
+}
+
+/* R8A7779 */
+static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
+ EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+ .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+};
+#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
+#define SH_ETH_RESET_DEFAULT 1
+static void sh_eth_set_duplex(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (mdp->duplex) /* Full */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
+ else /* Half */
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+}
+
+static void sh_eth_set_rate(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
switch (mdp->speed) {
case 10: /* 10BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
break;
case 100:/* 100BASE */
- sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR);
+ sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
break;
default:
break;
@@ -592,7 +898,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
cnt--;
}
if (cnt < 0) {
- printk(KERN_ERR "Device reset fail\n");
+ pr_err("Device reset fail\n");
ret = -ETIMEDOUT;
}
return ret;
@@ -908,11 +1214,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Rx descriptors. */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
- GFP_KERNEL);
-
+ GFP_KERNEL);
if (!mdp->rx_ring) {
- dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
- rx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -922,10 +1225,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
/* Allocate all Tx descriptors. */
tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!mdp->tx_ring) {
- dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
- tx_ringsize);
ret = -ENOMEM;
goto desc_ring_free;
}
@@ -2228,7 +2529,6 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
/* unregister mdio bus */
@@ -2237,15 +2537,9 @@ static int sh_mdio_release(struct net_device *ndev)
/* remove mdio bus info from net_device */
dev_set_drvdata(&ndev->dev, NULL);
- /* free interrupts memory */
- kfree(bus->irq);
-
/* free bitbang info */
free_mdio_bitbang(bus);
- /* free bitbang memory */
- kfree(mdp->bitbang);
-
return 0;
}
@@ -2258,7 +2552,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
struct sh_eth_private *mdp = netdev_priv(ndev);
/* create bit control struct for PHY */
- bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
+ bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
+ GFP_KERNEL);
if (!bitbang) {
ret = -ENOMEM;
goto out;
@@ -2267,18 +2562,17 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* bitbang init */
bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
bitbang->set_gate = pd->set_mdio_gate;
- bitbang->mdi_msk = 0x08;
- bitbang->mdo_msk = 0x04;
- bitbang->mmd_msk = 0x02;/* MMD */
- bitbang->mdc_msk = 0x01;
+ bitbang->mdi_msk = PIR_MDI;
+ bitbang->mdo_msk = PIR_MDO;
+ bitbang->mmd_msk = PIR_MMD;
+ bitbang->mdc_msk = PIR_MDC;
bitbang->ctrl.ops = &bb_ops;
/* MII controller setting */
- mdp->bitbang = bitbang;
mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!mdp->mii_bus) {
ret = -ENOMEM;
- goto out_free_bitbang;
+ goto out;
}
/* Hook up MII support for ethtool */
@@ -2288,7 +2582,9 @@ static int sh_mdio_init(struct net_device *ndev, int id,
mdp->pdev->name, id);
/* PHY IRQ */
- mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
+ sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
@@ -2300,21 +2596,15 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* register mdio bus */
ret = mdiobus_register(mdp->mii_bus);
if (ret)
- goto out_free_irq;
+ goto out_free_bus;
dev_set_drvdata(&ndev->dev, mdp->mii_bus);
return 0;
-out_free_irq:
- kfree(mdp->mii_bus->irq);
-
out_free_bus:
free_mdio_bitbang(mdp->mii_bus);
-out_free_bitbang:
- kfree(bitbang);
-
out:
return ret;
}
@@ -2327,6 +2617,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
case SH_ETH_REG_GIGABIT:
reg_offset = sh_eth_offset_gigabit;
break;
+ case SH_ETH_REG_FAST_RCAR:
+ reg_offset = sh_eth_offset_fast_rcar;
+ break;
case SH_ETH_REG_FAST_SH4:
reg_offset = sh_eth_offset_fast_sh4;
break;
@@ -2334,7 +2627,7 @@ static const u16 *sh_eth_get_register_offset(int register_type)
reg_offset = sh_eth_offset_fast_sh3_sh2;
break;
default:
- printk(KERN_ERR "Unknown register type (%d)\n", register_type);
+ pr_err("Unknown register type (%d)\n", register_type);
break;
}
@@ -2364,7 +2657,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp = NULL;
- struct sh_eth_plat_data *pd;
+ struct sh_eth_plat_data *pd = pdev->dev.platform_data;
/* get base addr */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2402,10 +2695,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
- mdp->addr = ioremap(res->start, resource_size(res));
- if (mdp->addr == NULL) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "ioremap failed.\n");
+ mdp->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mdp->addr)) {
+ ret = PTR_ERR(mdp->addr);
goto out_release;
}
@@ -2414,7 +2706,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
- pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
/* get PHY ID */
mdp->phy_id = pd->phy;
mdp->phy_interface = pd->phy_interface;
@@ -2452,11 +2743,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ret = -ENODEV;
goto out_release;
}
- mdp->tsu_addr = ioremap(rtsu->start,
- resource_size(rtsu));
- if (mdp->tsu_addr == NULL) {
- ret = -ENOMEM;
- dev_err(&pdev->dev, "TSU ioremap failed.\n");
+ mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
+ if (IS_ERR(mdp->tsu_addr)) {
+ ret = PTR_ERR(mdp->tsu_addr);
goto out_release;
}
mdp->port = devno % 2;
@@ -2497,10 +2786,6 @@ out_unregister:
out_release:
/* net_dev free */
- if (mdp && mdp->addr)
- iounmap(mdp->addr);
- if (mdp && mdp->tsu_addr)
- iounmap(mdp->tsu_addr);
if (ndev)
free_netdev(ndev);
@@ -2511,14 +2796,10 @@ out:
static int sh_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct sh_eth_private *mdp = netdev_priv(ndev);
- if (mdp->cd->tsu)
- iounmap(mdp->tsu_addr);
sh_mdio_release(ndev);
unregister_netdev(ndev);
pm_runtime_disable(&pdev->dev);
- iounmap(mdp->addr);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
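Note on the sh_eth conversion above: manual allocations are switched to device-managed ones (kzalloc()/kmalloc() become devm_kzalloc(), ioremap() becomes devm_ioremap_resource()), which is why the probe error paths, sh_mdio_release() and sh_eth_drv_remove() can drop their kfree()/iounmap() calls; the resources are released automatically when the device is unbound. A minimal sketch of that pattern for a hypothetical platform driver (struct foo_priv and foo_probe() are illustrative names, not part of sh_eth):

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	struct foo_priv {			/* hypothetical private data */
		void __iomem *regs;
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		struct foo_priv *priv;

		/* Freed automatically on unbind - no kfree() in remove(). */
		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		/* Mapped region is released automatically - no iounmap(). */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		priv->regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->regs))
			return PTR_ERR(priv->regs);

		platform_set_drvdata(pdev, priv);
		return 0;
	}

The same idea covers the PHY IRQ table in sh_mdio_init(), where devm_kzalloc() replaces the kmalloc()/kfree() pair and removes the out_free_irq unwind label.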
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 828be451500..1ddc9f235bc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -156,225 +156,6 @@ enum {
SH_ETH_MAX_REGISTER_OFFSET,
};
-static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
- [EDSR] = 0x0000,
- [EDMR] = 0x0400,
- [EDTRR] = 0x0408,
- [EDRRR] = 0x0410,
- [EESR] = 0x0428,
- [EESIPR] = 0x0430,
- [TDLAR] = 0x0010,
- [TDFAR] = 0x0014,
- [TDFXR] = 0x0018,
- [TDFFR] = 0x001c,
- [RDLAR] = 0x0030,
- [RDFAR] = 0x0034,
- [RDFXR] = 0x0038,
- [RDFFR] = 0x003c,
- [TRSCER] = 0x0438,
- [RMFCR] = 0x0440,
- [TFTR] = 0x0448,
- [FDR] = 0x0450,
- [RMCR] = 0x0458,
- [RPADIR] = 0x0460,
- [FCFTR] = 0x0468,
- [CSMR] = 0x04E4,
-
- [ECMR] = 0x0500,
- [ECSR] = 0x0510,
- [ECSIPR] = 0x0518,
- [PIR] = 0x0520,
- [PSR] = 0x0528,
- [PIPR] = 0x052c,
- [RFLR] = 0x0508,
- [APR] = 0x0554,
- [MPR] = 0x0558,
- [PFTCR] = 0x055c,
- [PFRCR] = 0x0560,
- [TPAUSER] = 0x0564,
- [GECMR] = 0x05b0,
- [BCULR] = 0x05b4,
- [MAHR] = 0x05c0,
- [MALR] = 0x05c8,
- [TROCR] = 0x0700,
- [CDCR] = 0x0708,
- [LCCR] = 0x0710,
- [CEFCR] = 0x0740,
- [FRECR] = 0x0748,
- [TSFRCR] = 0x0750,
- [TLFRCR] = 0x0758,
- [RFCR] = 0x0760,
- [CERCR] = 0x0768,
- [CEECR] = 0x0770,
- [MAFCR] = 0x0778,
- [RMII_MII] = 0x0790,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWEN0] = 0x0010,
- [TSU_FWEN1] = 0x0014,
- [TSU_FCM] = 0x0018,
- [TSU_BSYSL0] = 0x0020,
- [TSU_BSYSL1] = 0x0024,
- [TSU_PRISL0] = 0x0028,
- [TSU_PRISL1] = 0x002c,
- [TSU_FWSL0] = 0x0030,
- [TSU_FWSL1] = 0x0034,
- [TSU_FWSLC] = 0x0038,
- [TSU_QTAG0] = 0x0040,
- [TSU_QTAG1] = 0x0044,
- [TSU_FWSR] = 0x0050,
- [TSU_FWINMK] = 0x0054,
- [TSU_ADQT0] = 0x0048,
- [TSU_ADQT1] = 0x004c,
- [TSU_VTAG0] = 0x0058,
- [TSU_VTAG1] = 0x005c,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
- [TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRH31] = 0x01f8,
- [TSU_ADRL31] = 0x01fc,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008c,
- [FWNLCR0] = 0x0090,
- [FWALCR0] = 0x0094,
- [TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
- [RXNLCR1] = 0x00a8,
- [RXALCR1] = 0x00ac,
- [FWNLCR1] = 0x00b0,
- [FWALCR1] = 0x00b4,
-};
-
-static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
- [ECMR] = 0x0100,
- [RFLR] = 0x0108,
- [ECSR] = 0x0110,
- [ECSIPR] = 0x0118,
- [PIR] = 0x0120,
- [PSR] = 0x0128,
- [RDMLR] = 0x0140,
- [IPGR] = 0x0150,
- [APR] = 0x0154,
- [MPR] = 0x0158,
- [TPAUSER] = 0x0164,
- [RFCF] = 0x0160,
- [TPAUSECR] = 0x0168,
- [BCFRR] = 0x016c,
- [MAHR] = 0x01c0,
- [MALR] = 0x01c8,
- [TROCR] = 0x01d0,
- [CDCR] = 0x01d4,
- [LCCR] = 0x01d8,
- [CNDCR] = 0x01dc,
- [CEFCR] = 0x01e4,
- [FRECR] = 0x01e8,
- [TSFRCR] = 0x01ec,
- [TLFRCR] = 0x01f0,
- [RFCR] = 0x01f4,
- [MAFCR] = 0x01f8,
- [RTRATE] = 0x01fc,
-
- [EDMR] = 0x0000,
- [EDTRR] = 0x0008,
- [EDRRR] = 0x0010,
- [TDLAR] = 0x0018,
- [RDLAR] = 0x0020,
- [EESR] = 0x0028,
- [EESIPR] = 0x0030,
- [TRSCER] = 0x0038,
- [RMFCR] = 0x0040,
- [TFTR] = 0x0048,
- [FDR] = 0x0050,
- [RMCR] = 0x0058,
- [TFUCR] = 0x0064,
- [RFOCR] = 0x0068,
- [FCFTR] = 0x0070,
- [RPADIR] = 0x0078,
- [TRIMD] = 0x007c,
- [RBWAR] = 0x00c8,
- [RDFAR] = 0x00cc,
- [TBRAR] = 0x00d4,
- [TDFAR] = 0x00d8,
-};
-
-static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
- [ECMR] = 0x0160,
- [ECSR] = 0x0164,
- [ECSIPR] = 0x0168,
- [PIR] = 0x016c,
- [MAHR] = 0x0170,
- [MALR] = 0x0174,
- [RFLR] = 0x0178,
- [PSR] = 0x017c,
- [TROCR] = 0x0180,
- [CDCR] = 0x0184,
- [LCCR] = 0x0188,
- [CNDCR] = 0x018c,
- [CEFCR] = 0x0194,
- [FRECR] = 0x0198,
- [TSFRCR] = 0x019c,
- [TLFRCR] = 0x01a0,
- [RFCR] = 0x01a4,
- [MAFCR] = 0x01a8,
- [IPGR] = 0x01b4,
- [APR] = 0x01b8,
- [MPR] = 0x01bc,
- [TPAUSER] = 0x01c4,
- [BCFR] = 0x01cc,
-
- [ARSTR] = 0x0000,
- [TSU_CTRST] = 0x0004,
- [TSU_FWEN0] = 0x0010,
- [TSU_FWEN1] = 0x0014,
- [TSU_FCM] = 0x0018,
- [TSU_BSYSL0] = 0x0020,
- [TSU_BSYSL1] = 0x0024,
- [TSU_PRISL0] = 0x0028,
- [TSU_PRISL1] = 0x002c,
- [TSU_FWSL0] = 0x0030,
- [TSU_FWSL1] = 0x0034,
- [TSU_FWSLC] = 0x0038,
- [TSU_QTAGM0] = 0x0040,
- [TSU_QTAGM1] = 0x0044,
- [TSU_ADQT0] = 0x0048,
- [TSU_ADQT1] = 0x004c,
- [TSU_FWSR] = 0x0050,
- [TSU_FWINMK] = 0x0054,
- [TSU_ADSBSY] = 0x0060,
- [TSU_TEN] = 0x0064,
- [TSU_POST1] = 0x0070,
- [TSU_POST2] = 0x0074,
- [TSU_POST3] = 0x0078,
- [TSU_POST4] = 0x007c,
-
- [TXNLCR0] = 0x0080,
- [TXALCR0] = 0x0084,
- [RXNLCR0] = 0x0088,
- [RXALCR0] = 0x008c,
- [FWNLCR0] = 0x0090,
- [FWALCR0] = 0x0094,
- [TXNLCR1] = 0x00a0,
- [TXALCR1] = 0x00a0,
- [RXNLCR1] = 0x00a8,
- [RXALCR1] = 0x00ac,
- [FWNLCR1] = 0x00b0,
- [FWALCR1] = 0x00b4,
-
- [TSU_ADRH0] = 0x0100,
- [TSU_ADRL0] = 0x0104,
- [TSU_ADRL31] = 0x01fc,
-
-};
-
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#define SH4_SKB_RX_ALIGN 32
@@ -705,7 +486,6 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
- struct bb_info *bitbang;
u32 num_rx_ring;
u32 num_tx_ring;
dma_addr_t rx_desc_dma;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 21683e2b1ff..b6739afeaca 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -998,6 +998,7 @@ static int s6gmac_probe(struct platform_device *pdev)
mb = mdiobus_alloc();
if (!mb) {
printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+ res = -ENOMEM;
goto errmii;
}
mb->name = "s6gmac_mii";
@@ -1053,20 +1054,7 @@ static struct platform_driver s6gmac_driver = {
},
};
-static int __init s6gmac_init(void)
-{
- printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
- return platform_driver_register(&s6gmac_driver);
-}
-
-
-static void __exit s6gmac_exit(void)
-{
- platform_driver_unregister(&s6gmac_driver);
-}
-
-module_init(s6gmac_init);
-module_exit(s6gmac_exit);
+module_platform_driver(s6gmac_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 3aca57853ed..bdac936a68b 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
received ++;
- } else
- goto dropping;
+ } else {
+ ether3_outw(next_ptr >> 8, REG_RECVEND);
+ dev->stats.rx_dropped++;
+ goto done;
+ }
} else {
struct net_device_stats *stats = &dev->stats;
ether3_outw(next_ptr >> 8, REG_RECVEND);
@@ -679,21 +682,6 @@ done:
}
return maxcnt;
-
-dropping:{
- static unsigned long last_warned;
-
- ether3_outw(next_ptr >> 8, REG_RECVEND);
- /*
- * Don't print this message too many times...
- */
- if (time_after(jiffies, last_warned + 10 * HZ)) {
- last_warned = jiffies;
- printk("%s: memory squeeze, dropping packet.\n", dev->name);
- }
- dev->stats.rx_dropped++;
- goto done;
- }
}
/*
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 0fde9ca2826..0ad5694b41f 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -381,8 +381,6 @@ memory_squeeze:
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
} else {
- printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
- dev->name);
dev->stats.rx_dropped++;
}
} else {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0bc00991d31..01b99206139 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
+#include <linux/aer.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"
@@ -71,21 +72,21 @@ const char *const efx_loopback_mode_names[] = {
const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
- [RESET_TYPE_INVISIBLE] = "INVISIBLE",
- [RESET_TYPE_ALL] = "ALL",
- [RESET_TYPE_WORLD] = "WORLD",
- [RESET_TYPE_DISABLE] = "DISABLE",
- [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
- [RESET_TYPE_INT_ERROR] = "INT_ERROR",
- [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
- [RESET_TYPE_TX_SKIP] = "TX_SKIP",
- [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
+ [RESET_TYPE_INVISIBLE] = "INVISIBLE",
+ [RESET_TYPE_ALL] = "ALL",
+ [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
+ [RESET_TYPE_WORLD] = "WORLD",
+ [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+ [RESET_TYPE_DISABLE] = "DISABLE",
+ [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
+ [RESET_TYPE_INT_ERROR] = "INT_ERROR",
+ [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
+ [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
+ [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_TX_SKIP] = "TX_SKIP",
+ [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
-#define EFX_MAX_MTU (9 * 1024)
-
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -117,9 +118,12 @@ MODULE_PARM_DESC(separate_tx_channels,
static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware
- * monitor. On Falcon-based NICs, this will:
+ * monitor.
+ * On Falcon-based NICs, this will:
* - Check the on-board hardware monitor;
* - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
*/
static unsigned int efx_monitor_interval = 1 * HZ;
@@ -203,13 +207,14 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \
if ((efx->state == STATE_READY) || \
+ (efx->state == STATE_RECOVERY) || \
(efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \
} while (0)
static int efx_check_disabled(struct efx_nic *efx)
{
- if (efx->state == STATE_DISABLED) {
+ if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
@@ -242,15 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel);
- /* Deliver last RX packet. */
- if (channel->rx_pkt) {
- __efx_rx_packet(channel, channel->rx_pkt);
- channel->rx_pkt = NULL;
- }
- if (rx_queue->enabled) {
- efx_rx_strategy(channel);
+ efx_rx_flush_packet(channel);
+ if (rx_queue->enabled)
efx_fast_push_rx_descriptors(rx_queue);
- }
}
return spent;
@@ -625,20 +624,51 @@ fail:
*/
static void efx_start_datapath(struct efx_nic *efx)
{
+ bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
struct efx_channel *channel;
+ size_t rx_buf_len;
/* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
- efx->type->rx_buffer_hash_size +
- efx->type->rx_buffer_padding);
- efx->rx_buffer_order = get_order(efx->rx_buffer_len +
- sizeof(struct efx_rx_page_state));
+ efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+ efx->type->rx_buffer_padding);
+ rx_buf_len = (sizeof(struct efx_rx_page_state) +
+ EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
+ if (rx_buf_len <= PAGE_SIZE) {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = 0;
+ } else if (efx->type->can_rx_scatter) {
+ BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+ EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
+ PAGE_SIZE / 2);
+ efx->rx_scatter = true;
+ efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+ efx->rx_buffer_order = 0;
+ } else {
+ efx->rx_scatter = false;
+ efx->rx_buffer_order = get_order(rx_buf_len);
+ }
+
+ efx_rx_config_page_split(efx);
+ if (efx->rx_buffer_order)
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u; page order=%u batch=%u\n",
+ efx->rx_dma_len, efx->rx_buffer_order,
+ efx->rx_pages_per_batch);
+ else
+ netif_dbg(efx, drv, efx->net_dev,
+ "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+ efx->rx_dma_len, efx->rx_page_buf_step,
+ efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+ /* RX filters also have scatter-enabled flags */
+ if (efx->rx_scatter != old_rx_scatter)
+ efx_filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -655,16 +685,12 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue);
- /* The rx buffer allocation strategy is MTU dependent */
- efx_rx_strategy(channel);
-
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
efx_nic_generate_fill_event(rx_queue);
}
- WARN_ON(channel->rx_pkt != NULL);
- efx_rx_strategy(channel);
+ WARN_ON(channel->rx_pkt_n_frags);
}
if (netif_device_present(efx->net_dev))
@@ -683,7 +709,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
BUG_ON(efx->port_enabled);
/* Only perform flush if dma is enabled */
- if (dev->is_busmaster) {
+ if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
@@ -1596,13 +1622,15 @@ static void efx_start_all(struct efx_nic *efx)
efx_start_port(efx);
efx_start_datapath(efx);
- /* Start the hardware monitor if there is one. Otherwise (we're link
- * event driven), we have to poll the PHY because after an event queue
- * flush, we could have a missed a link state change */
- if (efx->type->monitor != NULL) {
+ /* Start the hardware monitor if there is one */
+ if (efx->type->monitor != NULL)
queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval);
- } else {
+
+ /* If link state detection is normally event-driven, we have
+ * to poll now because we could have missed a change
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx))
efx_link_status_changed(efx);
@@ -2309,7 +2337,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
out:
/* Leave device stopped if necessary */
- disabled = rc || method == RESET_TYPE_DISABLE;
+ disabled = rc ||
+ method == RESET_TYPE_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) {
disabled = true;
@@ -2328,13 +2358,48 @@ out:
return rc;
}
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+static int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+ /* A PCI error can occur and not be seen by EEH because nothing
+ * happens on the PCI bus. In this case the driver may fail and
+ * schedule a 'recover or reset', leading to this recovery handler.
+ * Manually call the eeh failure check function.
+ */
+ struct eeh_dev *eehdev =
+ of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+ if (eeh_dev_check_failure(eehdev)) {
+ /* The EEH mechanisms will handle the error and reset the
+ * device if necessary.
+ */
+ return 1;
+ }
+#endif
+ return 0;
+}
+
/* The worker thread exists so that code that cannot sleep can
* schedule a reset for later.
*/
static void efx_reset_work(struct work_struct *data)
{
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
- unsigned long pending = ACCESS_ONCE(efx->reset_pending);
+ unsigned long pending;
+ enum reset_type method;
+
+ pending = ACCESS_ONCE(efx->reset_pending);
+ method = fls(pending) - 1;
+
+ if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+ method == RESET_TYPE_RECOVER_OR_ALL) &&
+ efx_try_recovery(efx))
+ return;
if (!pending)
return;
@@ -2346,7 +2411,7 @@ static void efx_reset_work(struct work_struct *data)
* it cannot change again.
*/
if (efx->state == STATE_READY)
- (void)efx_reset(efx, fls(pending) - 1);
+ (void)efx_reset(efx, method);
rtnl_unlock();
}
@@ -2355,11 +2420,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
enum reset_type method;
+ if (efx->state == STATE_RECOVERY) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "recovering: skip scheduling %s reset\n",
+ RESET_TYPE(type));
+ return;
+ }
+
switch (type) {
case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL:
+ case RESET_TYPE_RECOVER_OR_ALL:
case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE:
+ case RESET_TYPE_RECOVER_OR_DISABLE:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
@@ -2569,6 +2643,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_fini_struct(efx);
pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev);
+
+ pci_disable_pcie_error_reporting(pci_dev);
};
/* NIC VPD information
@@ -2741,6 +2817,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc);
+ rc = pci_enable_pcie_error_reporting(pci_dev);
+ if (rc && rc != -EINVAL)
+ netif_warn(efx, probe, efx->net_dev,
+ "pci_enable_pcie_error_reporting failed (%d)\n", rc);
+
return 0;
fail4:
@@ -2865,12 +2946,112 @@ static const struct dev_pm_ops efx_pm_ops = {
.restore = efx_pm_resume,
};
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+ enum pci_channel_state state)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rtnl_lock();
+
+ if (efx->state != STATE_DISABLED) {
+ efx->state = STATE_RECOVERY;
+ efx->reset_pending = 0;
+
+ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_stop_interrupts(efx, false);
+
+ status = PCI_ERS_RESULT_NEED_RESET;
+ } else {
+ /* If the interface is disabled we don't want to do anything
+ * with it.
+ */
+ status = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ rtnl_unlock();
+
+ pci_disable_device(pdev);
+
+ return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+ int rc;
+
+ if (pci_enable_device(pdev)) {
+ netif_err(efx, hw, efx->net_dev,
+ "Cannot re-enable PCI device after reset.\n");
+ status = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ rc = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
+ /* Non-fatal error. Continue. */
+ }
+
+ return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+ struct efx_nic *efx = pci_get_drvdata(pdev);
+ int rc;
+
+ rtnl_lock();
+
+ if (efx->state == STATE_DISABLED)
+ goto out;
+
+ rc = efx_reset(efx, RESET_TYPE_ALL);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev,
+ "efx_reset failed after PCI error (%d)\n", rc);
+ } else {
+ efx->state = STATE_READY;
+ netif_dbg(efx, hw, efx->net_dev,
+ "Done resetting and resuming IO after PCI error.\n");
+ }
+
+out:
+ rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callback unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+static struct pci_error_handlers efx_err_handlers = {
+ .error_detected = efx_io_error_detected,
+ .slot_reset = efx_io_slot_reset,
+ .resume = efx_io_resume,
+};
+
static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = efx_pci_table,
.probe = efx_pci_probe,
.remove = efx_pci_remove,
.driver.pm = &efx_pm_ops,
+ .err_handler = &efx_err_handlers,
};
/**************************************************************************
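In the reworked efx_reset_work() above, efx->reset_pending is a bitmask of scheduled reset types, and because enum reset_type is numbered in order of increasing scope, fls(pending) - 1 selects the most severe pending reset as the method. A standalone illustration in plain C, using a local stand-in for the kernel's fls() so it builds in userspace (the RT_* values mirror the renumbered enum in enum.h further down):

	#include <stdio.h>

	/* Stand-in for the kernel's fls(): 1-based index of the highest set bit. */
	static int fls_demo(unsigned long x)
	{
		return x ? 8 * (int)sizeof(x) - __builtin_clzl(x) : 0;
	}

	enum { RT_INVISIBLE, RT_RECOVER_OR_ALL, RT_ALL, RT_WORLD,
	       RT_RECOVER_OR_DISABLE, RT_DISABLE };

	int main(void)
	{
		/* Two resets scheduled: ALL (bit 2) and RECOVER_OR_DISABLE (bit 4). */
		unsigned long pending = (1UL << RT_ALL) | (1UL << RT_RECOVER_OR_DISABLE);
		int method = fls_demo(pending) - 1;

		printf("chosen method = %d (RT_RECOVER_OR_DISABLE = %d)\n",
		       method, RT_RECOVER_OR_DISABLE);
		return 0;
	}

With both bits set, the highest bit wins, so the wider RECOVER_OR_DISABLE reset is the one performed.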
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index d2f790df6dc..8372da239b4 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */
+extern void efx_rx_config_page_split(struct efx_nic *efx);
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+extern void __efx_rx_packet(struct efx_channel *channel);
+extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
+ unsigned int index, unsigned int n_frags,
unsigned int len, u16 flags);
+static inline void efx_rx_flush_packet(struct efx_channel *channel)
+{
+ if (channel->rx_pkt_n_frags)
+ __efx_rx_packet(channel);
+}
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
+extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 182dbe2cc6e..ab8fb5889e5 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -137,8 +137,12 @@ enum efx_loopback_mode {
* Reset methods are numbered in order of increasing scope.
*
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,9 +154,11 @@ enum efx_loopback_mode {
*/
enum reset_type {
RESET_TYPE_INVISIBLE = 0,
- RESET_TYPE_ALL = 1,
- RESET_TYPE_WORLD = 2,
- RESET_TYPE_DISABLE = 3,
+ RESET_TYPE_RECOVER_OR_ALL = 1,
+ RESET_TYPE_ALL = 2,
+ RESET_TYPE_WORLD = 3,
+ RESET_TYPE_RECOVER_OR_DISABLE = 4,
+ RESET_TYPE_DISABLE = 5,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8e61cd06f66..6e768175e7e 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
};
/* Number of ethtool statistics */
@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0,
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 49bcd196e10..4486102fa9b 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
static void falcon_init_rx_cfg(struct efx_nic *efx)
{
- /* Prior to Siena the RX DMA engine will split each frame at
- * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
- * be so large that that never happens. */
- const unsigned huge_buf_size = (3 * 4096) >> 5;
/* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25;
@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_RX_CFG);
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
- /* Data FIFO size is 5.5K */
+ /* Data FIFO size is 5.5K. The RX DMA engine only
+ * supports scattering for user-mode queues, but will
+ * split DMA writes at intervals of RX_USR_BUF_SIZE
+ * (32-byte units) even for kernel-mode queues. We
+ * set it to be so large that that never happens.
+ */
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
- huge_buf_size);
+ (3 * 4096) >> 5);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
/* Data FIFO size is 80K; register fields moved */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
- huge_buf_size);
+ EFX_RX_USR_BUF_SIZE >> 5);
/* Send XON and XOFF at ~3 * max MTU away from empty/full */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_padding = 0x24,
+ .can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
+ .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 8af42cd1fed..2397f0e8d3e 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -66,6 +66,10 @@ struct efx_filter_state {
#endif
};
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_filter_table *table,
+ unsigned int filter_idx);
+
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
}
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
struct efx_filter_spec *spec = &table->spec[filter_idx];
+ enum efx_filter_flags flags = 0;
+
+ /* If there's only one channel then disable RSS for non VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ if (efx->n_rx_channels > 1)
+ flags |= EFX_FILTER_FLAG_RX_RSS;
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
- EFX_FILTER_FLAG_RX_RSS, 0);
+ if (efx->rx_scatter)
+ flags |= EFX_FILTER_FLAG_RX_SCATTER;
+
+ efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
spec->type = EFX_FILTER_UC_DEF + filter_idx;
table->used_bitmap[0] |= 1 << filter_idx;
}
@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
break;
}
- case EFX_FILTER_TABLE_RX_DEF:
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- return spec->type - EFX_FILTER_UC_DEF;
-
case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7(
@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
return true;
}
-static int efx_filter_search(struct efx_filter_table *table,
- struct efx_filter_spec *spec, u32 key,
- bool for_insert, unsigned int *depth_required)
-{
- unsigned hash, incr, filter_idx, depth, depth_max;
-
- hash = efx_filter_hash(key);
- incr = efx_filter_increment(key);
-
- filter_idx = hash & (table->size - 1);
- depth = 1;
- depth_max = (for_insert ?
- (spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
- table->search_depth[spec->type]);
-
- for (;;) {
- /* Return success if entry is used and matches this spec
- * or entry is unused and we are trying to insert.
- */
- if (test_bit(filter_idx, table->used_bitmap) ?
- efx_filter_equal(spec, &table->spec[filter_idx]) :
- for_insert) {
- *depth_required = depth;
- return filter_idx;
- }
-
- /* Return failure if we reached the maximum search depth */
- if (depth == depth_max)
- return for_insert ? -EBUSY : -ENOENT;
-
- filter_idx = (filter_idx + incr) & (table->size - 1);
- ++depth;
- }
-}
-
/*
* Construct/deconstruct external filter IDs. At least the RX filter
* IDs must be ordered by matching priority, for RX NFC semantics.
@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
* efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter
* @spec: Specification for the filter
- * @replace: Flag for whether the specified filter may replace a filter
- * with an identical match expression and equal or lower priority
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
*
* On success, return the filter ID.
* On failure, return a negative error code.
+ *
+ * If an existing filter has equal match values to the new filter
+ * spec, then the new filter might replace it, depending on the
+ * relative priorities. If the existing filter has lower priority, or
+ * if @replace_equal is set and it has equal priority, then it is
+ * replaced. Otherwise the function fails, returning -%EPERM if
+ * the existing filter has higher priority or -%EEXIST if it has
+ * equal priority.
*/
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace)
+ bool replace_equal)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- unsigned int filter_idx, depth = 0;
- u32 key;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
int rc;
if (!table || table->size == 0)
return -EINVAL;
- key = efx_filter_build(&filter, spec);
-
netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type,
table->search_depth[spec->type]);
- spin_lock_bh(&state->lock);
+ if (table->id == EFX_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
+ EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
+ rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+ ins_index = rep_index;
- rc = efx_filter_search(table, spec, key, true, &depth);
- if (rc < 0)
- goto out;
- filter_idx = rc;
- BUG_ON(filter_idx >= table->size);
- saved_spec = &table->spec[filter_idx];
-
- if (test_bit(filter_idx, table->used_bitmap)) {
- /* Should we replace the existing filter? */
- if (!replace) {
+ spin_lock_bh(&state->lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_filter_build(&filter, spec);
+ unsigned int hash = efx_filter_hash(key);
+ unsigned int incr = efx_filter_increment(key);
+ unsigned int max_rep_depth = table->search_depth[spec->type];
+ unsigned int max_ins_depth =
+ spec->priority <= EFX_FILTER_PRI_HINT ?
+ FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&state->lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_filter_equal(spec, &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_filter_spec *saved_spec = &table->spec[rep_index];
+
+ if (spec->priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST;
goto out;
}
@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
rc = -EPERM;
goto out;
}
- } else {
- __set_bit(filter_idx, table->used_bitmap);
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
++table->used;
}
- *saved_spec = *spec;
+ table->spec[ins_index] = *spec;
if (table->id == EFX_FILTER_TABLE_RX_DEF) {
efx_filter_push_rx_config(efx);
@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
}
efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_filter_table_clear_entry(efx, table, rep_index);
}
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
- __func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(spec, filter_idx);
+ __func__, spec->type, ins_index, spec->dmaq_id);
+ rc = efx_filter_make_id(spec, ins_index);
out:
spin_unlock_bh(&state->lock);
@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx)
kfree(state);
}
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_filter_push_rx_config() */
+ continue;
+
+ efx_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&state->lock);
+}
+
#ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
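The rewritten efx_filter_insert_filter() above searches its hash table with open addressing: start at hash & (size - 1), step by a key-derived increment, remember the first free slot as the insertion point (ins_index), and stop early if an entry with equal match values is found (rep_index). A compact userspace sketch of that probe loop, using assumed helper names (demo_hash(), demo_incr()) instead of the driver's LFSR-based efx_filter_hash()/efx_filter_increment(), and collapsing the two depth limits into one for brevity:

	#include <stdbool.h>
	#include <stdio.h>

	#define TABLE_SIZE 16		/* power of two, like the hardware filter tables */
	#define MAX_DEPTH   5		/* single depth limit; the driver uses two */

	struct entry {
		bool used;
		unsigned int key;
	};

	/* Toy stand-ins for efx_filter_hash() and efx_filter_increment(). */
	static unsigned int demo_hash(unsigned int key) { return key * 2654435761u; }
	static unsigned int demo_incr(unsigned int key) { return key | 1; }

	/* Returns the insertion index (first free slot or matching entry), or -1
	 * if the probed region is full.  *rep is the index of an existing entry
	 * with the same key, or -1 if none was found within the depth limit.
	 */
	static int probe(struct entry *tbl, unsigned int key, int *rep)
	{
		unsigned int i = demo_hash(key) & (TABLE_SIZE - 1);
		unsigned int incr = demo_incr(key);
		int ins = -1, depth;

		*rep = -1;
		for (depth = 1; depth <= MAX_DEPTH; depth++) {
			if (!tbl[i].used) {
				if (ins < 0)
					ins = i;	/* remember first free slot */
			} else if (tbl[i].key == key) {
				if (ins < 0)
					ins = i;
				*rep = i;		/* existing equal entry */
				break;
			}
			i = (i + incr) & (TABLE_SIZE - 1);
		}
		return ins;
	}

	int main(void)
	{
		struct entry tbl[TABLE_SIZE] = { { false, 0 } };
		int rep;
		int ins = probe(tbl, 42, &rep);

		printf("insert at %d, replace %d\n", ins, rep);
		return 0;
	}

As in the driver, whether a found rep_index may actually be replaced is decided afterwards from the relative priorities and the replace_equal flag.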
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9d426d0457b..c5c9747861b 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -553,6 +553,7 @@
#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0a90abd2421..9bd433a095c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -69,6 +69,12 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
+#define EFX_RX_USR_BUF_SIZE 1824
+
/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
@@ -206,25 +212,23 @@ struct efx_tx_queue {
/**
* struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
- * Will be %NULL if the buffer slot is currently free.
- * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
+ * @page: The associated page buffer.
* Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
- * @len: Buffer length, in bytes.
- * @flags: Flags for buffer and packet state.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ * If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state. These are only set on the
+ * first buffer of a scattered packet.
*/
struct efx_rx_buffer {
dma_addr_t dma_addr;
- union {
- struct sk_buff *skb;
- struct page *page;
- } u;
+ struct page *page;
u16 page_offset;
u16 len;
u16 flags;
};
-#define EFX_RX_BUF_PAGE 0x0001
+#define EFX_RX_BUF_LAST_IN_PAGE 0x0001
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
@@ -260,14 +264,23 @@ struct efx_rx_page_state {
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Number of buffers used by current packet
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ * the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ * recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
* @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill)
* @min_fill: RX descriptor minimum non-zero fill level.
* This records the minimum fill level observed when a ring
* refill was triggered.
- * @alloc_page_count: RX allocation strategy counter.
- * @alloc_skb_count: RX allocation strategy counter.
+ * @recycle_count: RX buffer recycle counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event().
*/
struct efx_rx_queue {
@@ -279,15 +292,22 @@ struct efx_rx_queue {
bool enabled;
bool flush_pending;
- int added_count;
- int notified_count;
- int removed_count;
+ unsigned int added_count;
+ unsigned int notified_count;
+ unsigned int removed_count;
+ unsigned int scatter_n;
+ struct page **page_ring;
+ unsigned int page_add;
+ unsigned int page_remove;
+ unsigned int page_recycle_count;
+ unsigned int page_recycle_failed;
+ unsigned int page_recycle_full;
+ unsigned int page_ptr_mask;
unsigned int max_fill;
unsigned int fast_fill_trigger;
unsigned int min_fill;
unsigned int min_overfill;
- unsigned int alloc_page_count;
- unsigned int alloc_skb_count;
+ unsigned int recycle_count;
struct timer_list slow_fill;
unsigned int slow_fill_count;
};
@@ -336,10 +356,6 @@ enum efx_rx_alloc_method {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
- * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
- * and diagnostic counters
- * @rx_alloc_push_pages: RX allocation method currently in use for pushing
- * descriptors
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -347,6 +363,12 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ * lack of descriptors
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ * __efx_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ * by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
*/
@@ -371,9 +393,6 @@ struct efx_channel {
unsigned int rfs_filters_added;
#endif
- int rx_alloc_level;
- int rx_alloc_push_pages;
-
unsigned n_rx_tobe_disc;
unsigned n_rx_ip_hdr_chksum_err;
unsigned n_rx_tcp_udp_chksum_err;
@@ -381,11 +400,10 @@ struct efx_channel {
unsigned n_rx_frm_trunc;
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
+ unsigned int n_rx_nodesc_trunc;
- /* Used to pipeline received packets in order to optimise memory
- * access with prefetches.
- */
- struct efx_rx_buffer *rx_pkt;
+ unsigned int rx_pkt_n_frags;
+ unsigned int rx_pkt_index;
struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
@@ -410,7 +428,7 @@ struct efx_channel_type {
void (*post_remove)(struct efx_channel *);
void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *);
- void (*receive_skb)(struct efx_channel *, struct sk_buff *);
+ bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
bool keep_eventq;
};
@@ -446,6 +464,7 @@ enum nic_state {
STATE_UNINIT = 0, /* device being probed/removed or is frozen */
STATE_READY = 1, /* hardware ready and netdev registered */
STATE_DISABLED = 2, /* device disabled due to hardware errors */
+ STATE_RECOVERY = 3, /* device recovering from PCI error */
};
/*
@@ -684,10 +703,13 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
- * @rx_buffer_len: RX buffer length
+ * @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ * for use in sk_buff::truesize
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
+ * @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer
@@ -800,10 +822,15 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
- unsigned int rx_buffer_len;
+ unsigned int rx_dma_len;
unsigned int rx_buffer_order;
+ unsigned int rx_buffer_truesize;
+ unsigned int rx_page_buf_step;
+ unsigned int rx_bufs_per_page;
+ unsigned int rx_pages_per_batch;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
+ bool rx_scatter;
unsigned int_error_count;
unsigned long int_error_expire;
@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX buffer
- * @rx_buffer_padding: Size of padding at end of RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed
@@ -983,6 +1011,7 @@ struct efx_nic_type {
u64 max_dma_mask;
unsigned int rx_buffer_hash_size;
unsigned int rx_buffer_padding;
+ bool can_rx_scatter;
unsigned int max_interrupt_mode;
unsigned int phys_addr_channels;
unsigned int timer_period_max;
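A quick arithmetic check on the new EFX_RX_USR_BUF_SIZE of 1824 bytes: the BUILD_BUG_ON added to efx_start_datapath() requires sizeof(struct efx_rx_page_state) + EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE <= PAGE_SIZE / 2, so with 4K pages each half page carries one 1824-byte DMA buffer plus at most 2048 - 1824 = 224 bytes of page state and alignment headroom, which is what allows two scatter buffers to share a single page as the comment states.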
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index eaa8e874a3c..b0503cd8c2a 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_ATOMIC);
+ &buffer->dma_addr,
+ GFP_ATOMIC | __GFP_ZERO);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
- memset(buffer->addr, 0, len);
return 0;
}
@@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+ rx_queue->scatter_n = 0;
+
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
@@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- /* For >=B0 this is scatter so disable */
- FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
@@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
EFX_RX_PKT_DISCARD : 0;
}
-/* Handle receive events that are not in-order. */
-static void
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
@@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
}
/* Handle a packet received event
@@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
- bool rx_ev_pkt_ok;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
@@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return;
- /* Basic packet information */
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
- if (unlikely(rx_ev_desc_ptr != expected_ptr))
- efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK and packet type is TCP/IP or
@@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
channel->irq_mod_score += 2;
/* Handle received packet */
- efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
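The reworked efx_handle_rx_event() above accumulates scattered fragments: every completion event bumps rx_queue->scatter_n, and only when the JUMBO_CONT flag is clear is the packet handed to efx_rx_packet() with the accumulated fragment count, after which removed_count advances by scatter_n and the counter resets. A minimal userspace sketch of that accumulation, with hypothetical types standing in for the driver's structures:

	#include <stdbool.h>
	#include <stdio.h>

	struct rx_demo {
		unsigned int removed_count;
		unsigned int scatter_n;
	};

	/* Called once per descriptor-completion event.  'cont' mirrors
	 * FSF_AZ_RX_EV_JUMBO_CONT: true means more fragments follow.
	 */
	static void handle_rx_event(struct rx_demo *q, bool cont, unsigned int byte_cnt)
	{
		++q->scatter_n;
		if (cont)
			return;			/* wait for the rest of the packet */

		printf("deliver packet: %u fragment(s), byte count from final event = %u\n",
		       q->scatter_n, byte_cnt);
		q->removed_count += q->scatter_n;
		q->scatter_n = 0;
	}

	int main(void)
	{
		struct rx_demo q = { 0 };

		/* A packet scattered across three buffers. */
		handle_rx_event(&q, true, 0);
		handle_rx_event(&q, true, 0);
		handle_rx_event(&q, false, 700);
		return 0;
	}

The out-of-order and non-SOP branches in the real handler additionally flush any pending fragments as a discard before resynchronising, which is what the new efx_handle_rx_bad_index() return value controls.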
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3f93624fc27..07f6baa15c0 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -99,6 +99,9 @@
#define PTP_V2_VERSION_LENGTH 1
#define PTP_V2_VERSION_OFFSET 29
+#define PTP_V2_UUID_LENGTH 8
+#define PTP_V2_UUID_OFFSET 48
+
/* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2),
* the MC only captures the last six bytes of the clock identity. These values
* reflect those, not the ones used in the standard. The standard permits
@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
unsigned number_readings = (response_length /
MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
unsigned i;
- unsigned min;
- unsigned min_set = 0;
unsigned total;
unsigned ngood = 0;
unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data;
- bool min_valid = false;
u32 last_sec;
u32 start_sec;
struct timespec delta;
@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (number_readings == 0)
return -EAGAIN;
- /* Find minimum value in this set of results, discarding clearly
- * erroneous results.
+ /* Read the set of results and increment stats for any results that
+ * appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
- if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
- if (min_valid) {
- if (ptp->timeset[i].window < min_set)
- min_set = ptp->timeset[i].window;
- } else {
- min_valid = true;
- min_set = ptp->timeset[i].window;
- }
- }
- }
-
- if (min_valid) {
- if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
- min = ptp->base_sync_ns;
- else
- min = min_set;
- } else {
- min = SYNCHRONISATION_GRANULARITY_NS;
}
- /* Discard excessively long synchronise durations. The MC times
- * when it finishes reading the host time so the corrected window
- * time should be fairly constant for a given platform.
+ /* Find the last good host-MC synchronization result. The MC times
+ * when it finishes reading the host time so the corrected window time
+ * should be fairly constant for a given platform.
*/
total = 0;
for (i = 0; i < number_readings; i++)
@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev,
- "PTP no suitable synchronisations %dns %dns\n",
- ptp->base_sync_ns, min_set);
+ "PTP no suitable synchronisations %dns\n",
+ ptp->base_sync_ns);
return -EAGAIN;
}
@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
* the receive timestamp from the MC - this will probably occur after the
* packet arrival because of the processing in the MC.
*/
-static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{
struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
- u8 *data;
+ u8 *match_data_012, *match_data_345;
unsigned int version;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
/* Correct version? */
if (ptp->mode == MC_CMD_PTP_MODE_V1) {
- if (skb->len < PTP_V1_MIN_LENGTH) {
- netif_receive_skb(skb);
- return;
+ if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
+ return false;
}
version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
- netif_receive_skb(skb);
- return;
+ return false;
}
+
+ /* PTP V1 uses all six bytes of the UUID to match the packet
+ * to the timestamp
+ */
+ match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
+ match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
} else {
- if (skb->len < PTP_V2_MIN_LENGTH) {
- netif_receive_skb(skb);
- return;
+ if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
+ return false;
}
version = skb->data[PTP_V2_VERSION_OFFSET];
-
- BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
- BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
- BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
- BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
- BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
-
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
- netif_receive_skb(skb);
- return;
+ return false;
+ }
+
+ /* The original V2 implementation uses bytes 2-7 of
+ * the UUID to match the packet to the timestamp. This
+ * discards two of the bytes of the MAC address used
+ * to create the UUID (SF bug 33070). The PTP V2
+ * enhanced mode fixes this issue and uses bytes 0-2
+ * and bytes 5-7 of the UUID.
+ */
+ match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+ match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ } else {
+ match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
timestamps = skb_hwtstamps(skb);
memset(timestamps, 0, sizeof(*timestamps));
+ /* We expect the sequence number to be in the same position in
+ * the packet for PTP V1 and V2
+ */
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+ BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
/* Extract UUID/Sequence information */
- data = skb->data + PTP_V1_UUID_OFFSET;
- match->words[0] = (data[0] |
- (data[1] << 8) |
- (data[2] << 16) |
- (data[3] << 24));
- match->words[1] = (data[4] |
- (data[5] << 8) |
+ match->words[0] = (match_data_012[0] |
+ (match_data_012[1] << 8) |
+ (match_data_012[2] << 16) |
+ (match_data_345[0] << 24));
+ match->words[1] = (match_data_345[1] |
+ (match_data_345[2] << 8) |
(skb->data[PTP_V1_SEQUENCE_OFFSET +
PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
skb_queue_tail(&ptp->rxq, skb);
queue_work(ptp->workwq, &ptp->work);
+
+ return true;
}
/* Transmit a PTP packet. This has to be transmitted by the MC
@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
* timestamped
*/
init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
- new_mode = MC_CMD_PTP_MODE_V2;
+ new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
enable_wanted = true;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
if (init->tx_type != HWTSTAMP_TX_OFF)
enable_wanted = true;
+ /* Old versions of the firmware do not support the improved
+ * UUID filtering option (SF bug 33070). If the firmware does
+ * not accept the enhanced mode, fall back to the standard PTP
+ * v2 UUID filtering.
+ */
rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+ if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
+ rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
if (rc != 0)
return rc;
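Editor's note: the match words built in the ptp.c hunk above fold six UUID bytes plus the low byte of the sequence number into two 32-bit values. The stand-alone sketch below mirrors that packing; build_match_words and the sample bytes are hypothetical, and the byte selection (UUID bytes 0-2 and 5-7 for enhanced mode, 2-7 for the original V2 mode) follows the comments in the hunk, not a separately verified spec:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: folding six UUID bytes and one sequence byte into two
 * 32-bit match words, mirroring the layout used to pair received PTP frames
 * with MC timestamp events.
 */
static void build_match_words(const uint8_t uuid012[3], const uint8_t uuid345[3],
			      uint8_t seq_lsb, uint32_t words[2])
{
	words[0] = uuid012[0] | (uuid012[1] << 8) |
		   ((uint32_t)uuid012[2] << 16) | ((uint32_t)uuid345[0] << 24);
	words[1] = uuid345[1] | ((uint32_t)uuid345[2] << 8) |
		   ((uint32_t)seq_lsb << 16);
}

int main(void)
{
	/* Arbitrary example bytes; enhanced mode would take these from UUID
	 * offsets 0-2 and 5-7, the older mode from offsets 2-7.
	 */
	uint8_t lo[3] = { 0x00, 0x0f, 0x53 }, hi[3] = { 0x12, 0x34, 0x56 };
	uint32_t w[2];

	build_match_words(lo, hi, 0x2a, w);
	printf("%08x %08x\n", w[0], w[1]);
	return 0;
}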
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index bb579a6128c..e73e30bac10 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -16,6 +16,7 @@
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
+#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
@@ -24,85 +25,39 @@
#include "selftest.h"
#include "workarounds.h"
-/* Number of RX descriptors pushed at once. */
-#define EFX_RX_BATCH 8
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Number of RX buffers to recycle pages for. When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 64u
-/*
- * rx_alloc_method - RX buffer allocation method
- *
- * This driver supports two methods for allocating and using RX buffers:
- * each RX buffer may be backed by an skb or by an order-n page.
- *
- * When GRO is in use then the second method has a lower overhead,
- * since we don't have to allocate then free skbs on reassembled frames.
- *
- * Values:
- * - RX_ALLOC_METHOD_AUTO = 0
- * - RX_ALLOC_METHOD_SKB = 1
- * - RX_ALLOC_METHOD_PAGE = 2
- *
- * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
- * controlled by the parameters below.
- *
- * - Since pushing and popping descriptors are separated by the rx_queue
- * size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for GRO is less
- * than the performance hit of using page-based allocation of non-GRO,
- * so the watermarks should reflect this.
- *
- * Per channel we maintain a single variable, updated by each channel:
- *
- * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
- * RX_ALLOC_FACTOR_SKB)
- * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
- * limits the hysteresis), and update the allocation strategy:
- *
- * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
- * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
- */
-static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-
-#define RX_ALLOC_LEVEL_GRO 0x2000
-#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_GRO 1
-#define RX_ALLOC_FACTOR_SKB (-2)
-
/* This is the percentage fill level below which new RX descriptors
* will be added to the RX descriptor ring.
*/
static unsigned int rx_refill_threshold;
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+ EFX_RX_USR_BUF_SIZE)
+
/*
* RX maximum head room required.
*
- * This must be at least 1 to prevent overflow and at least 2 to allow
- * pipelined receives.
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
*/
-#define EFX_RXD_HEAD_ROOM 2
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
- struct efx_rx_buffer *buf)
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
- return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
- return PAGE_SIZE << efx->rx_buffer_order;
-}
-
-static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
-{
- if (buf->flags & EFX_RX_BUF_PAGE)
- return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
- else
- return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
+ return page_address(buf->page) + buf->page_offset;
}
static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
#endif
}
-/**
- * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
- *
- * @rx_queue: Efx RX queue
- *
- * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
- * struct efx_rx_buffer for each one. Return a negative error code or 0
- * on success. May fail having only inserted fewer than EFX_RX_BATCH
- * buffers.
- */
-static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+ if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+ return efx_rx_buffer(rx_queue, 0);
+ else
+ return rx_buf + 1;
+}
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int len)
+{
+ dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+ DMA_FROM_DEVICE);
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
+ L1_CACHE_BYTES);
+ efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+ ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+ efx->rx_page_buf_step);
+ efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+ efx->rx_bufs_per_page;
+ efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+ efx->rx_bufs_per_page);
+}
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
- struct net_device *net_dev = efx->net_dev;
- struct efx_rx_buffer *rx_buf;
- struct sk_buff *skb;
- int skb_len = efx->rx_buffer_len;
- unsigned index, count;
+ struct page *page;
+ struct efx_rx_page_state *state;
+ unsigned index;
- for (count = 0; count < EFX_RX_BATCH; ++count) {
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
-
- rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
- if (unlikely(!skb))
- return -ENOMEM;
-
- /* Adjust the SKB for padding */
- skb_reserve(skb, NET_IP_ALIGN);
- rx_buf->len = skb_len - NET_IP_ALIGN;
- rx_buf->flags = 0;
-
- rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
- skb->data, rx_buf->len,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
- rx_buf->dma_addr))) {
- dev_kfree_skb_any(skb);
- rx_buf->u.skb = NULL;
- return -EIO;
- }
+ index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+ page = rx_queue->page_ring[index];
+ if (page == NULL)
+ return NULL;
+
+ rx_queue->page_ring[index] = NULL;
+ /* page_remove cannot exceed page_add. */
+ if (rx_queue->page_remove != rx_queue->page_add)
+ ++rx_queue->page_remove;
- ++rx_queue->added_count;
- ++rx_queue->alloc_skb_count;
+ /* If page_count is 1 then we hold the only reference to this page. */
+ if (page_count(page) == 1) {
+ ++rx_queue->page_recycle_count;
+ return page;
+ } else {
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ ++rx_queue->page_recycle_failed;
}
- return 0;
+ return NULL;
}
/**
- * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
+ * efx_init_rx_buffers - create a batch of page-based RX buffers
*
* @rx_queue: Efx RX queue
*
- * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
- * and populates struct efx_rx_buffers for each one. Return a negative error
- * code or 0 on success. If a single page can be split between two buffers,
- * then the page will either be inserted fully, or not at at all.
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
*/
-static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
@@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
dma_addr_t dma_addr;
unsigned index, count;
- /* We can split a page between two buffers */
- BUILD_BUG_ON(EFX_RX_BATCH & 1);
-
- for (count = 0; count < EFX_RX_BATCH; ++count) {
- page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
- efx->rx_buffer_order);
- if (unlikely(page == NULL))
- return -ENOMEM;
- dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
- efx_rx_buf_size(efx),
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
- __free_pages(page, efx->rx_buffer_order);
- return -EIO;
+ count = 0;
+ do {
+ page = efx_reuse_page(rx_queue);
+ if (page == NULL) {
+ page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+ efx->rx_buffer_order);
+ if (unlikely(page == NULL))
+ return -ENOMEM;
+ dma_addr =
+ dma_map_page(&efx->pci_dev->dev, page, 0,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ dma_addr))) {
+ __free_pages(page, efx->rx_buffer_order);
+ return -EIO;
+ }
+ state = page_address(page);
+ state->dma_addr = dma_addr;
+ } else {
+ state = page_address(page);
+ dma_addr = state->dma_addr;
}
- state = page_address(page);
- state->refcnt = 0;
- state->dma_addr = dma_addr;
dma_addr += sizeof(struct efx_rx_page_state);
page_offset = sizeof(struct efx_rx_page_state);
- split:
- index = rx_queue->added_count & rx_queue->ptr_mask;
- rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
- rx_buf->u.page = page;
- rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
- rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
- rx_buf->flags = EFX_RX_BUF_PAGE;
- ++rx_queue->added_count;
- ++rx_queue->alloc_page_count;
- ++state->refcnt;
-
- if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
- /* Use the second half of the page */
+ do {
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->page = page;
+ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_dma_len;
+ rx_buf->flags = 0;
+ ++rx_queue->added_count;
get_page(page);
- dma_addr += (PAGE_SIZE >> 1);
- page_offset += (PAGE_SIZE >> 1);
- ++count;
- goto split;
- }
- }
+ dma_addr += efx->rx_page_buf_step;
+ page_offset += efx->rx_page_buf_step;
+ } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+ rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+ } while (++count < efx->rx_pages_per_batch);
return 0;
}
+/* Unmap a DMA-mapped page. This function is only called for the final RX
+ * buffer in a page.
+ */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf,
- unsigned int used_len)
+ struct efx_rx_buffer *rx_buf)
{
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
- struct efx_rx_page_state *state;
-
- state = page_address(rx_buf->u.page);
- if (--state->refcnt == 0) {
- dma_unmap_page(&efx->pci_dev->dev,
- state->dma_addr,
- efx_rx_buf_size(efx),
- DMA_FROM_DEVICE);
- } else if (used_len) {
- dma_sync_single_for_cpu(&efx->pci_dev->dev,
- rx_buf->dma_addr, used_len,
- DMA_FROM_DEVICE);
- }
- } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
- rx_buf->len, DMA_FROM_DEVICE);
+ struct page *page = rx_buf->page;
+
+ if (page) {
+ struct efx_rx_page_state *state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev,
+ state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
}
}
-static void efx_free_rx_buffer(struct efx_nic *efx,
- struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
- __free_pages(rx_buf->u.page, efx->rx_buffer_order);
- rx_buf->u.page = NULL;
- } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- dev_kfree_skb_any(rx_buf->u.skb);
- rx_buf->u.skb = NULL;
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
}
}
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf)
{
- efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
- efx_free_rx_buffer(rx_queue->efx, rx_buf);
-}
+ struct page *page = rx_buf->page;
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned index;
-/* Attempt to resurrect the other receive buffer that used to share this page,
- * which had previously been passed up to the kernel and freed. */
-static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
- struct efx_rx_buffer *rx_buf)
-{
- struct efx_rx_page_state *state = page_address(rx_buf->u.page);
- struct efx_rx_buffer *new_buf;
- unsigned fill_level, index;
-
- /* +1 because efx_rx_packet() incremented removed_count. +1 because
- * we'd like to insert an additional descriptor whilst leaving
- * EFX_RXD_HEAD_ROOM for the non-recycle path */
- fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
- if (unlikely(fill_level > rx_queue->max_fill)) {
- /* We could place "state" on a list, and drain the list in
- * efx_fast_push_rx_descriptors(). For now, this will do. */
+ /* Only recycle the page after processing the final buffer. */
+ if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
return;
- }
- ++state->refcnt;
- get_page(rx_buf->u.page);
+ index = rx_queue->page_add & rx_queue->page_ptr_mask;
+ if (rx_queue->page_ring[index] == NULL) {
+ unsigned read_index = rx_queue->page_remove &
+ rx_queue->page_ptr_mask;
- index = rx_queue->added_count & rx_queue->ptr_mask;
- new_buf = efx_rx_buffer(rx_queue, index);
- new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
- new_buf->u.page = rx_buf->u.page;
- new_buf->len = rx_buf->len;
- new_buf->flags = EFX_RX_BUF_PAGE;
- ++rx_queue->added_count;
+ /* The next slot in the recycle ring is available, but
+ * increment page_remove if the read pointer currently
+ * points here.
+ */
+ if (read_index == index)
+ ++rx_queue->page_remove;
+ rx_queue->page_ring[index] = page;
+ ++rx_queue->page_add;
+ return;
+ }
+ ++rx_queue->page_recycle_full;
+ efx_unmap_rx_buffer(efx, rx_buf);
+ put_page(rx_buf->page);
}
-/* Recycle the given rx buffer directly back into the rx_queue. There is
- * always room to add this buffer, because we've just popped a buffer. */
-static void efx_recycle_rx_buffer(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- struct efx_rx_buffer *new_buf;
- unsigned index;
-
- rx_buf->flags &= EFX_RX_BUF_PAGE;
-
- if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
- efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
- page_count(rx_buf->u.page) == 1)
- efx_resurrect_rx_buffer(rx_queue, rx_buf);
+ /* Release the page reference we hold for the buffer. */
+ if (rx_buf->page)
+ put_page(rx_buf->page);
+
+ /* If this is the last buffer in a page, unmap and free it. */
+ if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+ efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+ efx_free_rx_buffer(rx_buf);
+ }
+ rx_buf->page = NULL;
+}
- index = rx_queue->added_count & rx_queue->ptr_mask;
- new_buf = efx_rx_buffer(rx_queue, index);
+/* Recycle the pages that are used by buffers that have just been received. */
+static void efx_recycle_rx_buffers(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
+{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
- memcpy(new_buf, rx_buf, sizeof(*new_buf));
- rx_buf->u.page = NULL;
- ++rx_queue->added_count;
+ do {
+ efx_recycle_rx_page(channel, rx_buf);
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--n_frags);
}
/**
@@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
*/
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- unsigned fill_level;
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int fill_level, batch_size;
int space, rc = 0;
/* Calculate current fill level, and exit if we don't need to fill */
@@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
rx_queue->min_fill = fill_level;
}
+ batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
space = rx_queue->max_fill - fill_level;
- EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
+ EFX_BUG_ON_PARANOID(space < batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filling descriptor ring from"
- " level %d to level %d using %s allocation\n",
+ " level %d to level %d\n",
efx_rx_queue_index(rx_queue), fill_level,
- rx_queue->max_fill,
- channel->rx_alloc_push_pages ? "page" : "skb");
+ rx_queue->max_fill);
+
do {
- if (channel->rx_alloc_push_pages)
- rc = efx_init_rx_buffers_page(rx_queue);
- else
- rc = efx_init_rx_buffers_skb(rx_queue);
+ rc = efx_init_rx_buffers(rx_queue);
if (unlikely(rc)) {
/* Ensure that we don't leave the rx queue empty */
if (rx_queue->added_count == rx_queue->removed_count)
efx_schedule_slow_fill(rx_queue);
goto out;
}
- } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
+ } while ((space -= batch_size) >= batch_size);
netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
"RX queue %d fast-filled descriptor ring "
@@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context)
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf,
- int len, bool *leak_packet)
+ int len)
{
struct efx_nic *efx = rx_queue->efx;
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
"RX event (0x%x > 0x%x+0x%x). Leaking\n",
efx_rx_queue_index(rx_queue), len, max_len,
efx->type->rx_buffer_padding);
- /* If this buffer was skb-allocated, then the meta
- * data at the end of the skb will be trashed. So
- * we have no choice but to leak the fragment.
- */
- *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
} else {
if (net_ratelimit())
@@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
/* Pass a received packet up through GRO. GRO can handle pages
* regardless of checksum state and skbs with a good checksum.
*/
-static void efx_rx_packet_gro(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf,
- const u8 *eh)
+static void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags, u8 *eh)
{
struct napi_struct *napi = &channel->napi_str;
gro_result_t gro_result;
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
- if (rx_buf->flags & EFX_RX_BUF_PAGE) {
- struct efx_nic *efx = channel->efx;
- struct page *page = rx_buf->u.page;
- struct sk_buff *skb;
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb)) {
+ while (n_frags--) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ return;
+ }
- rx_buf->u.page = NULL;
+ if (efx->net_dev->features & NETIF_F_RXHASH)
+ skb->rxhash = efx_rx_buf_hash(eh);
+ skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+ CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
+
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
- skb = napi_get_frags(napi);
- if (!skb) {
- put_page(page);
- return;
- }
+ skb->data_len = skb->len;
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+ gro_result = napi_gro_frags(napi);
+ if (gro_result != GRO_DROP)
+ channel->irq_mod_score += 2;
+}
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags,
+ u8 *eh, int hdr_len)
+{
+ struct efx_nic *efx = channel->efx;
+ struct sk_buff *skb;
- skb_fill_page_desc(skb, 0, page,
- efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
+ /* Allocate an SKB to store the headers */
+ skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+ if (unlikely(skb == NULL))
+ return NULL;
- skb->len = rx_buf->len;
- skb->data_len = rx_buf->len;
- skb->truesize += rx_buf->len;
- skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
- CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+ EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
+ skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
+ memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
- gro_result = napi_gro_frags(napi);
- } else {
- struct sk_buff *skb = rx_buf->u.skb;
+ /* Append the remaining page(s) onto the frag list */
+ if (rx_buf->len > hdr_len) {
+ rx_buf->page_offset += hdr_len;
+ rx_buf->len -= hdr_len;
- EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
- rx_buf->u.skb = NULL;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ for (;;) {
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buf->page, rx_buf->page_offset,
+ rx_buf->len);
+ rx_buf->page = NULL;
+ skb->len += rx_buf->len;
+ skb->data_len += rx_buf->len;
+ if (skb_shinfo(skb)->nr_frags == n_frags)
+ break;
- gro_result = napi_gro_receive(napi, skb);
+ rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+ }
+ } else {
+ __free_pages(rx_buf->page, efx->rx_buffer_order);
+ rx_buf->page = NULL;
+ n_frags = 0;
}
- if (gro_result == GRO_NORMAL) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
- } else if (gro_result != GRO_DROP) {
- channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
- channel->irq_mod_score += 2;
- }
+ skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+ /* Move past the ethernet header */
+ skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+ return skb;
}
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
- unsigned int len, u16 flags)
+ unsigned int n_frags, unsigned int len, u16 flags)
{
struct efx_nic *efx = rx_queue->efx;
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_rx_buffer *rx_buf;
- bool leak_packet = false;
rx_buf = efx_rx_buffer(rx_queue, index);
rx_buf->flags |= flags;
- /* This allows the refill path to post another buffer.
- * EFX_RXD_HEAD_ROOM ensures that the slot we are using
- * isn't overwritten yet.
- */
- rx_queue->removed_count++;
-
- /* Validate the length encoded in the event vs the descriptor pushed */
- efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
+ /* Validate the number of fragments and completed length */
+ if (n_frags == 1) {
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+ unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(!efx->rx_scatter)) {
+ /* If this isn't an explicit discard request, either
+ * the hardware or the driver is broken.
+ */
+ WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+ rx_buf->flags |= EFX_RX_PKT_DISCARD;
+ }
netif_vdbg(efx, rx_status, efx->net_dev,
- "RX queue %d received id %x at %llx+%x %s%s\n",
+ "RX queue %d received ids %x-%x len %d %s%s\n",
efx_rx_queue_index(rx_queue), index,
- (unsigned long long)rx_buf->dma_addr, len,
+ (index + n_frags - 1) & rx_queue->ptr_mask, len,
(rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
(rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
- /* Discard packet, if instructed to do so */
+ /* Discard packet, if instructed to do so. Process the
+ * previous receive first.
+ */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
- if (unlikely(leak_packet))
- channel->n_skbuff_leaks++;
- else
- efx_recycle_rx_buffer(channel, rx_buf);
-
- /* Don't hold off the previous receive */
- rx_buf = NULL;
- goto out;
+ efx_rx_flush_packet(channel);
+ put_page(rx_buf->page);
+ efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+ return;
}
- /* Release and/or sync DMA mapping - assumes all RX buffers
- * consumed in-order per RX queue
+ if (n_frags == 1)
+ rx_buf->len = len;
+
+ /* Release and/or sync the DMA mapping - assumes all RX buffers
+ * consumed in-order per RX queue.
*/
- efx_unmap_rx_buffer(efx, rx_buf, len);
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
/* Prefetch nice and early so data will (hopefully) be in cache by
* the time we look at it.
*/
- prefetch(efx_rx_buf_eh(efx, rx_buf));
+ prefetch(efx_rx_buf_va(rx_buf));
+
+ rx_buf->page_offset += efx->type->rx_buffer_hash_size;
+ rx_buf->len -= efx->type->rx_buffer_hash_size;
+
+ if (n_frags > 1) {
+ /* Release/sync DMA mapping for additional fragments.
+ * Fix length for last fragment.
+ */
+ unsigned int tail_frags = n_frags - 1;
+
+ for (;;) {
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ if (--tail_frags == 0)
+ break;
+ efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ }
+ rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+ }
+
+ /* All fragments have been DMA-synced, so recycle buffers and pages. */
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ efx_recycle_rx_buffers(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len - efx->type->rx_buffer_hash_size;
-out:
- if (channel->rx_pkt)
- __efx_rx_packet(channel, channel->rx_pkt);
- channel->rx_pkt = rx_buf;
+ efx_rx_flush_packet(channel);
+ channel->rx_pkt_n_frags = n_frags;
+ channel->rx_pkt_index = index;
}
-static void efx_rx_deliver(struct efx_channel *channel,
- struct efx_rx_buffer *rx_buf)
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int n_frags)
{
struct sk_buff *skb;
+ u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
- /* We now own the SKB */
- skb = rx_buf->u.skb;
- rx_buf->u.skb = NULL;
+ skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+ if (unlikely(skb == NULL)) {
+ efx_free_rx_buffer(rx_buf);
+ return;
+ }
+ skb_record_rx_queue(skb, channel->rx_queue.core_index);
/* Set the SKB flags */
skb_checksum_none_assert(skb);
- /* Record the rx_queue */
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
- /* Pass the packet up */
if (channel->type->receive_skb)
- channel->type->receive_skb(channel, skb);
- else
- netif_receive_skb(skb);
+ if (channel->type->receive_skb(channel, skb))
+ return;
- /* Update allocation strategy method */
- channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+ /* Pass the packet up */
+ netif_receive_skb(skb);
}
/* Handle a received packet. Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
+void __efx_rx_packet(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
- u8 *eh = efx_rx_buf_eh(efx, rx_buf);
+ struct efx_rx_buffer *rx_buf =
+ efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ u8 *eh = efx_rx_buf_va(rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- efx_free_rx_buffer(efx, rx_buf);
- return;
- }
-
- if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
- struct sk_buff *skb = rx_buf->u.skb;
-
- prefetch(skb_shinfo(skb));
-
- skb_reserve(skb, efx->type->rx_buffer_hash_size);
- skb_put(skb, rx_buf->len);
-
- if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
-
- /* Move past the ethernet header. rx_buf->data still points
- * at the ethernet header */
- skb->protocol = eth_type_trans(skb, efx->net_dev);
-
- skb_record_rx_queue(skb, channel->rx_queue.core_index);
+ efx_free_rx_buffer(rx_buf);
+ goto out;
}
if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
- if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
- !channel->type->receive_skb)
- efx_rx_packet_gro(channel, rx_buf, eh);
+ if (!channel->type->receive_skb)
+ efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
else
- efx_rx_deliver(channel, rx_buf);
-}
-
-void efx_rx_strategy(struct efx_channel *channel)
-{
- enum efx_rx_alloc_method method = rx_alloc_method;
-
- if (channel->type->receive_skb) {
- channel->rx_alloc_push_pages = false;
- return;
- }
-
- /* Only makes sense to use page based allocation if GRO is enabled */
- if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
- method = RX_ALLOC_METHOD_SKB;
- } else if (method == RX_ALLOC_METHOD_AUTO) {
- /* Constrain the rx_alloc_level */
- if (channel->rx_alloc_level < 0)
- channel->rx_alloc_level = 0;
- else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
- channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
-
- /* Decide on the allocation method */
- method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
- RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
- }
-
- /* Push the option */
- channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
+ efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+ channel->rx_pkt_n_frags = 0;
}
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
@@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
kfree(rx_queue->buffer);
rx_queue->buffer = NULL;
}
+
return rc;
}
+static void efx_init_rx_recycle_ring(struct efx_nic *efx,
+ struct efx_rx_queue *rx_queue)
+{
+ unsigned int bufs_in_recycle_ring, page_ring_size;
+
+ /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+ if (efx->pci_dev->dev.iommu_group)
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+ else
+ bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+ page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+ efx->rx_bufs_per_page);
+ rx_queue->page_ring = kcalloc(page_ring_size,
+ sizeof(*rx_queue->page_ring), GFP_KERNEL);
+ rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
@@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->notified_count = 0;
rx_queue->removed_count = 0;
rx_queue->min_fill = -1U;
+ efx_init_rx_recycle_ring(efx, rx_queue);
+
+ rx_queue->page_remove = 0;
+ rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+ rx_queue->page_recycle_count = 0;
+ rx_queue->page_recycle_failed = 0;
+ rx_queue->page_recycle_full = 0;
/* Initialise limit fields */
max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
- max_trigger = max_fill - EFX_RX_BATCH;
+ max_trigger =
+ max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
if (rx_refill_threshold != 0) {
trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
if (trigger > max_trigger)
@@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
int i;
+ struct efx_nic *efx = rx_queue->efx;
struct efx_rx_buffer *rx_buf;
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
- /* Release RX buffers NB start at index 0 not current HW ptr */
+ /* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
- for (i = 0; i <= rx_queue->ptr_mask; i++) {
- rx_buf = efx_rx_buffer(rx_queue, i);
+ for (i = rx_queue->removed_count; i < rx_queue->added_count;
+ i++) {
+ unsigned index = i & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
efx_fini_rx_buffer(rx_queue, rx_buf);
}
}
+
+ /* Unmap and release the pages in the recycle ring. Remove the ring. */
+ for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+ struct page *page = rx_queue->page_ring[i];
+ struct efx_rx_page_state *state;
+
+ if (page == NULL)
+ continue;
+
+ state = page_address(page);
+ dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+ PAGE_SIZE << efx->rx_buffer_order,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ }
+ kfree(rx_queue->page_ring);
+ rx_queue->page_ring = NULL;
}
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
}
-module_param(rx_alloc_method, int, 0644);
-MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
-
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ba40f67e4f0..51669244d15 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -202,7 +202,7 @@ out:
static enum reset_type siena_map_reset_reason(enum reset_type reason)
{
- return RESET_TYPE_ALL;
+ return RESET_TYPE_RECOVER_OR_ALL;
}
static int siena_map_reset_flags(u32 *flags)
@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
return efx_mcdi_reset_port(efx);
}
+#ifdef CONFIG_EEH
+/* When a PCI device is isolated from the bus, a subsequent MMIO read is
+ * required for the kernel EEH mechanisms to notice. As the Solarflare driver
+ * was written to minimise MMIO reads (for latency), a periodic call to check
+ * the EEH status of the device is required so that device recovery can happen
+ * in a timely fashion.
+ */
+static void siena_monitor(struct efx_nic *efx)
+{
+ struct eeh_dev *eehdev =
+ of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
+
+ eeh_dev_check_failure(eehdev);
+}
+#endif
+
static int siena_probe_nvconfig(struct efx_nic *efx)
{
u32 caps = 0;
@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
+ EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
/* Set hash key for IPv4 */
@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
.init = siena_init_nic,
.dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void,
+#ifdef CONFIG_EEH
+ .monitor = siena_monitor,
+#else
.monitor = NULL,
+#endif
.map_reset_reason = siena_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw,
@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0,
+ .can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32
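Editor's note: the FRF_BZ_RX_USR_BUF_SIZE write in the siena.c hunk shifts the buffer size right by five, i.e. the register field counts 32-byte units, so the usable buffer size must be a multiple of 32. A trivial sketch of that encoding, using an assumed size of 1824 bytes (the real EFX_RX_USR_BUF_SIZE constant lives in the driver headers and is not shown in this hunk):

#include <assert.h>
#include <stdio.h>

/* Illustrative only: RX_USR_BUF_SIZE is programmed in 32-byte units. */
int main(void)
{
	unsigned int usr_buf_size = 1824;	/* assumed example value */

	assert(usr_buf_size % 32 == 0);
	printf("register field value: %u\n", usr_buf_size >> 5);	/* 57 */
	return 0;
}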
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 79ad9c94a21..4bdbaad9932 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma, GFP_ATOMIC);
+ &priv->tx_ring_dma,
+ GFP_ATOMIC | __GFP_ZERO);
if (!priv->tx_ring)
return -ENOMEM;
- memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
+
priv->tx_count = priv->tx_read = priv->tx_write = 0;
mace->eth.tx_ring_base = priv->tx_ring_dma;
/* Now init skb save area */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index efca14eaefa..e45829628d5 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1841,15 +1841,12 @@ refill_rx_ring:
entry = sis_priv->dirty_rx % NUM_RX_DESC;
if (sis_priv->rx_skbuff[entry] == NULL) {
- if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) {
+ skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
+ if (skb == NULL) {
/* not enough memory for an skbuff; this makes a
* "hole" in the buffer ring, and it is not clear
* how the hardware will react to this kind
* of degenerate buffer */
- if (netif_msg_rx_err(sis_priv))
- printk(KERN_INFO "%s: Memory squeeze, "
- "deferring packet.\n",
- net_dev->name);
net_dev->stats.rx_dropped++;
break;
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 50823da9dc1..e85c2e7e824 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev)
dev->stats.multicast++;
skb = netdev_alloc_skb(dev, packet_length + 5);
-
if ( skb == NULL ) {
- printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
dev->stats.rx_dropped++;
goto done;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 591650a8de3..dfbf978315d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev)
*/
skb = netdev_alloc_skb(dev, packet_len);
if (unlikely(skb == NULL)) {
- printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
- dev->name);
SMC_WAIT_MMU_BUSY(lp);
SMC_SET_MMU_CMD(lp, MC_RELEASE);
dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index da5cc9a3b34..48e2b99bec5 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2115,7 +2115,7 @@ static int smsc911x_init(struct net_device *dev)
spin_lock_init(&pdata->dev_lock);
spin_lock_init(&pdata->mac_lock);
- if (pdata->ioaddr == 0) {
+ if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d457fa2d750..ffa5c4ad121 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
BUG_ON(pd->rx_buffers[index].skb);
BUG_ON(pd->rx_buffers[index].mapping);
- if (unlikely(!skb)) {
- smsc_warn(RX_ERR, "Failed to allocate new skb!");
+ if (unlikely(!skb))
return -ENOMEM;
- }
mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c0ea838c78d..f695a50bac4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
select MII
select PHYLIB
select CRC32
+ select PTP_1588_CLOCK
---help---
This is the driver for the Ethernet IPs built around a
Synopsys IP Core and only tested on the STMicroelectronics
@@ -54,22 +55,4 @@ config STMMAC_DA
By default, the DMA arbitration scheme is based on Round-robin
(rx:tx priority is 1:1).
-choice
- prompt "Select the DMA TX/RX descriptor operating modes"
- depends on STMMAC_ETH
- ---help---
- This driver supports DMA descriptor to operate both in dual buffer
- (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
- points to two data buffer pointers whereas in CHAINED mode they
- points to only one data buffer pointer.
-
-config STMMAC_RING
- bool "Enable Descriptor Ring Mode"
-
-config STMMAC_CHAINED
- bool "Enable Descriptor Chained Mode"
-
-endchoice
-
-
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c8e8ea60ac1..356a9dd32be 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,9 +1,7 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
-stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
-stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
- mmc_core.o $(stmmac-y)
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 0668659803e..37a3f93b487 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
#include "stmmac.h"
-unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
+static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
struct stmmac_priv *priv = (struct stmmac_priv *) p;
unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
while (len != 0) {
entry = (++priv->cur_tx) % txsize;
@@ -57,8 +58,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i),
bmax, DMA_TO_DEVICE);
- priv->hw->desc->prepare_tx_desc(desc, 0, bmax,
- csum);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
+ STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
len -= bmax;
@@ -67,8 +69,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device,
(skb->data + bmax * i), len,
DMA_TO_DEVICE);
- priv->hw->desc->prepare_tx_desc(desc, 0, len,
- csum);
+ priv->tx_skbuff_dma[entry] = desc->des2;
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+ STMMAC_CHAIN_MODE);
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
len = 0;
@@ -89,49 +92,70 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
-{
-}
-
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
-}
-
-static void stmmac_clean_desc3(struct dma_desc *p)
-{
-}
-
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
- unsigned int size)
+static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
+ unsigned int size, unsigned int extend_desc)
{
/*
* In chained mode, des3 points to the next element in the ring.
* The last element has to point back to the head.
*/
int i;
- struct dma_desc *p = des;
dma_addr_t dma_phy = phy_addr;
- for (i = 0; i < (size - 1); i++) {
- dma_phy += sizeof(struct dma_desc);
- p->des3 = (unsigned int)dma_phy;
- p++;
+ if (extend_desc) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *) des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_extended_desc);
+ p->basic.des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ p->basic.des3 = (unsigned int)phy_addr;
+
+ } else {
+ struct dma_desc *p = (struct dma_desc *) des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_desc);
+ p->des3 = (unsigned int)dma_phy;
+ p++;
+ }
+ p->des3 = (unsigned int)phy_addr;
}
- p->des3 = (unsigned int)phy_addr;
}
-static int stmmac_set_16kib_bfsize(int mtu)
+static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (priv->hwts_rx_en && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = (unsigned int)(priv->dma_rx_phy +
+ (((priv->dirty_rx) + 1) %
+ priv->dma_rx_size) *
+ sizeof(struct dma_desc));
+}
+
+static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
- /* Not supported */
- return 0;
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+
+ if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = (unsigned int)(priv->dma_tx_phy +
+ (((priv->dirty_tx + 1) %
+ priv->dma_tx_size) *
+ sizeof(struct dma_desc)));
}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_chain_mode_ops chain_mode_ops = {
+ .init = stmmac_init_dma_chain,
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
- .init_desc3 = stmmac_init_desc3,
- .init_dma_chain = stmmac_init_dma_chain,
.clean_desc3 = stmmac_clean_desc3,
- .set_16kib_bfsize = stmmac_set_16kib_bfsize,
};
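Editor's note: chain mode links descriptors explicitly, so each des3 carries the bus address of the next descriptor and the last one wraps back to the head; that is also what refill_desc3/clean_desc3 must restore once the hardware has overwritten des3 with a 1588 timestamp. A user-space model of that initialisation follows; demo_desc, its 16-byte layout and the base address are invented for the example and do not match the driver's real descriptor:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: build an explicit descriptor chain in des3. */
struct demo_desc {
	uint32_t des0, des1, des2, des3;
};

static void chain_init(struct demo_desc *ring, uint32_t phys_base,
		       unsigned int size)
{
	unsigned int i;

	/* Each des3 points at the next descriptor; the last wraps to the head. */
	for (i = 0; i < size; i++)
		ring[i].des3 = phys_base +
			((i + 1) % size) * (uint32_t)sizeof(struct demo_desc);
}

int main(void)
{
	struct demo_desc ring[4];
	unsigned int i;

	chain_init(ring, 0x10000000u, 4);
	for (i = 0; i < 4; i++)
		printf("desc %u -> 0x%08x\n", i, ring[i].des3);
	return 0;
}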
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 186d1480612..ad7e20a9875 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -117,6 +117,36 @@ struct stmmac_extra_stats {
unsigned long irq_rx_path_in_lpi_mode_n;
unsigned long irq_rx_path_exit_lpi_mode_n;
unsigned long phy_eee_wakeup_error_n;
+ /* Extended RDES status */
+ unsigned long ip_hdr_err;
+ unsigned long ip_payload_err;
+ unsigned long ip_csum_bypassed;
+ unsigned long ipv4_pkt_rcvd;
+ unsigned long ipv6_pkt_rcvd;
+ unsigned long rx_msg_type_ext_no_ptp;
+ unsigned long rx_msg_type_sync;
+ unsigned long rx_msg_type_follow_up;
+ unsigned long rx_msg_type_delay_req;
+ unsigned long rx_msg_type_delay_resp;
+ unsigned long rx_msg_type_pdelay_req;
+ unsigned long rx_msg_type_pdelay_resp;
+ unsigned long rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_frame_type;
+ unsigned long ptp_ver;
+ unsigned long timestamp_dropped;
+ unsigned long av_pkt_rcvd;
+ unsigned long av_tagged_pkt_rcvd;
+ unsigned long vlan_tag_priority_val;
+ unsigned long l3_filter_match;
+ unsigned long l4_filter_match;
+ unsigned long l3_l4_filter_no_match;
+ /* PCS */
+ unsigned long irq_pcs_ane_n;
+ unsigned long irq_pcs_link_n;
+ unsigned long irq_rgmii_n;
+ unsigned long pcs_link;
+ unsigned long pcs_duplex;
+ unsigned long pcs_speed;
};
/* CSR Frequency Access Defines */
@@ -138,6 +168,12 @@ struct stmmac_extra_stats {
#define FLOW_TX 2
#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+/* PCS defines */
+#define STMMAC_PCS_RGMII (1 << 0)
+#define STMMAC_PCS_SGMII (1 << 1)
+#define STMMAC_PCS_TBI (1 << 2)
+#define STMMAC_PCS_RTBI (1 << 3)
+
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
/* DMA HW feature register fields */
@@ -194,17 +230,25 @@ enum dma_irq_status {
handle_tx = 0x8,
};
-enum core_specific_irq_mask {
- core_mmc_tx_irq = 1,
- core_mmc_rx_irq = 2,
- core_mmc_rx_csum_offload_irq = 4,
- core_irq_receive_pmt_irq = 8,
- core_irq_tx_path_in_lpi_mode = 16,
- core_irq_tx_path_exit_lpi_mode = 32,
- core_irq_rx_path_in_lpi_mode = 64,
- core_irq_rx_path_exit_lpi_mode = 128,
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
+
+#define CORE_PCS_ANE_COMPLETE (1 << 5)
+#define CORE_PCS_LINK_STATUS (1 << 6)
+#define CORE_RGMII_IRQ (1 << 7)
+
+struct rgmii_adv {
+ unsigned int pause;
+ unsigned int duplex;
+ unsigned int lp_pause;
+ unsigned int lp_duplex;
};
+#define STMMAC_PCS_PAUSE 1
+#define STMMAC_PCS_ASYM_PAUSE 2
+
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -255,23 +299,26 @@ struct dma_features {
#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+#define STMMAC_CHAIN_MODE 0x1
+#define STMMAC_RING_MODE 0x2
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
- void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
+ void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
+ int end);
/* DMA TX descriptor ring initialization */
- void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
+ void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
- int csum_flag);
+ int csum_flag, int mode);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
/* Invoked by the xmit function to close the tx descriptor */
void (*close_tx_desc) (struct dma_desc *p);
/* Clean the tx descriptor as soon as the tx irq is received */
- void (*release_tx_desc) (struct dma_desc *p);
+ void (*release_tx_desc) (struct dma_desc *p, int mode);
/* Clear interrupt on tx frame completion. When this bit is
* set an interrupt happens as soon as the frame is transmitted */
void (*clear_tx_ic) (struct dma_desc *p);
@@ -290,12 +337,22 @@ struct stmmac_desc_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+ void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p);
+ /* Set tx timestamp enable bit */
+ void (*enable_tx_timestamp) (struct dma_desc *p);
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status) (struct dma_desc *p);
+ /* get timestamp value */
+ u64 (*get_timestamp) (void *desc, u32 ats);
+ /* get rx timestamp status */
+ int (*get_rx_timestamp_status) (void *desc, u32 ats);
};
struct stmmac_dma_ops {
/* DMA core initialization */
int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
- int burst_len, u32 dma_tx, u32 dma_rx);
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds);
/* Dump DMA registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Set tx/rx threshold in the csr6 register
@@ -327,7 +384,8 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- int (*host_irq_status) (void __iomem *ioaddr);
+ int (*host_irq_status) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev, int id);
/* Flow control setting */
@@ -344,6 +402,18 @@ struct stmmac_ops {
void (*reset_eee_mode) (void __iomem *ioaddr);
void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
void (*set_eee_pls) (void __iomem *ioaddr, int link);
+ void (*ctrl_ane) (void __iomem *ioaddr, bool restart);
+ void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv);
+};
+
+struct stmmac_hwtimestamp {
+ void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
+ void (*config_sub_second_increment) (void __iomem *ioaddr);
+ int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
+ int (*config_addend)(void __iomem *ioaddr, u32 addend);
+ int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub);
+ u64 (*get_systime)(void __iomem *ioaddr);
};
struct mac_link {
@@ -360,19 +430,28 @@ struct mii_regs {
struct stmmac_ring_mode_ops {
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
- void (*refill_desc3) (int bfsize, struct dma_desc *p);
- void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p);
- void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr,
- unsigned int size);
- void (*clean_desc3) (struct dma_desc *p);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*init_desc3) (struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
int (*set_16kib_bfsize) (int mtu);
};
+struct stmmac_chain_mode_ops {
+ void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
+ unsigned int extend_desc);
+ unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
+};
+
struct mac_device_info {
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
const struct stmmac_ring_mode_ops *ring;
+ const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
unsigned int synopsys_uid;
@@ -390,5 +469,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
+extern const struct stmmac_chain_mode_ops chain_mode_ops;
#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 223adf95fd0..2eca0c03303 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -24,6 +24,7 @@
#ifndef __DESCS_H__
#define __DESCS_H__
+/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
/* Receive descriptor */
union {
@@ -60,7 +61,7 @@ struct dma_desc {
} rx;
struct {
/* RDES0 */
- u32 payload_csum_error:1;
+ u32 rx_mac_addr:1;
u32 crc_error:1;
u32 dribbling:1;
u32 error_gmii:1;
@@ -162,13 +163,57 @@ struct dma_desc {
unsigned int des3;
};
+/* Extended descriptor structure (supported by new SYNP GMAC generations) */
+struct dma_extended_desc {
+ struct dma_desc basic;
+ union {
+ struct {
+ u32 ip_payload_type:3;
+ u32 ip_hdr_err:1;
+ u32 ip_payload_err:1;
+ u32 ip_csum_bypassed:1;
+ u32 ipv4_pkt_rcvd:1;
+ u32 ipv6_pkt_rcvd:1;
+ u32 msg_type:4;
+ u32 ptp_frame_type:1;
+ u32 ptp_ver:1;
+ u32 timestamp_dropped:1;
+ u32 reserved:1;
+ u32 av_pkt_rcvd:1;
+ u32 av_tagged_pkt_rcvd:1;
+ u32 vlan_tag_priority_val:3;
+ u32 reserved3:3;
+ u32 l3_filter_match:1;
+ u32 l4_filter_match:1;
+ u32 l3_l4_filter_no_match:2;
+ u32 reserved4:4;
+ } erx;
+ struct {
+ u32 reserved;
+ } etx;
+ } des4;
+ unsigned int des5; /* Reserved */
+ unsigned int des6; /* Tx/Rx Timestamp Low */
+ unsigned int des7; /* Tx/Rx Timestamp High */
+};
+
/* Transmit checksum insertion control */
enum tdes_csum_insertion {
cic_disabled = 0, /* Checksum Insertion Control */
cic_only_ip = 1, /* Only IP header */
- cic_no_pseudoheader = 2, /* IP header but pseudoheader
- * is not calculated */
+ /* IP header but pseudoheader is not calculated */
+ cic_no_pseudoheader = 2,
cic_full = 3, /* IP header and pseudoheader */
};
+/* Extended RDES4 definitions */
+#define RDES_EXT_NO_PTP 0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+
#endif /* __DESCS_H__ */
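
The RDES_EXT_* codes added above are the values reported in the des4.erx.msg_type field of the extended descriptor and identify the PTP message carried by the received frame. A minimal, self-contained sketch (plain userspace C, values copied from the header above; the helper name is hypothetical) of mapping those codes to message names:

    #include <stdio.h>

    /* Values copied from descs.h above */
    #define RDES_EXT_NO_PTP           0
    #define RDES_EXT_SYNC             0x1
    #define RDES_EXT_FOLLOW_UP        0x2
    #define RDES_EXT_DELAY_REQ        0x3
    #define RDES_EXT_DELAY_RESP       0x4
    #define RDES_EXT_PDELAY_REQ       0x5
    #define RDES_EXT_PDELAY_RESP      0x6
    #define RDES_EXT_PDELAY_FOLLOW_UP 0x7

    /* Hypothetical helper: translate the 4-bit msg_type field to a name */
    static const char *rdes_ext_msg_name(unsigned int msg_type)
    {
        switch (msg_type) {
        case RDES_EXT_SYNC:             return "SYNC";
        case RDES_EXT_FOLLOW_UP:        return "FOLLOW_UP";
        case RDES_EXT_DELAY_REQ:        return "DELAY_REQ";
        case RDES_EXT_DELAY_RESP:       return "DELAY_RESP";
        case RDES_EXT_PDELAY_REQ:       return "PDELAY_REQ";
        case RDES_EXT_PDELAY_RESP:      return "PDELAY_RESP";
        case RDES_EXT_PDELAY_FOLLOW_UP: return "PDELAY_FOLLOW_UP";
        default:                        return "no PTP / unknown";
        }
    }

    int main(void)
    {
        unsigned int t;

        for (t = 0; t <= 0x7; t++)
            printf("msg_type %u -> %s\n", t, rdes_ext_msg_name(t));
        return 0;
    }

The driver's enh_desc_get_ext_status(), added further below, performs the same classification but increments per-type counters in stmmac_extra_stats instead of printing names.
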
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 7ee9499a6e3..20f83fc9cf1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -30,26 +30,28 @@
#ifndef __DESC_COM_H__
#define __DESC_COM_H__
-#if defined(CONFIG_STMMAC_RING)
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Specific functions used for Ring mode */
+
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
{
p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
if (end)
p->des01.erx.end_ring = 1;
}
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
{
if (end)
p->des01.etx.end_ring = 1;
}
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
{
p->des01.etx.end_ring = ter;
}
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_4KiB)) {
p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
@@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.etx.buffer1_size = len;
}
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
{
p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
if (end)
p->des01.rx.end_ring = 1;
}
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
{
if (end)
p->des01.tx.end_ring = 1;
}
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
{
p->des01.tx.end_ring = ter;
}
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
{
if (unlikely(len > BUF_SIZE_2KiB)) {
p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -85,47 +88,48 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len;
}
-#else
+/* Specific functions used for Chain mode */
-static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.erx.second_address_chained = 1;
}
-static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end)
+static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.etx.second_address_chained = 1;
}
-static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
{
p->des01.etx.second_address_chained = 1;
}
-static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
p->des01.etx.buffer1_size = len;
}
-static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end)
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.rx.second_address_chained = 1;
}
-static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size)
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int end)
{
p->des01.tx.second_address_chained = 1;
}
-static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter)
+static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
{
p->des01.tx.second_address_chained = 1;
}
-static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
+static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
{
p->des01.tx.buffer1_size = len;
}
-#endif
-
#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 7ad56afd632..57f4e8f607e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -89,13 +89,46 @@ enum power_event {
(reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 32
+/* PCS registers (AN/TBI/SGMII/RGMII) offset */
#define GMAC_AN_CTRL 0x000000c0 /* AN control */
#define GMAC_AN_STATUS 0x000000c4 /* AN status */
#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
-#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partener ability */
+#define GMAC_ANE_LPA		0x000000cc	/* Auto-Neg. link partner ability */
#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
#define GMAC_TBI 0x000000d4 /* TBI extend status */
-#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */
+#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
+
+/* Register 54 (SGMII/RGMII status register) */
+#define GMAC_S_R_GMII_LINK 0x8
+#define GMAC_S_R_GMII_SPEED 0x5
+#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
+#define GMAC_S_R_GMII_MODE 0x1
+#define GMAC_S_R_GMII_SPEED_125 2
+#define GMAC_S_R_GMII_SPEED_25 1
+
+/* Common ADV and LPA defines */
+#define GMAC_ANE_FD (1 << 5)
+#define GMAC_ANE_HD (1 << 6)
+#define GMAC_ANE_PSE (3 << 7)
+#define GMAC_ANE_PSE_SHIFT 7
+
/* GMAC Configuration defines */
#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
@@ -155,6 +188,7 @@ enum inter_frame_gap {
/* Programmable burst length (passed through platform)*/
#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
enum rx_tx_priority_ratio {
double_ratio = 0x00004000, /*2:1 */
@@ -230,5 +264,7 @@ enum rtc_control {
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
#endif /* __DWMAC1000_H__ */
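
The GMAC_ANE_FD/HD bits and the two-bit GMAC_ANE_PSE field added above follow the clause 37 (802.3z) auto-negotiation base-page layout; the PSE field is what the ethtool code later maps onto STMMAC_PCS_PAUSE and STMMAC_PCS_ASYM_PAUSE. A minimal sketch (userspace C, bit values copied from this header, helper name hypothetical) of decoding an ADV or LPA word:

    #include <stdio.h>

    /* Bits copied from dwmac1000.h above */
    #define GMAC_ANE_FD        (1 << 5)   /* full-duplex ability */
    #define GMAC_ANE_HD        (1 << 6)   /* half-duplex ability */
    #define GMAC_ANE_PSE       (3 << 7)   /* 2-bit pause field   */
    #define GMAC_ANE_PSE_SHIFT 7

    /* Pause encoding used later by the ethtool code (common.h) */
    #define STMMAC_PCS_PAUSE      1
    #define STMMAC_PCS_ASYM_PAUSE 2

    /* Hypothetical helper: report what an ADV or LPA register value advertises */
    static void decode_ane_word(unsigned int val)
    {
        unsigned int pause = (val & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;

        printf("duplex:%s%s pause:%s asym-pause:%s\n",
               (val & GMAC_ANE_FD) ? " full" : "",
               (val & GMAC_ANE_HD) ? " half" : "",
               (pause & STMMAC_PCS_PAUSE) ? "yes" : "no",
               (pause & STMMAC_PCS_ASYM_PAUSE) ? "yes" : "no");
    }

    int main(void)
    {
        /* e.g. link partner advertises full duplex + symmetric pause */
        decode_ane_word(GMAC_ANE_FD | (1 << 7));
        return 0;
    }

dwmac1000_get_adv() in the next file performs the equivalent decoding into struct rgmii_adv.
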
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index bfe02260549..29138da19db 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -28,6 +28,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
+#include <linux/ethtool.h>
#include <asm/io.h>
#include "dwmac1000.h"
@@ -193,59 +194,91 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
writel(pmt, ioaddr + GMAC_PMT);
}
-
-static int dwmac1000_irq_status(void __iomem *ioaddr)
+static int dwmac1000_irq_status(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
- int status = 0;
+ int ret = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq)) {
CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
- status |= core_mmc_tx_irq;
+ x->mmc_tx_irq_n++;
}
if (unlikely(intr_status & mmc_rx_irq)) {
CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
- status |= core_mmc_rx_irq;
+ x->mmc_rx_irq_n++;
}
if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- status |= core_mmc_rx_csum_offload_irq;
+ x->mmc_rx_csum_offload_irq_n++;
}
if (unlikely(intr_status & pmt_irq)) {
CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
- status |= core_irq_receive_pmt_irq;
+ x->irq_receive_pmt_irq_n++;
}
/* MAC trx/rx EEE LPI entry/exit interrupts */
if (intr_status & lpiis_irq) {
/* Clean LPI interrupt by reading the Reg 12 */
- u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
+ ret = readl(ioaddr + LPI_CTRL_STATUS);
- if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
+ if (ret & LPI_CTRL_STATUS_TLPIEN) {
CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
- status |= core_irq_tx_path_in_lpi_mode;
+ x->irq_tx_path_in_lpi_mode_n++;
}
- if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
+ if (ret & LPI_CTRL_STATUS_TLPIEX) {
CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
- status |= core_irq_tx_path_exit_lpi_mode;
+ x->irq_tx_path_exit_lpi_mode_n++;
}
- if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
+ if (ret & LPI_CTRL_STATUS_RLPIEN) {
CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
- status |= core_irq_rx_path_in_lpi_mode;
+ x->irq_rx_path_in_lpi_mode_n++;
}
- if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
+ if (ret & LPI_CTRL_STATUS_RLPIEX) {
CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
- status |= core_irq_rx_path_exit_lpi_mode;
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+ }
+
+ if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
+ readl(ioaddr + GMAC_AN_STATUS);
+ x->irq_pcs_ane_n++;
+ }
+ if (intr_status & rgmii_irq) {
+ u32 status = readl(ioaddr + GMAC_S_R_GMII);
+ CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
+ x->irq_rgmii_n++;
+
+ /* Save and dump the link status. */
+ if (status & GMAC_S_R_GMII_LINK) {
+ int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
+ GMAC_S_R_GMII_SPEED_SHIFT;
+ x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
+
+ if (speed_value == GMAC_S_R_GMII_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_S_R_GMII_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_link = 1;
+ pr_debug("Link is Up - %d/%s\n", (int) x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_debug("Link is Down\n");
}
}
- return status;
+ return ret;
}
static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
@@ -297,6 +330,41 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
+{
+ u32 value;
+
+ value = readl(ioaddr + GMAC_AN_CTRL);
+ /* auto negotiation enable and External Loopback enable */
+ value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ u32 value = readl(ioaddr + GMAC_ANE_ADV);
+
+ if (value & GMAC_ANE_FD)
+ adv->duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv->duplex |= DUPLEX_HALF;
+
+ adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_ANE_LPA);
+
+ if (value & GMAC_ANE_FD)
+ adv->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv->lp_duplex = DUPLEX_HALF;
+
+ adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+
static const struct stmmac_ops dwmac1000_ops = {
.core_init = dwmac1000_core_init,
.rx_ipc = dwmac1000_rx_ipc_enable,
@@ -311,6 +379,8 @@ static const struct stmmac_ops dwmac1000_ops = {
.reset_eee_mode = dwmac1000_reset_eee_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
+ .ctrl_ane = dwmac1000_ctrl_ane,
+ .get_adv = dwmac1000_get_adv,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index bf83c03bfd0..f1c4b2c00aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
#include "dwmac1000.h"
#include "dwmac_dma.h"
-static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
- int mb, int burst_len, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int limit;
@@ -73,6 +73,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
#ifdef CONFIG_STMMAC_DA
value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
#endif
+
+ if (atds)
+ value |= DMA_BUS_MODE_ATDS;
+
writel(value, ioaddr + DMA_BUS_MODE);
/* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f83210e7c22..cb86a58c1c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,7 +72,8 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
return 0;
}
-static int dwmac100_irq_status(void __iomem *ioaddr)
+static int dwmac100_irq_status(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
{
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index c2b4d55a79b..e979a8b2ae4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
#include "dwmac100.h"
#include "dwmac_dma.h"
-static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
- int mb, int burst_len, u32 dma_tx, u32 dma_rx)
+static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int burst_len, u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int limit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2fc8ef95f97..0fbc8fafa70 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,6 +150,57 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
+static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p)
+{
+ if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
+ if (p->des4.erx.ip_hdr_err)
+ x->ip_hdr_err++;
+ if (p->des4.erx.ip_payload_err)
+ x->ip_payload_err++;
+ if (p->des4.erx.ip_csum_bypassed)
+ x->ip_csum_bypassed++;
+ if (p->des4.erx.ipv4_pkt_rcvd)
+ x->ipv4_pkt_rcvd++;
+ if (p->des4.erx.ipv6_pkt_rcvd)
+ x->ipv6_pkt_rcvd++;
+ if (p->des4.erx.msg_type == RDES_EXT_SYNC)
+ x->rx_msg_type_sync++;
+ else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
+ x->rx_msg_type_follow_up++;
+ else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+ x->rx_msg_type_delay_req++;
+ else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
+ x->rx_msg_type_delay_resp++;
+		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
+ x->rx_msg_type_pdelay_req++;
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
+ x->rx_msg_type_pdelay_resp++;
+ else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->rx_msg_type_pdelay_follow_up++;
+ else
+ x->rx_msg_type_ext_no_ptp++;
+ if (p->des4.erx.ptp_frame_type)
+ x->ptp_frame_type++;
+ if (p->des4.erx.ptp_ver)
+ x->ptp_ver++;
+ if (p->des4.erx.timestamp_dropped)
+ x->timestamp_dropped++;
+ if (p->des4.erx.av_pkt_rcvd)
+ x->av_pkt_rcvd++;
+ if (p->des4.erx.av_tagged_pkt_rcvd)
+ x->av_tagged_pkt_rcvd++;
+ if (p->des4.erx.vlan_tag_priority_val)
+ x->vlan_tag_priority_val++;
+ if (p->des4.erx.l3_filter_match)
+ x->l3_filter_match++;
+ if (p->des4.erx.l4_filter_match)
+ x->l4_filter_match++;
+ if (p->des4.erx.l3_l4_filter_no_match)
+ x->l3_l4_filter_no_match++;
+ }
+}
+
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
@@ -198,7 +249,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
- p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
+ p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
if (unlikely(p->des01.erx.dribbling)) {
CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
@@ -225,34 +276,32 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
x->rx_vlan++;
}
#endif
+
return ret;
}
-static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
+static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.erx.own = 1;
- p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ p->des01.erx.own = 1;
+ p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
- ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+ if (mode == STMMAC_CHAIN_MODE)
+ ehn_desc_rx_set_on_chain(p, end);
+ else
+ ehn_desc_rx_set_on_ring(p, end);
- if (disable_rx_ic)
- p->des01.erx.disable_ic = 1;
- p++;
- }
+ if (disable_rx_ic)
+ p->des01.erx.disable_ic = 1;
}
-static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- int i;
-
- for (i = 0; i < ring_size; i++) {
- p->des01.etx.own = 0;
- ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
- p++;
- }
+ p->des01.etx.own = 0;
+ if (mode == STMMAC_CHAIN_MODE)
+ ehn_desc_tx_set_on_chain(p, end);
+ else
+ ehn_desc_tx_set_on_ring(p, end);
}
static int enh_desc_get_tx_owner(struct dma_desc *p)
@@ -280,20 +329,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p)
return p->des01.etx.last_segment;
}
-static void enh_desc_release_tx_desc(struct dma_desc *p)
+static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
int ter = p->des01.etx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- enh_desc_end_tx_desc(p, ter);
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_desc_end_tx_desc_on_chain(p, ter);
+ else
+ enh_desc_end_tx_desc_on_ring(p, ter);
}
static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
+ int csum_flag, int mode)
{
p->des01.etx.first_segment = is_fs;
- enh_set_tx_desc_len(p, len);
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_set_tx_desc_len_on_chain(p, len);
+ else
+ enh_set_tx_desc_len_on_ring(p, len);
if (likely(csum_flag))
p->des01.etx.checksum_insertion = cic_full;
@@ -323,6 +378,49 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
return p->des01.erx.frame_length;
}
+static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des01.etx.time_stamp_enable = 1;
+}
+
+static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return p->des01.etx.time_stamp_status;
+}
+
+static u64 enh_desc_get_timestamp(void *desc, u32 ats)
+{
+ u64 ns;
+
+ if (ats) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+ ns = p->des6;
+ /* convert high/sec time stamp value to nanosecond */
+ ns += p->des7 * 1000000000ULL;
+ } else {
+ struct dma_desc *p = (struct dma_desc *)desc;
+ ns = p->des2;
+ ns += p->des3 * 1000000000ULL;
+ }
+
+ return ns;
+}
+
+static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ if (ats) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+ return p->basic.des01.erx.ipc_csum_error;
+ } else {
+ struct dma_desc *p = (struct dma_desc *)desc;
+ if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+ /* timestamp is corrupted, hence don't store it */
+ return 0;
+ else
+ return 1;
+ }
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -339,4 +437,9 @@ const struct stmmac_desc_ops enh_desc_ops = {
.set_tx_owner = enh_desc_set_tx_owner,
.set_rx_owner = enh_desc_set_rx_owner,
.get_rx_frame_len = enh_desc_get_rx_frame_len,
+ .rx_extended_status = enh_desc_get_ext_status,
+ .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
+ .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
+ .get_timestamp = enh_desc_get_timestamp,
+ .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
};
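
The new get_timestamp callbacks fold the two 32-bit snapshot words of a descriptor (nanoseconds in the low word, seconds in the high word) into a single 64-bit nanosecond value. A minimal arithmetic sketch of that conversion (userspace C, helper name hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper mirroring the folding done by
     * enh_desc_get_timestamp(): the low word holds nanoseconds,
     * the high word holds seconds.
     */
    static uint64_t desc_words_to_ns(uint32_t ts_low, uint32_t ts_high)
    {
        return (uint64_t)ts_low + (uint64_t)ts_high * 1000000000ULL;
    }

    int main(void)
    {
        /* 5 s + 123456 ns */
        printf("%llu ns\n",
               (unsigned long long)desc_words_to_ns(123456, 5));
        return 0;
    }

ndesc_get_timestamp() in the next file applies the same formula to des2/des3 of a normal descriptor.
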
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68962c549a2..7cbcea348c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -122,30 +122,28 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic)
+static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ int end)
{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.rx.own = 1;
- p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ p->des01.rx.own = 1;
+ p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
- ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1));
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_rx_set_on_chain(p, end);
+ else
+ ndesc_rx_set_on_ring(p, end);
- if (disable_rx_ic)
- p->des01.rx.disable_ic = 1;
- p++;
- }
+ if (disable_rx_ic)
+ p->des01.rx.disable_ic = 1;
}
-static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
- int i;
- for (i = 0; i < ring_size; i++) {
- p->des01.tx.own = 0;
- ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1)));
- p++;
- }
+ p->des01.tx.own = 0;
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_tx_set_on_chain(p, end);
+ else
+ ndesc_tx_set_on_ring(p, end);
}
static int ndesc_get_tx_owner(struct dma_desc *p)
@@ -173,19 +171,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p)
return p->des01.tx.last_segment;
}
-static void ndesc_release_tx_desc(struct dma_desc *p)
+static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
int ter = p->des01.tx.end_ring;
memset(p, 0, offsetof(struct dma_desc, des2));
- ndesc_end_tx_desc(p, ter);
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_end_tx_desc_on_chain(p, ter);
+ else
+ ndesc_end_tx_desc_on_ring(p, ter);
}
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
- int csum_flag)
+ int csum_flag, int mode)
{
p->des01.tx.first_segment = is_fs;
- norm_set_tx_desc_len(p, len);
+ if (mode == STMMAC_CHAIN_MODE)
+ norm_set_tx_desc_len_on_chain(p, len);
+ else
+ norm_set_tx_desc_len_on_ring(p, len);
if (likely(csum_flag))
p->des01.tx.checksum_insertion = cic_full;
@@ -215,6 +219,39 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
return p->des01.rx.frame_length;
}
+static void ndesc_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des01.tx.time_stamp_enable = 1;
+}
+
+static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return p->des01.tx.time_stamp_status;
+}
+
+static u64 ndesc_get_timestamp(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = p->des2;
+ /* convert high/sec time stamp value to nanosecond */
+ ns += p->des3 * 1000000000ULL;
+
+ return ns;
+}
+
+static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+
+ if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
+ /* timestamp is corrupted, hence don't store it */
+ return 0;
+ else
+ return 1;
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -231,4 +268,8 @@ const struct stmmac_desc_ops ndesc_ops = {
.set_tx_owner = ndesc_set_tx_owner,
.set_rx_owner = ndesc_set_rx_owner,
.get_rx_frame_len = ndesc_get_rx_frame_len,
+ .enable_tx_timestamp = ndesc_enable_tx_timestamp,
+ .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
+ .get_timestamp = ndesc_get_timestamp,
+ .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 4b785e10f2e..d0265a7d5a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -48,25 +48,30 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des2 = dma_map_single(priv->device, skb->data,
bmax, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
- csum);
+ priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
+ STMMAC_RING_MODE);
wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
+ STMMAC_RING_MODE);
wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
+ STMMAC_RING_MODE);
}
return entry;
@@ -82,27 +87,23 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
return ret;
}
-static void stmmac_refill_desc3(int bfsize, struct dma_desc *p)
+static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
- /* Fill DES3 in case of RING mode */
- if (bfsize >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
-}
+ struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
-/* In ring mode we need to fill the desc3 because it is used
- * as buffer */
-static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
-{
- if (unlikely(des3_as_data_buf))
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ if (unlikely(priv->plat->has_gmac))
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
-static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
- unsigned int size)
+/* In ring mode we need to fill the desc3 because it is used as buffer */
+static void stmmac_init_desc3(struct dma_desc *p)
{
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
-static void stmmac_clean_desc3(struct dma_desc *p)
+static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
{
if (unlikely(p->des3))
p->des3 = 0;
@@ -121,7 +122,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = {
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
.init_desc3 = stmmac_init_desc3,
- .init_dma_chain = stmmac_init_dma_chain,
.clean_desc3 = stmmac_clean_desc3,
.set_16kib_bfsize = stmmac_set_16kib_bfsize,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b05df8983be..75f997b467a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,25 +24,29 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Nov_2012"
+#define DRV_MODULE_VERSION "March_2013"
#include <linux/clk.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
#include <linux/pci.h>
#include "common.h"
+#include <linux/ptp_clock_kernel.h>
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
- struct dma_desc *dma_tx ____cacheline_aligned;
+ struct dma_desc *dma_tx ____cacheline_aligned; /* Basic TX desc */
+ struct dma_extended_desc *dma_etx; /* Extended TX descriptor */
dma_addr_t dma_tx_phy;
struct sk_buff **tx_skbuff;
+ dma_addr_t *tx_skbuff_dma;
unsigned int cur_tx;
unsigned int dirty_tx;
unsigned int dma_tx_size;
int tx_coalesce;
- struct dma_desc *dma_rx ;
+ struct dma_desc *dma_rx; /* Basic RX descriptor */
+ struct dma_extended_desc *dma_erx; /* Extended RX descriptor */
unsigned int cur_rx;
unsigned int dirty_rx;
struct sk_buff **rx_skbuff;
@@ -93,6 +97,16 @@ struct stmmac_priv {
u32 tx_coal_timer;
int use_riwt;
u32 rx_riwt;
+ unsigned int mode;
+ int extend_desc;
+ int pcs;
+ int hwts_tx_en;
+ int hwts_rx_en;
+ unsigned int default_addend;
+ u32 adv_ts;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_ops;
+ spinlock_t ptp_lock;
};
extern int phyaddr;
@@ -102,6 +116,9 @@ extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern int stmmac_ptp_register(struct stmmac_priv *priv);
+extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1ac39c1b05..c5f9cb85c8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/net_tstamp.h>
#include <asm/io.h>
#include "stmmac.h"
@@ -108,6 +109,33 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
STMMAC_STAT(phy_eee_wakeup_error_n),
+ /* Extended RDES status */
+ STMMAC_STAT(ip_hdr_err),
+ STMMAC_STAT(ip_payload_err),
+ STMMAC_STAT(ip_csum_bypassed),
+ STMMAC_STAT(ipv4_pkt_rcvd),
+ STMMAC_STAT(ipv6_pkt_rcvd),
+ STMMAC_STAT(rx_msg_type_ext_no_ptp),
+ STMMAC_STAT(rx_msg_type_sync),
+ STMMAC_STAT(rx_msg_type_follow_up),
+ STMMAC_STAT(rx_msg_type_delay_req),
+ STMMAC_STAT(rx_msg_type_delay_resp),
+ STMMAC_STAT(rx_msg_type_pdelay_req),
+ STMMAC_STAT(rx_msg_type_pdelay_resp),
+ STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_frame_type),
+ STMMAC_STAT(ptp_ver),
+ STMMAC_STAT(timestamp_dropped),
+ STMMAC_STAT(av_pkt_rcvd),
+ STMMAC_STAT(av_tagged_pkt_rcvd),
+ STMMAC_STAT(vlan_tag_priority_val),
+ STMMAC_STAT(l3_filter_match),
+ STMMAC_STAT(l4_filter_match),
+ STMMAC_STAT(l3_l4_filter_no_match),
+ /* PCS */
+ STMMAC_STAT(irq_pcs_ane_n),
+ STMMAC_STAT(irq_pcs_link_n),
+ STMMAC_STAT(irq_rgmii_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -219,6 +247,70 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phy = priv->phydev;
int rc;
+
+ if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ struct rgmii_adv adv;
+
+ if (!priv->xstats.pcs_link) {
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+ cmd->duplex = priv->xstats.pcs_duplex;
+
+ ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
+
+ /* Get and convert ADV/LP_ADV from the HW AN registers */
+ if (priv->hw->mac->get_adv)
+ priv->hw->mac->get_adv(priv->ioaddr, &adv);
+ else
+ return -EOPNOTSUPP; /* should never happen indeed */
+
+ /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
+
+ if (adv.pause & STMMAC_PCS_PAUSE)
+ cmd->advertising |= ADVERTISED_Pause;
+ if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
+ cmd->advertising |= ADVERTISED_Asym_Pause;
+ if (adv.lp_pause & STMMAC_PCS_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Pause;
+ if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+
+ /* Reg49[3] always set because ANE is always supported */
+		cmd->autoneg = AUTONEG_ENABLE;
+ cmd->supported |= SUPPORTED_Autoneg;
+ cmd->advertising |= ADVERTISED_Autoneg;
+ cmd->lp_advertising |= ADVERTISED_Autoneg;
+
+ if (adv.duplex) {
+ cmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full);
+ cmd->advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ } else {
+ cmd->supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+ cmd->advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ if (adv.lp_duplex)
+ cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else
+ cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ cmd->port = PORT_OTHER;
+
+ return 0;
+ }
+
if (phy == NULL) {
pr_err("%s: %s: PHY is not registered\n",
__func__, dev->name);
@@ -243,6 +335,30 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
+ if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
+
+ /* Only support ANE */
+ if (cmd->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ mask &= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full);
+
+ spin_lock(&priv->lock);
+ if (priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
+ }
+
spin_lock(&priv->lock);
rc = phy_ethtool_sset(phy, cmd);
spin_unlock(&priv->lock);
@@ -312,6 +428,9 @@ stmmac_get_pauseparam(struct net_device *netdev,
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ if (priv->pcs) /* FIXME */
+ return;
+
spin_lock(&priv->lock);
pause->rx_pause = 0;
@@ -335,6 +454,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
int new_pause = FLOW_OFF;
int ret = 0;
+ if (priv->pcs) /* FIXME */
+ return -EOPNOTSUPP;
+
spin_lock(&priv->lock);
if (pause->rx_pause)
@@ -604,6 +726,38 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
+static int stmmac_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (priv->ptp_clock)
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+ return 0;
+ } else
+ return ethtool_op_get_ts_info(dev, info);
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -623,7 +777,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
};
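
The new stmmac_get_ts_info() only reports the timestamping capabilities through ethtool; enabling and disabling is driven from user space via the standard SIOCSHWTSTAMP ioctl, which stmmac_hwtstamp_ioctl() (added to stmmac_main.c further below) services. A minimal user-space sketch, assuming a hypothetical interface name "eth0" and only the standard <linux/net_tstamp.h> API:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/net_tstamp.h>

    int main(void)
    {
        struct hwtstamp_config cfg;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
            return 1;

        /* Enable TX timestamping and PTPv2 event RX filtering */
        memset(&cfg, 0, sizeof(cfg));
        cfg.tx_type = HWTSTAMP_TX_ON;
        cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder name */
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
            perror("SIOCSHWTSTAMP");
        else
            printf("granted rx_filter: %d\n", cfg.rx_filter);

        close(fd);
        return 0;
    }

Running this requires CAP_NET_ADMIN; on success the driver writes the granted rx_filter back into the same structure.
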
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
new file mode 100644
index 00000000000..def7e75e1d5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -0,0 +1,148 @@
+/*******************************************************************************
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This implements all the API for managing HW timestamp & PTP.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "stmmac_ptp.h"
+
+static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+ writel(data, ioaddr + PTP_TCR);
+}
+
+static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + PTP_TCR);
+ unsigned long data;
+
+	/* Convert the PTP reference clock to its period in nanoseconds:
+	 * increment = (1 / ptp_clock) * 1000000000,
+	 * where ptp_clock = 50MHz.
+ */
+ data = (1000000000ULL / 50000000);
+
+ /* 0.465ns accuracy */
+ if (value & PTP_TCR_TSCTRLSSR)
+ data = (data * 100) / 465;
+
+ writel(data, ioaddr + PTP_SSIR);
+}
+
+static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+ int limit;
+ u32 value;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(nsec, ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSINIT;
+ writel(value, ioaddr + PTP_TCR);
+
+	/* wait for the present system time initialization to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
+{
+ u32 value;
+ int limit;
+
+ writel(addend, ioaddr + PTP_TAR);
+ /* issue command to update the addend value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSADDREG;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present addend update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub)
+{
+ u32 value;
+ int limit;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
+ ioaddr + PTP_STNSUR);
+	/* issue the command to adjust/update the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSUPDT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time adjust/update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static u64 stmmac_get_systime(void __iomem *ioaddr)
+{
+ u64 ns;
+
+ ns = readl(ioaddr + PTP_STNSR);
+ /* convert sec time value to nanosecond */
+ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+
+ return ns;
+}
+
+const struct stmmac_hwtimestamp stmmac_ptp = {
+ .config_hw_tstamping = stmmac_config_hw_tstamping,
+ .init_systime = stmmac_init_systime,
+ .config_sub_second_increment = stmmac_config_sub_second_increment,
+ .config_addend = stmmac_config_addend,
+ .adjust_systime = stmmac_adjust_systime,
+ .get_systime = stmmac_get_systime,
+};
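
The two register-programming helpers above reduce to simple arithmetic: the sub-second increment is the period of the 50 MHz PTP reference (20 ns, scaled by 100/465 when PTP_TCR_TSCTRLSSR selects the ~0.465 ns digital rollover), and the addend used for frequency adjustment is 2^32 scaled by 50 MHz over the system clock. A worked sketch of both computations (userspace C; the 62.5 MHz STMMAC_SYSCLOCK value is only an illustrative assumption):

    #include <stdint.h>
    #include <stdio.h>

    #define PTP_REF_CLOCK 50000000ULL   /* 50 MHz, as assumed by the driver */

    int main(void)
    {
        /* Illustrative assumption only: a 62.5 MHz STMMAC_SYSCLOCK */
        uint64_t sysclk = 62500000ULL;

        /* Sub-second increment: the period of the PTP reference clock */
        uint64_t ssinc = 1000000000ULL / PTP_REF_CLOCK;    /* 20 ns           */
        uint64_t ssinc_fine = (ssinc * 100) / 465;         /* ~0.465 ns units */

        /* Addend: 2^32 * (50 MHz / sysclk), as in the hwtstamp ioctl below */
        uint64_t addend = (PTP_REF_CLOCK << 32) / sysclk;

        printf("ssinc=%llu ssinc_fine=%llu addend=0x%llx\n",
               (unsigned long long)ssinc,
               (unsigned long long)ssinc_fine,
               (unsigned long long)addend);
        return 0;
    }

With these numbers the increment register gets 20 (or 4 in 0.465 ns units) and the addend comes out to 0xcccccccc, i.e. 2^32 * 0.8.
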
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 39c6c552463..6b26d31c268 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -47,6 +47,8 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
+#include <linux/net_tstamp.h>
+#include "stmmac_ptp.h"
#include "stmmac.h"
#undef STMMAC_DEBUG
@@ -130,6 +132,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+/* By default the driver uses the ring mode to manage the tx and rx descriptors,
+ * but this module parameter lets the user force chain mode instead of the ring
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, S_IRUGO);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -304,6 +313,339 @@ static void stmmac_eee_adjust(struct stmmac_priv *priv)
priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}
+/* stmmac_get_tx_hwtstamp:
+ * @priv : pointer to private device structure.
+ * @entry : descriptor index to be used.
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the timestamp from the descriptor, performs some
+ * sanity checks and then passes it to the stack.
+ */
+static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
+ unsigned int entry,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ u64 ns;
+ void *desc = NULL;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* if skb doesn't support hw tstamp */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ if (priv->adv_ts)
+ desc = (priv->dma_etx + entry);
+ else
+ desc = (priv->dma_tx + entry);
+
+ /* check tx tstamp status */
+ if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
+ return;
+
+ /* get the valid tstamp */
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+
+ return;
+}
+
+/* stmmac_get_rx_hwtstamp:
+ * @priv : pointer to private device structure.
+ * @entry : descriptor index to be used.
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the received packet's timestamp from the descriptor,
+ * performs some sanity checks and then passes it to the stack.
+ */
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
+ unsigned int entry,
+ struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ u64 ns;
+ void *desc = NULL;
+
+ if (!priv->hwts_rx_en)
+ return;
+
+ if (priv->adv_ts)
+ desc = (priv->dma_erx + entry);
+ else
+ desc = (priv->dma_rx + entry);
+
+ /* if rx tstamp is not valid */
+ if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
+ return;
+
+ /* get valid tstamp */
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+}
+
+/**
+ * stmmac_hwtstamp_ioctl - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable both outgoing (TX)
+ * and incoming (RX) packet time stamping based on user input.
+ * Return Value:
+ * 0 on success and an appropriate -ve integer on failure.
+ */
+static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ struct timespec now;
+ u64 temp = 0;
+ u32 ptp_v2 = 0;
+ u32 tstamp_all = 0;
+ u32 ptp_over_ipv4_udp = 0;
+ u32 ptp_over_ipv6_udp = 0;
+ u32 ptp_over_ethernet = 0;
+ u32 snap_type_sel = 0;
+ u32 ts_master_en = 0;
+ u32 ts_event_en = 0;
+ u32 value = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
+ netdev_alert(priv->dev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(struct hwtstamp_config)))
+ return -EFAULT;
+
+ pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->hwts_tx_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->hwts_tx_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (priv->adv_ts) {
+ switch (config.rx_filter) {
+ /* time stamp no incoming packet at all */
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ /* PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ /* PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ /* PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ /* PTP v2/802.AS1, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ /* time stamp any incoming packet */
+ case HWTSTAMP_FILTER_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_all = PTP_TCR_TSENALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+ } else {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ default:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ }
+ }
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+
+ if (!priv->hwts_tx_en && !priv->hwts_rx_en)
+ priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+ else {
+ value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
+ tstamp_all | ptp_v2 | ptp_over_ethernet |
+ ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel);
+
+ priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+
+ /* program Sub Second Increment reg */
+ priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
+
+		/* calculate the default addend value:
+		 * addend = (2^32) / freq_div_ratio,
+		 * where freq_div_ratio = STMMAC_SYSCLOCK / 50MHz;
+		 * hence, addend = ((2^32) * 50MHz) / STMMAC_SYSCLOCK.
+		 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
+		 * achieve 20ns accuracy.
+ *
+ * 2^x * y == (y << x), hence
+ * 2^32 * 50000000 ==> (50000000 << 32)
+ */
+ temp = (u64)(50000000ULL << 32);
+ priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
+ priv->hw->ptp->config_addend(priv->ioaddr,
+ priv->default_addend);
+
+ /* initialize system time */
+ getnstimeofday(&now);
+ priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
+ now.tv_nsec);
+ }
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int stmmac_init_ptp(struct stmmac_priv *priv)
+{
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+	if (priv->dma_cap.time_stamp) {
+		if (netif_msg_hw(priv))
+			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+		priv->adv_ts = 0;
+	}
+	if (priv->dma_cap.atime_stamp && priv->extend_desc) {
+		if (netif_msg_hw(priv))
+			pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
+		priv->adv_ts = 1;
+	}
+
+ priv->hw->ptp = &stmmac_ptp;
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return stmmac_ptp_register(priv);
+}
+
+static void stmmac_release_ptp(struct stmmac_priv *priv)
+{
+ stmmac_ptp_unregister(priv);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -398,6 +740,24 @@ static void stmmac_adjust_link(struct net_device *dev)
DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
}
+static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+{
+ int interface = priv->plat->interface;
+
+ if (priv->dma_cap.pcs) {
+		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
+		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+			pr_debug("STMMAC: PCS RGMII support enable\n");
+			priv->pcs = STMMAC_PCS_RGMII;
+		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ pr_debug("STMMAC: PCS SGMII support enable\n");
+ priv->pcs = STMMAC_PCS_SGMII;
+ }
+ }
+}
+
/**
* stmmac_init_phy - PHY initialization
* @dev: net device structure
@@ -461,29 +821,56 @@ static int stmmac_init_phy(struct net_device *dev)
}
/**
- * display_ring
+ * stmmac_display_ring
* @p: pointer to the ring.
* @size: size of the ring.
- * Description: display all the descriptors within the ring.
+ * Description: display the control/status and buffer descriptors.
*/
-static void display_ring(struct dma_desc *p, int size)
+static void stmmac_display_ring(void *head, int size, int extend_desc)
{
- struct tmp_s {
- u64 a;
- unsigned int b;
- unsigned int c;
- };
int i;
+ struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
+ struct dma_desc *p = (struct dma_desc *) head;
+
for (i = 0; i < size; i++) {
- struct tmp_s *x = (struct tmp_s *)(p + i);
- pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
- i, (unsigned int)virt_to_phys(&p[i]),
- (unsigned int)(x->a), (unsigned int)((x->a) >> 32),
- x->b, x->c);
+ u64 x;
+ if (extend_desc) {
+ x = *(u64 *) ep;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int) virt_to_phys(ep),
+ (unsigned int) x, (unsigned int) (x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ } else {
+ x = *(u64 *) p;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ i, (unsigned int) virt_to_phys(p),
+ (unsigned int) x, (unsigned int) (x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
pr_info("\n");
}
}
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ if (priv->extend_desc) {
+ pr_info("Extended RX descriptor ring:\n");
+ stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
+ pr_info("Extended TX descriptor ring:\n");
+ stmmac_display_ring((void *) priv->dma_etx, txsize, 1);
+ } else {
+ pr_info("RX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
+ pr_info("TX descriptor ring:\n");
+ stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+ }
+}
+
static int stmmac_set_bfsize(int mtu, int bufsize)
{
int ret = bufsize;
@@ -500,6 +887,59 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
return ret;
}
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ int i;
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+
+ /* Clear the Rx/Tx descriptors */
+ for (i = 0; i < rxsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ else
+ priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == rxsize - 1));
+ for (i = 0; i < txsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->mode,
+ (i == txsize - 1));
+ else
+ priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->mode,
+ (i == txsize - 1));
+}
+
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+ int i)
+{
+ struct sk_buff *skb;
+
+ skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (unlikely(skb == NULL)) {
+ pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ return 1;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skbuff[i] = skb;
+ priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz,
+ DMA_FROM_DEVICE);
+
+ p->des2 = priv->rx_skbuff_dma[i];
+
+ if ((priv->mode == STMMAC_RING_MODE) &&
+ (priv->dma_buf_sz == BUF_SIZE_16KiB))
+ priv->hw->ring->init_desc3(p);
+
+ return 0;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
@@ -511,75 +951,70 @@ static void init_dma_desc_rings(struct net_device *dev)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
- struct sk_buff *skb;
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
- unsigned int bfsize;
- int dis_ic = 0;
- int des3_as_data_buf = 0;
+ unsigned int bfsize = 0;
/* Set the max buffer size according to the DESC mode
* and the MTU. Note that RING mode allows 16KiB bsize. */
- bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+ if (priv->mode == STMMAC_RING_MODE)
+ bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
- if (bfsize == BUF_SIZE_16KiB)
- des3_as_data_buf = 1;
- else
+ if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
txsize, rxsize, bfsize);
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- priv->dma_rx =
- (struct dma_desc *)dma_alloc_coherent(priv->device,
- rxsize *
+ if (priv->extend_desc) {
+ priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if ((!priv->dma_erx) || (!priv->dma_etx))
+ return;
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- priv->dma_tx =
- (struct dma_desc *)dma_alloc_coherent(priv->device,
- txsize *
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
-
- if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) {
- pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
- return;
+ if ((!priv->dma_rx) || (!priv->dma_tx))
+ return;
}
- DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
- "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
- dev->name, priv->dma_rx, priv->dma_tx,
- (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (netif_msg_drv(priv))
+ pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
+ (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
/* RX INITIALIZATION */
- DBG(probe, INFO, "stmmac: SKB addresses:\n"
- "skb\t\tskb data\tdma data\n");
-
+ DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
for (i = 0; i < rxsize; i++) {
- struct dma_desc *p = priv->dma_rx + i;
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_erx + i)->basic);
+ else
+ p = priv->dma_rx + i;
- skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN,
- GFP_KERNEL);
- if (unlikely(skb == NULL)) {
- pr_err("%s: Rx init fails; skb is NULL\n", __func__);
+ if (stmmac_init_rx_buffers(priv, p, i))
break;
- }
- skb_reserve(skb, NET_IP_ALIGN);
- priv->rx_skbuff[i] = skb;
- priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
- bfsize, DMA_FROM_DEVICE);
-
- p->des2 = priv->rx_skbuff_dma[i];
-
- priv->hw->ring->init_desc3(des3_as_data_buf, p);
DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
@@ -589,32 +1024,40 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->dma_buf_sz = bfsize;
buf_sz = bfsize;
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc) {
+ priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
+ } else {
+ priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
+ }
+ }
+
/* TX INITIALIZATION */
for (i = 0; i < txsize; i++) {
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+ p->des2 = 0;
+ priv->tx_skbuff_dma[i] = 0;
priv->tx_skbuff[i] = NULL;
- priv->dma_tx[i].des2 = 0;
}
- /* In case of Chained mode this sets the des3 to the next
- * element in the chain */
- priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
- priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
-
priv->dirty_tx = 0;
priv->cur_tx = 0;
- if (priv->use_riwt)
- dis_ic = 1;
- /* Clear the Rx/Tx descriptors */
- priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
+ stmmac_clear_descriptors(priv);
- if (netif_msg_hw(priv)) {
- pr_info("RX descriptor ring:\n");
- display_ring(priv->dma_rx, rxsize);
- pr_info("TX descriptor ring:\n");
- display_ring(priv->dma_tx, txsize);
- }
+ if (netif_msg_hw(priv))
+ stmmac_display_rings(priv);
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -637,13 +1080,20 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
for (i = 0; i < priv->dma_tx_size; i++) {
if (priv->tx_skbuff[i] != NULL) {
- struct dma_desc *p = priv->dma_tx + i;
- if (p->des2)
- dma_unmap_single(priv->device, p->des2,
+ struct dma_desc *p;
+ if (priv->extend_desc)
+ p = &((priv->dma_etx + i)->basic);
+ else
+ p = priv->dma_tx + i;
+
+ if (priv->tx_skbuff_dma[i])
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[i],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
+ priv->tx_skbuff_dma[i] = 0;
}
}
}
@@ -656,14 +1106,24 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
/* Free the region of consistent memory previously allocated for
* the DMA */
- dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
+ if (!priv->extend_desc) {
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ } else {
+ dma_free_coherent(priv->device, priv->dma_tx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_etx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ }
kfree(priv->rx_skbuff_dma);
kfree(priv->rx_skbuff);
+ kfree(priv->tx_skbuff_dma);
kfree(priv->tx_skbuff);
}
@@ -708,13 +1168,18 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
int last;
unsigned int entry = priv->dirty_tx % txsize;
struct sk_buff *skb = priv->tx_skbuff[entry];
- struct dma_desc *p = priv->dma_tx + entry;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *) (priv->dma_etx + entry);
+ else
+ p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
if (priv->hw->desc->get_tx_owner(p))
break;
- /* Verify tx error by looking at the last segment */
+ /* Verify tx error by looking at the last segment. */
last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
@@ -726,22 +1191,27 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->xstats.tx_pkt_n++;
} else
priv->dev->stats.tx_errors++;
+
+ stmmac_get_tx_hwtstamp(priv, entry, skb);
}
TX_DBG("%s: curr %d, dirty %d\n", __func__,
priv->cur_tx, priv->dirty_tx);
- if (likely(p->des2))
- dma_unmap_single(priv->device, p->des2,
+ if (likely(priv->tx_skbuff_dma[entry])) {
+ dma_unmap_single(priv->device,
+ priv->tx_skbuff_dma[entry],
priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
- priv->hw->ring->clean_desc3(p);
+ priv->tx_skbuff_dma[entry] = 0;
+ }
+ priv->hw->ring->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
priv->tx_skbuff[entry] = NULL;
}
- priv->hw->desc->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p, priv->mode);
priv->dirty_tx++;
}
@@ -782,11 +1252,21 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
*/
static void stmmac_tx_err(struct stmmac_priv *priv)
{
+ int i;
+ int txsize = priv->dma_tx_size;
netif_stop_queue(priv->dev);
priv->hw->dma->stop_tx(priv->ioaddr);
dma_free_tx_skbufs(priv);
- priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ for (i = 0; i < txsize; i++)
+ if (priv->extend_desc)
+ priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
+ priv->mode,
+ (i == txsize - 1));
+ else
+ priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
+ priv->mode,
+ (i == txsize - 1));
priv->dirty_tx = 0;
priv->cur_tx = 0;
priv->hw->dma->start_tx(priv->ioaddr);
@@ -860,6 +1340,14 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
pr_info(" Enhanced/Alternate descriptors\n");
+
+ /* GMAC older than 3.50 has no extended descriptors */
+ if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+ pr_info("\tEnabled extended descriptors\n");
+ priv->extend_desc = 1;
+ } else
+ pr_warn("Extended descriptors not supported\n");
+
priv->hw->desc = &enh_desc_ops;
} else {
pr_info(" Normal descriptors\n");
@@ -946,6 +1434,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
int mixed_burst = 0;
+ int atds = 0;
/* Some DMA parameters can be passed from the platform;
* in case of these are not passed we keep a default
@@ -957,9 +1446,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
burst_len = priv->plat->dma_cfg->burst_len;
}
+ if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+ atds = 1;
+
return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
burst_len, priv->dma_tx_phy,
- priv->dma_rx_phy);
+ priv->dma_rx_phy, atds);
}
/**
@@ -1012,10 +1504,13 @@ static int stmmac_open(struct net_device *dev)
stmmac_check_ether_addr(priv);
- ret = stmmac_init_phy(dev);
- if (unlikely(ret)) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
- goto open_error;
+ if (!priv->pcs) {
+ ret = stmmac_init_phy(dev);
+ if (ret) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto open_error;
+ }
}
/* Create and initialize the TX/RX descriptors chains. */
@@ -1084,6 +1579,10 @@ static int stmmac_open(struct net_device *dev)
stmmac_mmc_setup(priv);
+ ret = stmmac_init_ptp(priv);
+ if (ret)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+
#ifdef CONFIG_STMMAC_DEBUG_FS
ret = stmmac_init_fs(dev);
if (ret < 0)
@@ -1104,7 +1603,12 @@ static int stmmac_open(struct net_device *dev)
phy_start(priv->phydev);
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
- priv->eee_enabled = stmmac_eee_init(priv);
+
+	/* When using PCS we cannot deal with the PHY registers at this stage,
+	 * so we do not support extra features like EEE.
+	 */
+ if (!priv->pcs)
+ priv->eee_enabled = stmmac_eee_init(priv);
stmmac_init_tx_coalesce(priv);
@@ -1113,6 +1617,9 @@ static int stmmac_open(struct net_device *dev)
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
}
+ if (priv->pcs && priv->hw->mac->ctrl_ane)
+ priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1184,6 +1691,8 @@ static int stmmac_release(struct net_device *dev)
#endif
clk_disable_unprepare(priv->stmmac_clk);
+ stmmac_release_ptp(priv);
+
return 0;
}
@@ -1198,7 +1707,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
unsigned int entry;
- int i, csum_insertion = 0;
+ int i, csum_insertion = 0, is_jumbo = 0;
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
unsigned int nopaged_len = skb_headlen(skb);
@@ -1233,7 +1742,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
- desc = priv->dma_tx + entry;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *) (priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
first = desc;
#ifdef STMMAC_XMIT_DEBUG
@@ -1244,28 +1757,46 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
priv->tx_skbuff[entry] = skb;
- if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) {
- entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion);
- desc = priv->dma_tx + entry;
+ /* To program the descriptors according to the size of the frame */
+ if (priv->mode == STMMAC_RING_MODE) {
+ is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
+ priv->plat->enh_desc);
+ if (unlikely(is_jumbo))
+ entry = priv->hw->ring->jumbo_frm(priv, skb,
+ csum_insertion);
} else {
+ is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
+ priv->plat->enh_desc);
+ if (unlikely(is_jumbo))
+ entry = priv->hw->chain->jumbo_frm(priv, skb,
+ csum_insertion);
+ }
+ if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
- }
+ csum_insertion, priv->mode);
+ } else
+ desc = first;
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
int len = skb_frag_size(frag);
entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *) (priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
+ priv->tx_skbuff_dma[entry] = desc->des2;
priv->tx_skbuff[entry] = NULL;
- priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
+ priv->mode);
wmb();
priv->hw->desc->set_tx_owner(desc);
wmb();
@@ -1302,7 +1833,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
"first=%p, nfrags=%d\n",
(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
entry, first, nfrags);
- display_ring(priv->dma_tx, txsize);
+ if (priv->extend_desc)
+ stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
+ else
+ stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
+
pr_info(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
}
@@ -1314,7 +1849,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- skb_tx_timestamp(skb);
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
priv->hw->dma->enable_dma_transmission(priv->ioaddr);
@@ -1327,10 +1870,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
unsigned int rxsize = priv->dma_rx_size;
int bfsize = priv->dma_buf_sz;
- struct dma_desc *p = priv->dma_rx;
for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
unsigned int entry = priv->dirty_rx % rxsize;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *) (priv->dma_erx + entry);
+ else
+ p = priv->dma_rx + entry;
+
if (likely(priv->rx_skbuff[entry] == NULL)) {
struct sk_buff *skb;
@@ -1344,15 +1893,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
dma_map_single(priv->device, skb->data, bfsize,
DMA_FROM_DEVICE);
- (p + entry)->des2 = priv->rx_skbuff_dma[entry];
+ p->des2 = priv->rx_skbuff_dma[entry];
- if (unlikely(priv->plat->has_gmac))
- priv->hw->ring->refill_desc3(bfsize, p + entry);
+ priv->hw->ring->refill_desc3(priv, p);
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
wmb();
- priv->hw->desc->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p);
wmb();
}
}
@@ -1363,33 +1911,61 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
unsigned int entry = priv->cur_rx % rxsize;
unsigned int next_entry;
unsigned int count = 0;
- struct dma_desc *p = priv->dma_rx + entry;
- struct dma_desc *p_next;
#ifdef STMMAC_RX_DEBUG
if (netif_msg_hw(priv)) {
pr_debug(">>> stmmac_rx: descriptor ring:\n");
- display_ring(priv->dma_rx, rxsize);
+ if (priv->extend_desc)
+ stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
+ else
+ stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
}
#endif
- while (!priv->hw->desc->get_rx_owner(p)) {
+ while (count < limit) {
int status;
+ struct dma_desc *p, *p_next;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *) (priv->dma_erx + entry);
+ else
+			p = priv->dma_rx + entry;
- if (count >= limit)
+ if (priv->hw->desc->get_rx_owner(p))
break;
count++;
next_entry = (++priv->cur_rx) % rxsize;
- p_next = priv->dma_rx + next_entry;
+ if (priv->extend_desc)
+ p_next = (struct dma_desc *) (priv->dma_erx +
+ next_entry);
+ else
+ p_next = priv->dma_rx + next_entry;
+
prefetch(p_next);
/* read the status of the incoming frame */
- status = (priv->hw->desc->rx_status(&priv->dev->stats,
- &priv->xstats, p));
- if (unlikely(status == discard_frame))
+ status = priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p);
+ if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
+ priv->hw->desc->rx_extended_status(&priv->dev->stats,
+ &priv->xstats,
+ priv->dma_erx +
+ entry);
+ if (unlikely(status == discard_frame)) {
priv->dev->stats.rx_errors++;
- else {
+ if (priv->hwts_rx_en && !priv->extend_desc) {
+			/* DESC2 & DESC3 will be overwritten by the device
+			 * with the timestamp value, hence reinitialize
+			 * them in stmmac_rx_refill() so that the device
+			 * can reuse them.
+			 */
+ priv->rx_skbuff[entry] = NULL;
+ dma_unmap_single(priv->device,
+ priv->rx_skbuff_dma[entry],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ }
+ } else {
struct sk_buff *skb;
int frame_len;
@@ -1418,6 +1994,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
prefetch(skb->data - NET_IP_ALIGN);
priv->rx_skbuff[entry] = NULL;
+ stmmac_get_rx_hwtstamp(priv, entry, skb);
+
skb_put(skb, frame_len);
dma_unmap_single(priv->device,
priv->rx_skbuff_dma[entry],
@@ -1441,7 +2019,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
priv->dev->stats.rx_bytes += frame_len;
}
entry = next_entry;
- p = p_next; /* use prefetched values */
}
stmmac_rx_refill(priv);
@@ -1604,30 +2181,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
/* To handle GMAC own interrupts */
if (priv->plat->has_gmac) {
int status = priv->hw->mac->host_irq_status((void __iomem *)
- dev->base_addr);
+ dev->base_addr,
+ &priv->xstats);
if (unlikely(status)) {
- if (status & core_mmc_tx_irq)
- priv->xstats.mmc_tx_irq_n++;
- if (status & core_mmc_rx_irq)
- priv->xstats.mmc_rx_irq_n++;
- if (status & core_mmc_rx_csum_offload_irq)
- priv->xstats.mmc_rx_csum_offload_irq_n++;
- if (status & core_irq_receive_pmt_irq)
- priv->xstats.irq_receive_pmt_irq_n++;
-
/* For LPI we need to save the tx status */
- if (status & core_irq_tx_path_in_lpi_mode) {
- priv->xstats.irq_tx_path_in_lpi_mode_n++;
+ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
priv->tx_path_in_lpi_mode = true;
- }
- if (status & core_irq_tx_path_exit_lpi_mode) {
- priv->xstats.irq_tx_path_exit_lpi_mode_n++;
+ if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
- }
- if (status & core_irq_rx_path_in_lpi_mode)
- priv->xstats.irq_rx_path_in_lpi_mode_n++;
- if (status & core_irq_rx_path_exit_lpi_mode)
- priv->xstats.irq_rx_path_exit_lpi_mode_n++;
}
}
@@ -1655,21 +2216,30 @@ static void stmmac_poll_controller(struct net_device *dev)
* a proprietary structure used to pass information to the driver.
* @cmd: IOCTL command
* Description:
- * Currently there are no special functionality supported in IOCTL, just the
- * phy_mii_ioctl(...) can be invoked.
+ * Currently it supports just the phy_mii_ioctl(...) and HW time stamping.
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct stmmac_priv *priv = netdev_priv(dev);
- int ret;
+ int ret = -EOPNOTSUPP;
if (!netif_running(dev))
return -EINVAL;
- if (!priv->phydev)
- return -EINVAL;
-
- ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ return -EINVAL;
+ ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = stmmac_hwtstamp_ioctl(dev, rq);
+ break;
+ default:
+ break;
+ }
return ret;
}
@@ -1679,40 +2249,51 @@ static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+ struct seq_file *seq)
{
- struct tmp_s {
- u64 a;
- unsigned int b;
- unsigned int c;
- };
int i;
- struct net_device *dev = seq->private;
- struct stmmac_priv *priv = netdev_priv(dev);
-
- seq_printf(seq, "=======================\n");
- seq_printf(seq, " RX descriptor ring\n");
- seq_printf(seq, "=======================\n");
+ struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
+ struct dma_desc *p = (struct dma_desc *) head;
- for (i = 0; i < priv->dma_rx_size; i++) {
- struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i);
- seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
- i, (unsigned int)(x->a),
- (unsigned int)((x->a) >> 32), x->b, x->c);
+ for (i = 0; i < size; i++) {
+ u64 x;
+ if (extend_desc) {
+ x = *(u64 *) ep;
+ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int) virt_to_phys(ep),
+ (unsigned int) x, (unsigned int) (x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ } else {
+ x = *(u64 *) p;
+ seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+				   i, (unsigned int) virt_to_phys(p),
+ (unsigned int) x, (unsigned int) (x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
seq_printf(seq, "\n");
}
+}
- seq_printf(seq, "\n");
- seq_printf(seq, "=======================\n");
- seq_printf(seq, " TX descriptor ring\n");
- seq_printf(seq, "=======================\n");
+static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
- for (i = 0; i < priv->dma_tx_size; i++) {
- struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i);
- seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x",
- i, (unsigned int)(x->a),
- (unsigned int)((x->a) >> 32), x->b, x->c);
- seq_printf(seq, "\n");
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended RX descriptor ring:\n");
+ sysfs_display_ring((void *) priv->dma_erx, rxsize, 1, seq);
+ seq_printf(seq, "Extended TX descriptor ring:\n");
+ sysfs_display_ring((void *) priv->dma_etx, txsize, 1, seq);
+ } else {
+ seq_printf(seq, "RX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
+ seq_printf(seq, "TX descriptor ring:\n");
+ sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
}
return 0;
@@ -1877,7 +2458,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
*/
static int stmmac_hw_init(struct stmmac_priv *priv)
{
- int ret = 0;
+ int ret;
struct mac_device_info *mac;
/* Identify the MAC HW device */
@@ -1892,12 +2473,23 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->hw = mac;
- /* To use the chained or ring mode */
- priv->hw->ring = &ring_mode_ops;
-
/* Get and dump the chip ID */
priv->synopsys_id = stmmac_get_synopsys_id(priv);
+ /* To use alternate (extended) or normal descriptor structures */
+ stmmac_selec_desc_mode(priv);
+
+ /* To use the chained or ring mode */
+ if (chain_mode) {
+ priv->hw->chain = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->ring = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
+
/* Get the HW capability (new GMAC newer than 3.50a) */
priv->hw_cap_support = stmmac_get_hw_features(priv);
if (priv->hw_cap_support) {
@@ -1921,9 +2513,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else
pr_info(" No HW DMA feature register supported");
- /* Select the enhnaced/normal descriptor structures */
- stmmac_selec_desc_mode(priv);
-
/* Enable the IPC (Checksum Offload) and check if the feature has been
* enabled during the core configuration. */
ret = priv->hw->mac->rx_ipc(priv->ioaddr);
@@ -1943,7 +2532,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
device_set_wakeup_capable(priv->device, 1);
}
- return ret;
+ return 0;
}
/**
@@ -1989,7 +2578,9 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
priv->plat->phy_addr = phyaddr;
/* Init MAC and get the capabilities */
- stmmac_hw_init(priv);
+ ret = stmmac_hw_init(priv);
+ if (ret)
+ goto error_free_netdev;
ndev->netdev_ops = &stmmac_netdev_ops;
@@ -2044,12 +2635,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
else
priv->clk_csr = priv->plat->clk_csr;
- /* MDIO bus Registration */
- ret = stmmac_mdio_register(ndev);
- if (ret < 0) {
- pr_debug("%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
- goto error_mdio_register;
+ stmmac_check_pcs_mode(priv);
+
+ if (!priv->pcs) {
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
}
return priv;
@@ -2060,6 +2655,7 @@ error_clk_get:
unregister_netdev(ndev);
error_netdev_register:
netif_napi_del(&priv->napi);
+error_free_netdev:
free_netdev(ndev);
return NULL;
@@ -2081,7 +2677,8 @@ int stmmac_dvr_remove(struct net_device *ndev)
priv->hw->dma->stop_tx(priv->ioaddr);
stmmac_set_mac(priv->ioaddr, false);
- stmmac_mdio_unregister(ndev);
+ if (!priv->pcs)
+ stmmac_mdio_unregister(ndev);
netif_carrier_off(ndev);
unregister_netdev(ndev);
free_netdev(ndev);
@@ -2093,7 +2690,6 @@ int stmmac_dvr_remove(struct net_device *ndev)
int stmmac_suspend(struct net_device *ndev)
{
struct stmmac_priv *priv = netdev_priv(ndev);
- int dis_ic = 0;
unsigned long flags;
if (!ndev || !netif_running(ndev))
@@ -2107,18 +2703,13 @@ int stmmac_suspend(struct net_device *ndev)
netif_device_detach(ndev);
netif_stop_queue(ndev);
- if (priv->use_riwt)
- dis_ic = 1;
-
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
priv->hw->dma->stop_tx(priv->ioaddr);
priv->hw->dma->stop_rx(priv->ioaddr);
- /* Clear the Rx/Tx descriptors */
- priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
- dis_ic);
- priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+
+ stmmac_clear_descriptors(priv);
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device))
@@ -2257,6 +2848,9 @@ static int __init stmmac_cmdline_opt(char *str)
} else if (!strncmp(opt, "eee_timer:", 10)) {
if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
+ } else if (!strncmp(opt, "chain_mode:", 11)) {
+ if (kstrtoint(opt + 11, 0, &chain_mode))
+ goto err;
}
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
new file mode 100644
index 00000000000..93d4beff92c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -0,0 +1,215 @@
+/*******************************************************************************
+ PTP 1588 clock using the STMMAC.
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+*******************************************************************************/
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+/**
+ * stmmac_adjust_freq
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ppb: desired frequency change in parts per billion
+ *
+ * Description: this function will adjust the frequency of the hardware clock.
+ */
+static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 diff, addend;
+ int neg_adj = 0;
+ u64 adj;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ addend = priv->default_addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+ addend = neg_adj ? (addend - diff) : (addend + diff);
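+	/* Illustrative example: ppb = 1000 (i.e. 1 ppm) changes the addend
+	 * by addend / 1000000, shifting the clock rate by one part per
+	 * million.
+	 */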
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->config_addend(priv->ioaddr, addend);
+
+	spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_adjust_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @delta: desired change in nanoseconds
+ *
+ * Description: this function will shift/adjust the hardware clock time.
+ */
+static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 sec, nsec;
+ u32 quotient, reminder;
+ int neg_adj = 0;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &reminder);
+ sec = quotient;
+ nsec = reminder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+
+	spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * stmmac_get_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: pointer to hold time/result
+ *
+ * Description: this function will read the current time from the
+ * hardware clock and store it in @ts.
+ */
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u64 ns;
+ u32 reminder;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ ns = priv->hw->ptp->get_systime(priv->ioaddr);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &reminder);
+ ts->tv_nsec = reminder;
+
+ return 0;
+}
+
+/**
+ * stmmac_set_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: time value to set
+ *
+ * Description: this function will set the current time on the
+ * hardware clock.
+ */
+static int stmmac_set_time(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ptp_lock, flags);
+
+ priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+static int stmmac_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info stmmac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac_ptp_clock",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime = stmmac_get_time,
+ .settime = stmmac_set_time,
+ .enable = stmmac_enable,
+};
+
+/**
+ * stmmac_ptp_register
+ *
+ * @priv: driver private structure
+ *
+ * Description: this function will register the ptp clock driver
+ * to the kernel. It also does some housekeeping work.
+ */
+int stmmac_ptp_register(struct stmmac_priv *priv)
+{
+ spin_lock_init(&priv->ptp_lock);
+ priv->ptp_clock_ops = stmmac_ptp_clock_ops;
+
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
+ priv->device);
+ if (IS_ERR(priv->ptp_clock)) {
+ priv->ptp_clock = NULL;
+ pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
+ } else
+ pr_debug("Added PTP HW clock successfully on %s\n",
+ priv->dev->name);
+
+ return 0;
+}
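+/* Note (illustrative): once registered, the clock is exposed to user space
+ * as /dev/ptpN and can be exercised with generic PTP tools such as the
+ * kernel's Documentation/ptp/testptp utility.
+ */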
+
+/**
+ * stmmac_ptp_unregister
+ *
+ * @priv: driver private structure
+ *
+ * Description: this function will remove/unregister the ptp clock driver
+ * from the kernel.
+ */
+void stmmac_ptp_unregister(struct stmmac_priv *priv)
+{
+ if (priv->ptp_clock) {
+ ptp_clock_unregister(priv->ptp_clock);
+ pr_debug("Removed PTP HW clock successfully on %s\n",
+ priv->dev->name);
+ }
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
new file mode 100644
index 00000000000..3dbc047622f
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ PTP Header file
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+******************************************************************************/
+
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
+
+#define STMMAC_SYSCLOCK 62500000
+
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x0700 /* Timestamp Control Reg */
+#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
+#define PTP_STSR	0x0708	/* System Time - Seconds Reg */
+#define PTP_STNSR	0x070C	/* System Time - Nanoseconds Reg */
+#define PTP_STSUR	0x0710	/* System Time - Seconds Update Reg */
+#define PTP_STNSUR	0x0714	/* System Time - Nanoseconds Update Reg */
+#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
+#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
+#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
+#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
+#define PTP_TSR 0x0728 /* Timestamp Status */
+
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+
+/* PTP TCR defines */
+#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
+/* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSTRIG 0x00000010
+#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
+#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
+/* Timestamp Digital or Binary Rollover Control */
+#define PTP_TCR_TSCTRLSSR 0x00000200
+
+/* Enable PTP packet Processing for Version 2 Format */
+#define PTP_TCR_TSVER2ENA 0x00000400
+/* Enable Processing of PTP over Ethernet Frames */
+#define PTP_TCR_TSIPENA 0x00000800
+/* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define PTP_TCR_TSIPV6ENA 0x00001000
+/* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define PTP_TCR_TSIPV4ENA 0x00002000
+/* Enable Timestamp Snapshot for Event Messages */
+#define PTP_TCR_TSEVNTENA 0x00004000
+/* Enable Snapshot for Messages Relevant to Master */
+#define PTP_TCR_TSMSTRENA 0x00008000
+/* Select PTP packets for Taking Snapshots */
+#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+/* Enable MAC address for PTP Frame Filtering */
+#define PTP_TCR_TSENMACADDR 0x00040000
+
+#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index e4c1c88e4c2..95cff98d8a3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6618,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
(len << TXHDR_LEN_SHIFT) |
((l3off / 2) << TXHDR_L3START_SHIFT) |
(ihl << TXHDR_IHL_SHIFT) |
- ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) |
+ ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
(ipv6 ? TXHDR_IP_VER : 0) |
csum_bits);
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 5fafca06530..054975939a1 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op,
bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
PAGE_SIZE,
&bp->bblock_dvma, GFP_ATOMIC);
- if (bp->bmac_block == NULL || bp->bblock_dvma == 0) {
- printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
+ if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
goto fail_and_cleanup;
- }
/* Get the board revision of this BigMAC. */
bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index a1bff49a815..436fa9d5a07 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
&hp->hblock_dvma,
GFP_ATOMIC);
err = -ENOMEM;
- if (!hp->happy_block) {
- printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
+ if (!hp->happy_block)
goto err_out_iounmap;
- }
/* Force check of the link first time we are brought up. */
hp->linkcheck = 0;
@@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS;
#endif
- hp->happy_block = (struct hmeal_init_block *)
- dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);
-
+ hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &hp->hblock_dvma, GFP_KERNEL);
err = -ENODEV;
- if (!hp->happy_block) {
- printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
+ if (!hp->happy_block)
goto err_out_iounmap;
- }
hp->linkcheck = 0;
hp->timer_state = asleep;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 49bf3e2eb65..8182591bc18 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
- int elem = qep->rx_new, drops = 0;
+ int elem = qep->rx_new;
u32 flags;
this = &rxbase[elem];
@@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep)
} else {
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
- drops++;
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
@@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep)
this = &rxbase[elem];
}
qep->rx_new = elem;
- if (drops)
- printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
static void qe_tx_reclaim(struct sunqe *qep);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e15cc71b826..e8824cea093 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
- if (!skb) {
- pr_err("NO MEM: netdev_alloc_skb failed\n");
+ if (!skb)
break;
- }
+
skb_reserve(skb, NET_IP_ALIGN);
idx = bdx_rxdb_alloc_elem(db);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 80cad06e5eb..1d740423a05 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -126,6 +126,13 @@ do { \
#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
+#define CPSW_INTPACEEN (0x3f << 16)
+#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
+#define CPSW_CMINTMAX_CNT 63
+#define CPSW_CMINTMIN_CNT 2
+#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
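+/* i.e. (illustrative) the pacer accepts intervals of roughly
+ * CPSW_CMINTMIN_INTVL (16) to CPSW_CMINTMAX_INTVL (500) usecs per interrupt.
+ */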
+
#define cpsw_enable_irq(priv) \
do { \
u32 i; \
@@ -139,6 +146,10 @@ do { \
disable_irq_nosync(priv->irqs_table[i]); \
} while (0);
+#define cpsw_slave_index(priv) \
+ ((priv->data.dual_emac) ? priv->emac_port : \
+ priv->data.active_slave)
+
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -160,6 +171,15 @@ struct cpsw_wr_regs {
u32 rx_en;
u32 tx_en;
u32 misc_en;
+ u32 mem_allign1[8];
+ u32 rx_thresh_stat;
+ u32 rx_stat;
+ u32 tx_stat;
+ u32 misc_stat;
+ u32 mem_allign2[8];
+ u32 rx_imax;
+ u32 tx_imax;
+
};
struct cpsw_ss_regs {
@@ -314,6 +334,8 @@ struct cpsw_priv {
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
u32 version;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
struct net_device_stats stats;
int rx_packet_max;
int host_port;
@@ -612,6 +634,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
}
}
+static int cpsw_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ coal->rx_coalesce_usecs = priv->coal_intvl;
+ return 0;
+}
+
+static int cpsw_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl;
+ u32 num_interrupts = 0;
+ u32 prescale = 0;
+ u32 addnl_dvdr = 1;
+ u32 coal_intvl = 0;
+
+ if (!coal->rx_coalesce_usecs)
+ return -EINVAL;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ int_ctrl = readl(&priv->wr_regs->int_control);
+ prescale = priv->bus_freq_mhz * 4;
+
+ if (coal_intvl < CPSW_CMINTMIN_INTVL)
+ coal_intvl = CPSW_CMINTMIN_INTVL;
+
+ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+		/* The interrupt pacer works with a 4us pulse; we can
+		 * throttle further by dilating the 4us pulse.
+ */
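+		/* Illustrative example: with a 125 MHz bus clock,
+		 * prescale = 500, so addnl_dvdr = 0x7FF / 500 = 4 and
+		 * intervals up to 4 * CPSW_CMINTMAX_INTVL usecs are reachable.
+		 */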
+ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+ coal_intvl = (CPSW_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = CPSW_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+ writel(num_interrupts, &priv->wr_regs->rx_imax);
+ writel(num_interrupts, &priv->wr_regs->tx_imax);
+
+ int_ctrl |= CPSW_INTPACEEN;
+ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+ writel(int_ctrl, &priv->wr_regs->int_control);
+
+ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ priv = netdev_priv(priv->slaves[i].ndev);
+ priv->coal_intvl = coal_intvl;
+ }
+ } else {
+ priv->coal_intvl = coal_intvl;
+ }
+
+ return 0;
+}
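+/* Usage note (illustrative): this hooks into the standard ethtool coalesce
+ * interface, so the RX pacing interval can be tuned from user space with
+ * e.g. "ethtool -C eth0 rx-usecs 500".
+ */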
+
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
static char *leader = "........................................";
@@ -834,6 +927,14 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
}
+ /* Enable Interrupt pacing if configured */
+ if (priv->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ cpsw_set_coalesce(ndev, &coal);
+ }
+
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
napi_enable(&priv->napi);
@@ -942,7 +1043,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
- struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+ struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
u32 ts_en, seq_id;
if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
@@ -971,7 +1072,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->data.dual_emac)
slave = &priv->slaves[priv->emac_port];
else
- slave = &priv->slaves[priv->data.cpts_active_slave];
+ slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
ctrl &= ~CTRL_ALL_TS_MASK;
@@ -1056,14 +1157,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(req);
+ int slave_no = cpsw_slave_index(priv);
+
if (!netif_running(dev))
return -EINVAL;
+ switch (cmd) {
#ifdef CONFIG_TI_CPTS
- if (cmd == SIOCSHWTSTAMP)
+ case SIOCSHWTSTAMP:
return cpsw_hwtstamp_ioctl(dev, req);
#endif
- return -ENOTSUPP;
+ case SIOCGMIIPHY:
+ data->phy_id = priv->slaves[slave_no].phy->addr;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1244,12 +1357,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
return 0;
}
+static int cpsw_get_settings(struct net_device *ndev,
+ struct ethtool_cmd *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
.set_msglevel = cpsw_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ts_info = cpsw_get_ts_info,
+ .get_settings = cpsw_get_settings,
+ .set_settings = cpsw_set_settings,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1282,12 +1422,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->slaves = prop;
- if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
- pr_err("Missing cpts_active_slave property in the DT.\n");
+ if (of_property_read_u32(node, "active_slave", &prop)) {
+ pr_err("Missing active_slave property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- data->cpts_active_slave = prop;
+ data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
pr_err("Missing cpts_clock_mult property in the DT.\n");
@@ -1437,6 +1577,9 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->slaves = priv->slaves;
priv_sl2->clk = priv->clk;
+ priv_sl2->coal_intvl = 0;
+ priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
+
priv_sl2->cpsw_res = priv->cpsw_res;
priv_sl2->regs = priv->regs;
priv_sl2->host_port = priv->host_port;
@@ -1546,6 +1689,8 @@ static int cpsw_probe(struct platform_device *pdev)
ret = -ENODEV;
goto clean_slave_ret;
}
+ priv->coal_intvl = 0;
+ priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!priv->cpsw_res) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 72300bc9e37..6a0b47715a8 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
* Polled functionality used by netconsole and others in non interrupt mode
*
*/
-void emac_poll_controller(struct net_device *ndev)
+static void emac_poll_controller(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
@@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev)
/* obtain emac clock from kernel */
- emac_clk = clk_get(&pdev->dev, NULL);
+ emac_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(emac_clk)) {
dev_err(&pdev->dev, "failed to get EMAC clock\n");
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
- clk_put(emac_clk);
/* TODO: Probe PHY here if possible */
ndev = alloc_etherdev(sizeof(struct emac_priv));
- if (!ndev) {
- rc = -ENOMEM;
- goto no_ndev;
- }
+ if (!ndev)
+ return -ENOMEM;
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
@@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
rc = -ENODEV;
- goto probe_quit;
+ goto no_pdata;
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev,"error getting res\n");
rc = -ENOENT;
- goto probe_quit;
+ goto no_pdata;
}
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = resource_size(res);
- if (!request_mem_region(res->start, size, ndev->name)) {
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ size, ndev->name)) {
dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
rc = -ENXIO;
- goto probe_quit;
+ goto no_pdata;
}
- priv->remap_addr = ioremap(res->start, size);
+ priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
if (!priv->remap_addr) {
dev_err(&pdev->dev, "unable to map IO\n");
rc = -ENOMEM;
- release_mem_region(res->start, size);
- goto probe_quit;
+ goto no_pdata;
}
priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
ndev->base_addr = (unsigned long)priv->remap_addr;
@@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (!priv->dma) {
dev_err(&pdev->dev, "error initializing DMA\n");
rc = -ENOMEM;
- goto no_dma;
+ goto no_pdata;
}
priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
@@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
emac_rx_handler);
if (WARN_ON(!priv->txchan || !priv->rxchan)) {
rc = -ENOMEM;
- goto no_irq_res;
+ goto no_cpdma_chan;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev, "error getting irq res\n");
rc = -ENOENT;
- goto no_irq_res;
+ goto no_cpdma_chan;
}
ndev->irq = res->start;
@@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
- goto no_irq_res;
+ goto no_cpdma_chan;
}
@@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
return 0;
-no_irq_res:
+no_cpdma_chan:
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
if (priv->rxchan)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
-no_dma:
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- iounmap(priv->remap_addr);
-
-probe_quit:
+no_pdata:
free_netdev(ndev);
-no_ndev:
return rc;
}
@@ -2041,14 +2032,12 @@ no_ndev:
*/
static int davinci_emac_remove(struct platform_device *pdev)
{
- struct resource *res;
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_priv *priv = netdev_priv(ndev);
dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
platform_set_drvdata(pdev, NULL);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
@@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_chan_destroy(priv->rxchan);
cpdma_ctlr_destroy(priv->dma);
- release_mem_region(res->start, resource_size(res));
-
unregister_netdev(ndev);
- iounmap(priv->remap_addr);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 22725386c5d..bdda36f8e54 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1911,10 +1911,8 @@ static void tlan_reset_lists(struct net_device *dev)
list->frame_size = TLAN_MAX_FRAME_SIZE;
list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
- if (!skb) {
- netdev_err(dev, "Out of memory for received data\n");
+ if (!skb)
break;
- }
list->buffer[0].address = pci_map_single(priv->pci_dev,
skb->data,
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 445c0595c99..ad32af67e61 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -58,13 +58,6 @@ MODULE_DESCRIPTION("Gelic Network driver");
MODULE_LICENSE("GPL");
-static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
-static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
-static inline void gelic_card_disable_txdmac(struct gelic_card *card);
-static inline void gelic_card_reset_chain(struct gelic_card *card,
- struct gelic_descr_chain *chain,
- struct gelic_descr *start_descr);
-
/* set irq_mask */
int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
{
@@ -78,12 +71,12 @@ int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
return status;
}
-static inline void gelic_card_rx_irq_on(struct gelic_card *card)
+static void gelic_card_rx_irq_on(struct gelic_card *card)
{
card->irq_mask |= GELIC_CARD_RXINT;
gelic_card_set_irq_mask(card, card->irq_mask);
}
-static inline void gelic_card_rx_irq_off(struct gelic_card *card)
+static void gelic_card_rx_irq_off(struct gelic_card *card)
{
card->irq_mask &= ~GELIC_CARD_RXINT;
gelic_card_set_irq_mask(card, card->irq_mask);
@@ -127,6 +120,120 @@ static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
return 0;
}
+/**
+ * gelic_card_disable_txdmac - disables the transmit DMA controller
+ * @card: card structure
+ *
+ * gelic_card_disable_txdmac terminates processing on the DMA controller by
+ * turning off DMA and issuing a force end
+ */
+static void gelic_card_disable_txdmac(struct gelic_card *card)
+{
+ int status;
+
+ /* this hvc blocks until the DMA in progress really stopped */
+ status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_stop_tx_dma failed, status=%d\n", status);
+}
+
+/**
+ * gelic_card_enable_rxdmac - enables the receive DMA controller
+ * @card: card structure
+ *
+ * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
+ * in the GDADMACCNTR register
+ */
+static void gelic_card_enable_rxdmac(struct gelic_card *card)
+{
+ int status;
+
+#ifdef DEBUG
+ if (gelic_descr_get_status(card->rx_chain.head) !=
+ GELIC_DESCR_DMA_CARDOWNED) {
+ printk(KERN_ERR "%s: status=%x\n", __func__,
+ be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
+ printk(KERN_ERR "%s: nextphy=%x\n", __func__,
+ be32_to_cpu(card->rx_chain.head->next_descr_addr));
+ printk(KERN_ERR "%s: head=%p\n", __func__,
+ card->rx_chain.head);
+ }
+#endif
+ status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
+ card->rx_chain.head->bus_addr, 0);
+ if (status)
+ dev_info(ctodev(card),
+ "lv1_net_start_rx_dma failed, status=%d\n", status);
+}
+
+/**
+ * gelic_card_disable_rxdmac - disables the receive DMA controller
+ * @card: card structure
+ *
+ * gelic_card_disable_rxdmac terminates processing on the DMA controller by
+ * turning off DMA and issuing a force end
+ */
+static void gelic_card_disable_rxdmac(struct gelic_card *card)
+{
+ int status;
+
+ /* this hvc blocks until the DMA in progress really stopped */
+ status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
+ if (status)
+ dev_err(ctodev(card),
+ "lv1_net_stop_rx_dma failed, %d\n", status);
+}
+
+/**
+ * gelic_descr_set_status -- sets the status of a descriptor
+ * @descr: descriptor to change
+ * @status: status to set in the descriptor
+ *
+ * changes the status to the specified value. Doesn't change other bits
+ * in the status
+ */
+static void gelic_descr_set_status(struct gelic_descr *descr,
+ enum gelic_descr_dma_status status)
+{
+ descr->dmac_cmd_status = cpu_to_be32(status |
+ (be32_to_cpu(descr->dmac_cmd_status) &
+ ~GELIC_DESCR_DMA_STAT_MASK));
+ /*
+ * dma_cmd_status field is used to indicate whether the descriptor
+ * is valid or not.
+ * Usually the caller of this function wants to inform the hardware of
+ * that, so we make sure here that the hardware sees the change.
+ */
+ wmb();
+}
+
+/**
+ * gelic_card_reset_chain - reset status of a descriptor chain
+ * @card: card structure
+ * @chain: address of chain
+ * @start_descr: address of descriptor array
+ *
+ * Reset the status of dma descriptors to ready state
+ * and re-initialize the hardware chain for later use
+ */
+static void gelic_card_reset_chain(struct gelic_card *card,
+ struct gelic_descr_chain *chain,
+ struct gelic_descr *start_descr)
+{
+ struct gelic_descr *descr;
+
+ for (descr = start_descr; start_descr != descr->next; descr++) {
+ gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
+ descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
+ }
+
+ chain->head = start_descr;
+ chain->tail = (descr - 1);
+
+ (descr - 1)->next_descr_addr = 0;
+}
+
void gelic_card_up(struct gelic_card *card)
{
pr_debug("%s: called\n", __func__);
@@ -183,29 +290,6 @@ gelic_descr_get_status(struct gelic_descr *descr)
}
/**
- * gelic_descr_set_status -- sets the status of a descriptor
- * @descr: descriptor to change
- * @status: status to set in the descriptor
- *
- * changes the status to the specified value. Doesn't change other bits
- * in the status
- */
-static void gelic_descr_set_status(struct gelic_descr *descr,
- enum gelic_descr_dma_status status)
-{
- descr->dmac_cmd_status = cpu_to_be32(status |
- (be32_to_cpu(descr->dmac_cmd_status) &
- ~GELIC_DESCR_DMA_STAT_MASK));
- /*
- * dma_cmd_status field is used to indicate whether the descriptor
- * is valid or not.
- * Usually caller of this function wants to inform that to the
- * hardware, so we assure here the hardware sees the change.
- */
- wmb();
-}
-
-/**
* gelic_card_free_chain - free descriptor chain
* @card: card structure
* @descr_in: address of desc
@@ -286,31 +370,6 @@ iommu_error:
}
/**
- * gelic_card_reset_chain - reset status of a descriptor chain
- * @card: card structure
- * @chain: address of chain
- * @start_descr: address of descriptor array
- *
- * Reset the status of dma descriptors to ready state
- * and re-initialize the hardware chain for later use
- */
-static void gelic_card_reset_chain(struct gelic_card *card,
- struct gelic_descr_chain *chain,
- struct gelic_descr *start_descr)
-{
- struct gelic_descr *descr;
-
- for (descr = start_descr; start_descr != descr->next; descr++) {
- gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
- descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
- }
-
- chain->head = start_descr;
- chain->tail = (descr - 1);
-
- (descr - 1)->next_descr_addr = 0;
-}
-/**
* gelic_descr_prepare_rx - reinitializes a rx descriptor
* @card: card structure
* @descr: descriptor to re-init
@@ -599,71 +658,6 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/**
- * gelic_card_enable_rxdmac - enables the receive DMA controller
- * @card: card structure
- *
- * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
- * in the GDADMACCNTR register
- */
-static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
-{
- int status;
-
-#ifdef DEBUG
- if (gelic_descr_get_status(card->rx_chain.head) !=
- GELIC_DESCR_DMA_CARDOWNED) {
- printk(KERN_ERR "%s: status=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
- printk(KERN_ERR "%s: nextphy=%x\n", __func__,
- be32_to_cpu(card->rx_chain.head->next_descr_addr));
- printk(KERN_ERR "%s: head=%p\n", __func__,
- card->rx_chain.head);
- }
-#endif
- status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
- card->rx_chain.head->bus_addr, 0);
- if (status)
- dev_info(ctodev(card),
- "lv1_net_start_rx_dma failed, status=%d\n", status);
-}
-
-/**
- * gelic_card_disable_rxdmac - disables the receive DMA controller
- * @card: card structure
- *
- * gelic_card_disable_rxdmac terminates processing on the DMA controller by
- * turing off DMA and issuing a force end
- */
-static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
-{
- int status;
-
- /* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
- if (status)
- dev_err(ctodev(card),
- "lv1_net_stop_rx_dma failed, %d\n", status);
-}
-
-/**
- * gelic_card_disable_txdmac - disables the transmit DMA controller
- * @card: card structure
- *
- * gelic_card_disable_txdmac terminates processing on the DMA controller by
- * turing off DMA and issuing a force end
- */
-static inline void gelic_card_disable_txdmac(struct gelic_card *card)
-{
- int status;
-
- /* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
- if (status)
- dev_err(ctodev(card),
- "lv1_net_stop_tx_dma failed, status=%d\n", status);
-}
-
-/**
* gelic_net_stop - called upon ifconfig down
* @netdev: interface device structure
*
@@ -746,7 +740,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
}
}
-static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
+static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
unsigned short tag)
{
struct vlan_ethhdr *veth;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f1b91fd7e41..fef6b59e69c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card,
alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
- &chain->dma_addr, GFP_KERNEL);
-
+ &chain->dma_addr, GFP_KERNEL);
if (!chain->hwring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fa947a2d92..3c69a046083 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,27 +1308,16 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_alloc_coherent(NULL, rxring_size,
- &data->rxdma, GFP_KERNEL);
-
- if (!data->rxring) {
- printk(KERN_DEBUG
- "TSI108_ETH: failed to allocate memory for rxring!\n");
+ data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!data->rxring)
return -ENOMEM;
- } else {
- memset(data->rxring, 0, rxring_size);
- }
-
- data->txring = dma_alloc_coherent(NULL, txring_size,
- &data->txdma, GFP_KERNEL);
+ data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
+ GFP_KERNEL | __GFP_ZERO);
if (!data->txring) {
- printk(KERN_DEBUG
- "TSI108_ETH: failed to allocate memory for txring!\n");
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
- } else {
- memset(data->txring, 0, txring_size);
}
for (i = 0; i < TSI108_RXRING_LEN; i++) {
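
The tsi108 change above (and the similar ll_temac, axienet, and defxx conversions later in this series) passes __GFP_ZERO to dma_alloc_coherent() instead of following the allocation with memset(). A minimal sketch of the pattern, with an illustrative helper name:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a zeroed coherent DMA ring in one call; the gfp flag replaces the
 * explicit memset() that used to follow the allocation. */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle,
				  GFP_KERNEL | __GFP_ZERO);
}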
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 545043cc4c0..a518dcab396 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -754,7 +754,7 @@ static int w5100_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int w5100_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -787,7 +787,7 @@ static int w5100_resume(struct device *dev)
}
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
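
The CONFIG_PM to CONFIG_PM_SLEEP change here (and in w5300 below) matches the guard to what SIMPLE_DEV_PM_OPS() actually uses: the macro only wires up the suspend/resume callbacks when CONFIG_PM_SLEEP is set, so a plain CONFIG_PM guard can leave them defined but unused. A minimal sketch, using hypothetical example_* names:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/* References example_suspend/example_resume only under CONFIG_PM_SLEEP,
 * which is why the callbacks are guarded the same way. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);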
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 7cbd0e6fc6f..6e00e3f94ce 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -666,7 +666,7 @@ static int w5300_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int w5300_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -699,7 +699,7 @@ static int w5300_resume(struct device *dev)
}
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9fc2ada4c3c..4a7c60f4c83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -245,39 +245,30 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* returns a virtual address and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL);
- if (!lp->tx_bd_v) {
- dev_err(&ndev->dev,
- "unable to allocate DMA TX buffer descriptors");
+ &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ if (!lp->tx_bd_v)
goto out;
- }
+
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL);
- if (!lp->rx_bd_v) {
- dev_err(&ndev->dev,
- "unable to allocate DMA RX buffer descriptors");
+ &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ if (!lp->rx_bd_v)
goto out;
- }
- memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
}
- memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
-
- if (skb == 0) {
- dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ if (!skb)
goto out;
- }
+
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -789,9 +780,7 @@ static void ll_temac_recv(struct net_device *ndev)
new_skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
-
- if (new_skb == 0) {
- dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ if (!new_skb) {
spin_unlock_irqrestore(&lp->rx_lock, flags);
return;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 278c9db3b5b..24748e8367a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -204,41 +204,31 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p,
- GFP_KERNEL);
- if (!lp->tx_bd_v) {
- dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
- "descriptors");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lp->tx_bd_v)
goto out;
- }
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p,
- GFP_KERNEL);
- if (!lp->rx_bd_v) {
- dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
- "descriptors");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!lp->rx_bd_v)
goto out;
- }
- memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
((i + 1) % TX_BD_NUM);
}
- memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
for (i = 0; i < RX_BD_NUM; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
((i + 1) % RX_BD_NUM);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!skb) {
- dev_err(&ndev->dev, "alloc_skb error %d\n", i);
+ if (!skb)
goto out;
- }
lp->rx_bd_v[i].sw_id_offset = (u32) skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -777,10 +767,9 @@ static void axienet_recv(struct net_device *ndev)
packets++;
new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
- if (!new_skb) {
- dev_err(&ndev->dev, "no memory for new sk_buff\n");
+ if (!new_skb)
return;
- }
+
cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 98e09d0d3ce..76210abf2e9 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
/* 1 extra so we can use insw */
skb = netdev_alloc_skb(dev, pktlen + 3);
if (!skb) {
- pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
dev->stats.rx_dropped++;
} else { /* okay get the packet */
skb_reserve(skb, 2);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 502c8ff1d98..4c8ddc944d5 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1070,13 +1070,10 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
(PI_ALIGN_K_DESC_BLK - 1);
bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
&bp->kmalloced_dma,
- GFP_ATOMIC);
- if (top_v == NULL) {
- printk("%s: Could not allocate memory for host buffers "
- "and structures!\n", print_name);
+ GFP_ATOMIC | __GFP_ZERO);
+ if (top_v == NULL)
return DFX_K_FAILURE;
- }
- memset(top_v, 0, alloc_size); /* zero out memory before continuing */
+
top_p = bp->kmalloced_dma; /* get physical address of buffer */
/*
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4cf8f1017aa..b2d863f2ea4 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -866,7 +866,7 @@ static int yam_open(struct net_device *dev)
printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq);
- if (!dev || !yp->bitrate)
+ if (!yp->bitrate)
return -ENXIO;
if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT ||
dev->irq < 2 || dev->irq > 15) {
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index fc1687ea4a4..6e88eab33f5 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -233,8 +233,8 @@ struct at86rf230_local {
#define STATE_SLEEP 0x0F
#define STATE_BUSY_RX_AACK 0x11
#define STATE_BUSY_TX_ARET 0x12
-#define STATE_BUSY_RX_AACK_ON 0x16
-#define STATE_BUSY_TX_ARET_ON 0x19
+#define STATE_RX_AACK_ON 0x16
+#define STATE_TX_ARET_ON 0x19
#define STATE_RX_ON_NOCLK 0x1C
#define STATE_RX_AACK_ON_NOCLK 0x1D
#define STATE_BUSY_RX_AACK_NOCLK 0x1E
@@ -619,6 +619,52 @@ err:
return -EINVAL;
}
+static int
+at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+ dev_vdbg(&lp->spi->dev,
+ "at86rf230_set_hw_addr_filt called for saddr\n");
+ __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr);
+ __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8);
+ }
+
+ if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+ dev_vdbg(&lp->spi->dev,
+ "at86rf230_set_hw_addr_filt called for pan id\n");
+ __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id);
+ __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8);
+ }
+
+ if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+ dev_vdbg(&lp->spi->dev,
+ "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
+ at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
+ }
+
+ if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+ dev_vdbg(&lp->spi->dev,
+ "at86rf230_set_hw_addr_filt called for panc change\n");
+ if (filt->pan_coord)
+ at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
+ else
+ at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 0);
+ }
+
+ return 0;
+}
+
static struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
.xmit = at86rf230_xmit,
@@ -626,6 +672,7 @@ static struct ieee802154_ops at86rf230_ops = {
.set_channel = at86rf230_channel,
.start = at86rf230_start,
.stop = at86rf230_stop,
+ .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
};
static void at86rf230_irqwork(struct work_struct *work)
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 3f2c7aaf28c..0ca8f88ac53 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -22,6 +22,7 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <net/wpan-phy.h>
#include <net/mac802154.h>
@@ -91,9 +92,8 @@ struct mrf24j40 {
#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
-/* Maximum speed to run the device at. TODO: Get the real max value from
- * someone at Microchip since it isn't in the datasheet. */
-#define MAX_SPI_SPEED_HZ 1000000
+/* The datasheet indicates the theoretical maximum for SCK to be 10MHz */
+#define MAX_SPI_SPEED_HZ 10000000
#define printdev(X) (&X->spi->dev)
@@ -361,6 +361,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
if (ret == -ERESTARTSYS)
goto err;
if (ret == 0) {
+ dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n");
ret = -ETIMEDOUT;
goto err;
}
@@ -477,7 +478,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
int i;
for (i = 0; i < 8; i++)
write_short_reg(devrec, REG_EADR0+i,
- filt->ieee_addr[i]);
+ filt->ieee_addr[7-i]);
#ifdef DEBUG
printk(KERN_DEBUG "Set long addr to: ");
@@ -623,6 +624,7 @@ static int mrf24j40_probe(struct spi_device *spi)
int ret = -ENOMEM;
u8 val;
struct mrf24j40 *devrec;
+ struct pinctrl *pinctrl;
printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
@@ -633,6 +635,11 @@ static int mrf24j40_probe(struct spi_device *spi)
if (!devrec->buf)
goto err_buf;
+ pinctrl = devm_pinctrl_get_select_default(&spi->dev);
+ if (IS_ERR(pinctrl))
+ dev_warn(&spi->dev,
+ "pinctrl pins are not configured from the driver");
+
spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
spi->max_speed_hz = MAX_SPI_SPEED_HZ;
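
Two points about the mrf24j40 hunk above: the SPI clock is now clamped to the 10 MHz limit given in the datasheet, and probe requests the default pinctrl state but treats its absence as non-fatal. A hedged sketch of that pinctrl pattern, with an illustrative function name:

#include <linux/pinctrl/consumer.h>
#include <linux/device.h>

/* Select the "default" pin state if the platform provides one; if not, warn
 * and continue, since the pins may be configured by the bootloader or board
 * code instead. */
static void example_setup_pins(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get_select_default(dev);

	if (IS_ERR(p))
		dev_warn(dev, "pinctrl pins are not configured from the driver\n");
}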
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 9cea451a608..3adb43ce138 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -352,21 +352,19 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5151e4ced6..7a1f684edcb 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/types.h>
+#include <linux/ioport.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
@@ -882,12 +883,12 @@ static int au1k_irda_probe(struct platform_device *pdev)
goto out;
err = -EBUSY;
- aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ aup->ioarea = request_mem_region(r->start, resource_size(r),
pdev->name);
if (!aup->ioarea)
goto out;
- aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
+ aup->iobase = ioremap_nocache(r->start, resource_size(r));
if (!aup->iobase)
goto out2;
@@ -952,18 +953,7 @@ static struct platform_driver au1k_irda_driver = {
.remove = au1k_irda_remove,
};
-static int __init au1k_irda_load(void)
-{
- return platform_driver_register(&au1k_irda_driver);
-}
-
-static void __exit au1k_irda_unload(void)
-{
- return platform_driver_unregister(&au1k_irda_driver);
-}
+module_platform_driver(au1k_irda_driver);
MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
-
-module_init(au1k_irda_load);
-module_exit(au1k_irda_unload);
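
The au1k_ir change above (and the mdio-gpio and mdio-octeon changes further down) replaces the hand-written init/exit pair with module_platform_driver(), which expands to exactly that boilerplate. A minimal sketch with illustrative names:

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_driver = {
	.driver = {
		.name = "example",
	},
};

/* Generates module_init()/module_exit() functions that simply call
 * platform_driver_register()/platform_driver_unregister(). */
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");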
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index fed4a05d55c..a06fca61c9a 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -389,7 +389,8 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
- port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA);
+ port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_handle, GFP_DMA);
port->rx_dma_buf.head = 0;
port->rx_dma_buf.tail = 0;
port->rx_dma_nrows = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2a4f2f15324..9cf836b57c4 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -431,22 +431,20 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto out2;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto out3;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 858de05bdb7..964b116a0ab 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -700,12 +700,12 @@ static int pxa_irda_start(struct net_device *dev)
err = -ENOMEM;
si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
- &si->dma_rx_buff_phy, GFP_KERNEL );
+ &si->dma_rx_buff_phy, GFP_KERNEL);
if (!si->dma_rx_buff)
goto err_dma_rx_buff;
si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
- &si->dma_tx_buff_phy, GFP_KERNEL );
+ &si->dma_tx_buff_phy, GFP_KERNEL);
if (!si->dma_tx_buff)
goto err_dma_tx_buff;
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 5290952b60c..aa05dad7533 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -563,24 +563,15 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
- if (self->rx_buff.head == NULL) {
- IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
- driver_name);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ if (self->rx_buff.head == NULL)
goto err_out2;
- }
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
- if (self->tx_buff.head == NULL) {
- IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
- driver_name);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ if (self->tx_buff.head == NULL)
goto err_out3;
- }
-
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index f9033c6a888..51f2bc37610 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -364,21 +364,19 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
self->tx_buff.head =
dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f5bb92f1588..bb8857a158a 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -216,22 +216,19 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Allocate memory if needed */
self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL);
+ &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out1;
}
- memset(self->rx_buff.head, 0, self->rx_buff.truesize);
-
self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL);
+ &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
- memset(self->tx_buff.head, 0, self->tx_buff.truesize);
self->rx_buff.in_frame = FALSE;
self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 73abbc1655d..70af6dc07d4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -46,9 +46,16 @@ struct macvlan_port {
static void macvlan_port_destroy(struct net_device *dev);
-#define macvlan_port_get_rcu(dev) \
- ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data))
-#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data)
+static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
+static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
+{
+ return rtnl_dereference(dev->rx_handler_data);
+}
+
#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
@@ -703,7 +710,7 @@ static int macvlan_port_create(struct net_device *dev)
static void macvlan_port_destroy(struct net_device *dev)
{
- struct macvlan_port *port = macvlan_port_get(dev);
+ struct macvlan_port *port = macvlan_port_get_rtnl(dev);
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
@@ -772,7 +779,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
}
- port = macvlan_port_get(lowerdev);
+ port = macvlan_port_get_rtnl(lowerdev);
/* Only 1 macvlan device can be created in passthru mode */
if (port->passthru)
@@ -921,7 +928,7 @@ static int macvlan_device_event(struct notifier_block *unused,
if (!macvlan_port_exists(dev))
return NOTIFY_DONE;
- port = macvlan_port_get(dev);
+ port = macvlan_port_get_rtnl(dev);
switch (event) {
case NETDEV_CHANGE:
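
The macvlan hunks above replace cast macros with typed helpers so each dereference matches its locking context. A hedged sketch of the pattern, with illustrative names (example_port is not a real macvlan type):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct example_port;

/* The _rcu accessor is for readers under rcu_read_lock(); the _rtnl accessor
 * is for control-path code holding RTNL. Using rcu_dereference() vs
 * rtnl_dereference() lets lockdep verify that the right lock is actually
 * held, which a bare cast macro cannot. */
static inline struct example_port *example_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static inline struct example_port *example_port_get_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}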
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a449439bd65..59e9605de31 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -725,6 +725,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
goto err_kfree;
}
+ skb_probe_transport_header(skb, ETH_HLEN);
+
rcu_read_lock_bh();
vlan = rcu_dereference_bh(q->vlan);
/* copy skb_ubuf_info for callback when skb has no error */
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ec40ba882f6..ff2e45e9cb5 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -159,7 +159,7 @@ static int lxt973a2_update_link(struct phy_device *phydev)
return 0;
}
-int lxt973a2_read_status(struct phy_device *phydev)
+static int lxt973a2_read_status(struct phy_device *phydev)
{
int adv;
int err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22dec9c7ef0..202fe1ff198 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -7,6 +7,8 @@
*
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*
+ * Copyright (c) 2013 Michael Stapelberg <michael@stapelberg.de>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -80,6 +82,28 @@
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
+/* Copper Specific Interrupt Enable Register */
+#define MII_88E1318S_PHY_CSIER 0x12
+/* WOL Event Interrupt Enable */
+#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
+
+/* LED Timer Control Register */
+#define MII_88E1318S_PHY_LED_PAGE 0x03
+#define MII_88E1318S_PHY_LED_TCR 0x12
+#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
+#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
+#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11)
+
+/* Magic Packet MAC address registers */
+#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17
+#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18
+#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19
+
+#define MII_88E1318S_PHY_WOL_PAGE 0x11
+#define MII_88E1318S_PHY_WOL_CTRL 0x10
+#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
+#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
+
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
@@ -696,6 +720,107 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
return 0;
}
+static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_WOL_PAGE) < 0)
+ return;
+
+ if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
+ MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00) < 0)
+ return;
+}
+
+static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ int err, oldpage, temp;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ /* Explicitly switch to page 0x00, just to be sure */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
+ if (err < 0)
+ return err;
+
+ /* Enable the WOL interrupt */
+ temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
+ temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
+ err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Setup LED[2] as interrupt pin (active low) */
+ temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
+ temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
+ temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
+ temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
+ err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_WOL_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Store the device address for the magic packet */
+ err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
+ ((phydev->attached_dev->dev_addr[5] << 8) |
+ phydev->attached_dev->dev_addr[4]));
+ if (err < 0)
+ return err;
+ err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
+ ((phydev->attached_dev->dev_addr[3] << 8) |
+ phydev->attached_dev->dev_addr[2]));
+ if (err < 0)
+ return err;
+ err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
+ ((phydev->attached_dev->dev_addr[1] << 8) |
+ phydev->attached_dev->dev_addr[0]));
+ if (err < 0)
+ return err;
+
+ /* Clear WOL status and enable magic packet matching */
+ temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+ temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
+ temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
+ err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ if (err < 0)
+ return err;
+ } else {
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
+ MII_88E1318S_PHY_WOL_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Clear WOL status and disable magic packet matching */
+ temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
+ temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
+ temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
+ err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
+ if (err < 0)
+ return err;
+ }
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -772,6 +897,8 @@ static struct phy_driver marvell_drivers[] = {
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .get_wol = &m88e1318_get_wol,
+ .set_wol = &m88e1318_set_wol,
.driver = { .owner = THIS_MODULE },
},
{
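
The m88e1318 WOL helpers added above follow the usual Marvell paged-register discipline: remember the current page, switch to the page holding the target register, do the access, then restore the page. A hedged sketch of that access pattern, assuming the MII_MARVELL_PHY_PAGE page-select register used above (example_paged_read is illustrative, not a helper in this driver):

#include <linux/phy.h>

static int example_paged_read(struct phy_device *phydev, int page, int regnum)
{
	int oldpage, val, err;

	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
	if (oldpage < 0)
		return oldpage;

	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, page);
	if (err < 0)
		return err;

	val = phy_read(phydev, regnum);

	/* Always restore the original page so later accesses see whatever
	 * page the caller had selected. */
	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
	return err < 0 ? err : val;
}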
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 27274986ab5..a47f9236d96 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -235,17 +235,7 @@ static struct platform_driver mdio_gpio_driver = {
},
};
-static int __init mdio_gpio_init(void)
-{
- return platform_driver_register(&mdio_gpio_driver);
-}
-module_init(mdio_gpio_init);
-
-static void __exit mdio_gpio_exit(void)
-{
- platform_driver_unregister(&mdio_gpio_driver);
-}
-module_exit(mdio_gpio_exit);
+module_platform_driver(mdio_gpio_driver);
MODULE_ALIAS("platform:mdio-gpio");
MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 09297fe05ae..c2c878d496a 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -197,18 +197,7 @@ void octeon_mdiobus_force_mod_depencency(void)
}
EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
-static int __init octeon_mdiobus_mod_init(void)
-{
- return platform_driver_register(&octeon_mdiobus_driver);
-}
-
-static void __exit octeon_mdiobus_mod_exit(void)
-{
- platform_driver_unregister(&octeon_mdiobus_driver);
-}
-
-module_init(octeon_mdiobus_mod_init);
-module_exit(octeon_mdiobus_mod_exit);
+module_platform_driver(octeon_mdiobus_driver);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index abf7b6153d0..2510435f34e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -53,6 +53,18 @@
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+static int ksz_config_flags(struct phy_device *phydev)
+{
+ int regval;
+
+ if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+ regval = phy_read(phydev, MII_KSZPHY_CTRL);
+ regval |= KSZ8051_RMII_50MHZ_CLK;
+ return phy_write(phydev, MII_KSZPHY_CTRL, regval);
+ }
+ return 0;
+}
+
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
/* bit[7..0] int status, which is a read and clear register. */
@@ -114,22 +126,19 @@ static int kszphy_config_init(struct phy_device *phydev)
static int ksz8021_config_init(struct phy_device *phydev)
{
+ int rc;
const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
phy_write(phydev, MII_KSZPHY_OMSO, val);
- return 0;
+ rc = ksz_config_flags(phydev);
+ return rc < 0 ? rc : 0;
}
static int ks8051_config_init(struct phy_device *phydev)
{
- int regval;
-
- if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
- regval = phy_read(phydev, MII_KSZPHY_CTRL);
- regval |= KSZ8051_RMII_50MHZ_CLK;
- phy_write(phydev, MII_KSZPHY_CTRL, regval);
- }
+ int rc;
- return 0;
+ rc = ksz_config_flags(phydev);
+ return rc < 0 ? rc : 0;
}
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -192,6 +201,19 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_KSZ8031,
+ .phy_id_mask = 0x00ffffff,
+ .name = "Micrel KSZ8031",
+ .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = ksz8021_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ8041",
@@ -325,6 +347,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KSZ8021, 0x00ffffff },
+ { PHY_ID_KSZ8031, 0x00ffffff },
{ PHY_ID_KSZ8041, 0x00fffff0 },
{ PHY_ID_KSZ8051, 0x00fffff0 },
{ PHY_ID_KSZ8061, 0x00fffff0 },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef9ea924822..c14f14741b3 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -463,33 +463,6 @@ void phy_stop_machine(struct phy_device *phydev)
}
/**
- * phy_force_reduction - reduce PHY speed/duplex settings by one step
- * @phydev: target phy_device struct
- *
- * Description: Reduces the speed/duplex settings by one notch,
- * in this order--
- * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
- * The function bottoms out at 10/HALF.
- */
-static void phy_force_reduction(struct phy_device *phydev)
-{
- int idx;
-
- idx = phy_find_setting(phydev->speed, phydev->duplex);
-
- idx++;
-
- idx = phy_find_valid(idx, phydev->supported);
-
- phydev->speed = settings[idx].speed;
- phydev->duplex = settings[idx].duplex;
-
- pr_info("Trying %d/%s\n",
- phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
-}
-
-
-/**
* phy_error - enter HALTED state for this PHY device
* @phydev: target phy_device struct
*
@@ -818,30 +791,11 @@ void phy_state_machine(struct work_struct *work)
phydev->adjust_link(phydev->attached_dev);
} else if (0 == phydev->link_timeout--) {
- int idx;
-
needs_aneg = 1;
/* If we have the magic_aneg bit,
* we try again */
if (phydev->drv->flags & PHY_HAS_MAGICANEG)
break;
-
- /* The timer expired, and we still
- * don't have a setting, so we try
- * forcing it until we find one that
- * works, starting from the fastest speed,
- * and working our way down */
- idx = phy_find_valid(0, phydev->supported);
-
- phydev->speed = settings[idx].speed;
- phydev->duplex = settings[idx].duplex;
-
- phydev->autoneg = AUTONEG_DISABLE;
-
- pr_info("Trying %d/%s\n",
- phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "FULL" : "HALF");
}
break;
case PHY_NOLINK:
@@ -866,10 +820,8 @@ void phy_state_machine(struct work_struct *work)
phydev->state = PHY_RUNNING;
netif_carrier_on(phydev->attached_dev);
} else {
- if (0 == phydev->link_timeout--) {
- phy_force_reduction(phydev);
+ if (0 == phydev->link_timeout--)
needs_aneg = 1;
- }
}
phydev->adjust_link(phydev->attached_dev);
@@ -1188,3 +1140,19 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
+
+int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ if (phydev->drv->set_wol)
+ return phydev->drv->set_wol(phydev, wol);
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(phy_ethtool_set_wol);
+
+void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
+{
+ if (phydev->drv->get_wol)
+ phydev->drv->get_wol(phydev, wol);
+}
+EXPORT_SYMBOL(phy_ethtool_get_wol);
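
With phy_ethtool_get_wol()/phy_ethtool_set_wol() exported, a MAC driver can delegate Wake-on-LAN handling to the attached PHY from its ethtool ops. A hedged sketch of how that wiring might look (the example_* functions and the surrounding driver are hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static void example_get_wol(struct net_device *ndev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	if (ndev->phydev)
		phy_ethtool_get_wol(ndev->phydev, wol);
}

static int example_set_wol(struct net_device *ndev,
			   struct ethtool_wolinfo *wol)
{
	if (!ndev->phydev)
		return -EOPNOTSUPP;
	return phy_ethtool_set_wol(ndev->phydev, wol);
}

/* These would be plugged into the driver's ethtool_ops as .get_wol and
 * .set_wol; the PHY driver (e.g. the m88e1318 code above) does the rest. */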
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2585c383e62..3492b539127 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -61,7 +61,7 @@ MODULE_DESCRIPTION("Vitesse PHY driver");
MODULE_AUTHOR("Kriston Carson");
MODULE_LICENSE("GPL");
-int vsc824x_add_skew(struct phy_device *phydev)
+static int vsc824x_add_skew(struct phy_device *phydev)
{
int err;
int extcon;
@@ -81,7 +81,6 @@ int vsc824x_add_skew(struct phy_device *phydev)
return err;
}
-EXPORT_SYMBOL(vsc824x_add_skew);
static int vsc824x_config_init(struct phy_device *phydev)
{
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index bed62d9c53c..1f7bef90b46 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -560,7 +560,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
* so don't forget to remove it.
*/
- if (ntohs(eth->h_proto) >= 1536)
+ if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
return eth->h_proto;
rawp = skb->data;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 1a12033d2ef..090c834d7db 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -105,64 +105,15 @@ static const struct ppp_channel_ops sync_ops = {
};
/*
- * Utility procedures to print a buffer in hex/ascii
+ * Utility procedure to print a buffer in hex/ascii
*/
static void
-ppp_print_hex (register __u8 * out, const __u8 * in, int count)
-{
- register __u8 next_ch;
- static const char hex[] = "0123456789ABCDEF";
-
- while (count-- > 0) {
- next_ch = *in++;
- *out++ = hex[(next_ch >> 4) & 0x0F];
- *out++ = hex[next_ch & 0x0F];
- ++out;
- }
-}
-
-static void
-ppp_print_char (register __u8 * out, const __u8 * in, int count)
-{
- register __u8 next_ch;
-
- while (count-- > 0) {
- next_ch = *in++;
-
- if (next_ch < 0x20 || next_ch > 0x7e)
- *out++ = '.';
- else {
- *out++ = next_ch;
- if (next_ch == '%') /* printk/syslogd has a bug !! */
- *out++ = '%';
- }
- }
- *out = '\0';
-}
-
-static void
ppp_print_buffer (const char *name, const __u8 *buf, int count)
{
- __u8 line[44];
-
if (name != NULL)
printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
- while (count > 8) {
- memset (line, 32, 44);
- ppp_print_hex (line, buf, 8);
- ppp_print_char (&line[8 * 3], buf, 8);
- printk(KERN_DEBUG "%s\n", line);
- count -= 8;
- buf += 8;
- }
-
- if (count > 0) {
- memset (line, 32, 44);
- ppp_print_hex (line, buf, count);
- ppp_print_char (&line[8 * 3], buf, count);
- printk(KERN_DEBUG "%s\n", line);
- }
+ print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
}
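
The ppp_synctty cleanup above drops roughly sixty lines of hand-rolled hex/ASCII formatting in favor of the kernel's shared helper. A minimal usage sketch (example_dump is illustrative):

#include <linux/printk.h>

/* Emits the buffer at KERN_DEBUG, 16 bytes per line, hex on the left and the
 * ASCII rendering on the right, with no per-line prefix string. */
static void example_dump(const void *buf, size_t len)
{
	print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, len);
}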
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index c3011af68e9..c853d84fd99 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -37,6 +37,18 @@ config NET_TEAM_MODE_ROUNDROBIN
To compile this team mode as a module, choose M here: the module
will be called team_mode_roundrobin.
+config NET_TEAM_MODE_RANDOM
+ tristate "Random mode support"
+ depends on NET_TEAM
+ ---help---
+	  Basic mode where the port used for transmitting packets is selected
+ randomly.
+
+	  All added ports are set up to have the team's device address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_random.
+
config NET_TEAM_MODE_ACTIVEBACKUP
tristate "Active-backup mode support"
depends on NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 975763014e5..c57e8588975 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_NET_TEAM) += team.o
obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_RANDOM) += team_mode_random.o
obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bf341929787..621c1bddeee 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port)
return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}
-int team_port_set_team_dev_addr(struct team_port *port)
+static int team_port_set_team_dev_addr(struct team *team,
+ struct team_port *port)
+{
+ return __set_port_dev_addr(port->dev, team->dev->dev_addr);
+}
+
+int team_modeop_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_dev_addr(team, port);
+}
+EXPORT_SYMBOL(team_modeop_port_enter);
+
+void team_modeop_port_change_dev_addr(struct team *team,
+ struct team_port *port)
{
- return __set_port_dev_addr(port->dev, port->team->dev->dev_addr);
+ team_port_set_team_dev_addr(team, port);
}
-EXPORT_SYMBOL(team_port_set_team_dev_addr);
+EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_refresh_port_linkup(struct team_port *port)
{
@@ -490,9 +503,9 @@ static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
return false;
}
-rx_handler_result_t team_dummy_receive(struct team *team,
- struct team_port *port,
- struct sk_buff *skb)
+static rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
{
return RX_HANDLER_ANOTHER;
}
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c5db428e73f..c366cd299c0 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -46,20 +46,10 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
return sum_ret;
}
-static int bc_port_enter(struct team *team, struct team_port *port)
-{
- return team_port_set_team_dev_addr(port);
-}
-
-static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
-{
- team_port_set_team_dev_addr(port);
-}
-
static const struct team_mode_ops bc_mode_ops = {
.transmit = bc_transmit,
- .port_enter = bc_port_enter,
- .port_change_dev_addr = bc_port_change_dev_addr,
+ .port_enter = team_modeop_port_enter,
+ .port_change_dev_addr = team_modeop_port_change_dev_addr,
};
static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
new file mode 100644
index 00000000000..9eabfaa22f3
--- /dev/null
+++ b/drivers/net/team/team_mode_random.c
@@ -0,0 +1,71 @@
+/*
+ * drivers/net/team/team_mode_random.c - Random mode for team
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/if_team.h>
+
+static u32 random_N(unsigned int N)
+{
+ return reciprocal_divide(random32(), N);
+}
+
+static bool rnd_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *port;
+ int port_index;
+
+ port_index = random_N(team->en_port_count);
+ port = team_get_port_by_index_rcu(team, port_index);
+ port = team_get_first_port_txable_rcu(team, port);
+ if (unlikely(!port))
+ goto drop;
+ if (team_dev_queue_xmit(team, port, skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static const struct team_mode_ops rnd_mode_ops = {
+ .transmit = rnd_transmit,
+ .port_enter = team_modeop_port_enter,
+ .port_change_dev_addr = team_modeop_port_change_dev_addr,
+};
+
+static const struct team_mode rnd_mode = {
+ .kind = "random",
+ .owner = THIS_MODULE,
+ .ops = &rnd_mode_ops,
+};
+
+static int __init rnd_init_module(void)
+{
+ return team_mode_register(&rnd_mode);
+}
+
+static void __exit rnd_cleanup_module(void)
+{
+ team_mode_unregister(&rnd_mode);
+}
+
+module_init(rnd_init_module);
+module_exit(rnd_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
+MODULE_DESCRIPTION("Random mode for team");
+MODULE_ALIAS("team-mode-random");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 105135aa8f0..d268e4de781 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -25,26 +25,6 @@ static struct rr_priv *rr_priv(struct team *team)
return (struct rr_priv *) &team->mode_priv;
}
-static struct team_port *__get_first_port_up(struct team *team,
- struct team_port *port)
-{
- struct team_port *cur;
-
- if (team_port_txable(port))
- return port;
- cur = port;
- list_for_each_entry_continue_rcu(cur, &team->port_list, list)
- if (team_port_txable(port))
- return cur;
- list_for_each_entry_rcu(cur, &team->port_list, list) {
- if (cur == port)
- break;
- if (team_port_txable(port))
- return cur;
- }
- return NULL;
-}
-
static bool rr_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *port;
@@ -52,7 +32,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
port = team_get_port_by_index_rcu(team, port_index);
- port = __get_first_port_up(team, port);
+ port = team_get_first_port_txable_rcu(team, port);
if (unlikely(!port))
goto drop;
if (team_dev_queue_xmit(team, port, skb))
@@ -64,20 +44,10 @@ drop:
return false;
}
-static int rr_port_enter(struct team *team, struct team_port *port)
-{
- return team_port_set_team_dev_addr(port);
-}
-
-static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
-{
- team_port_set_team_dev_addr(port);
-}
-
static const struct team_mode_ops rr_mode_ops = {
.transmit = rr_transmit,
- .port_enter = rr_port_enter,
- .port_change_dev_addr = rr_port_change_dev_addr,
+ .port_enter = team_modeop_port_enter,
+ .port_change_dev_addr = team_modeop_port_change_dev_addr,
};
static const struct team_mode rr_mode = {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b7c457adc0d..29538e6e914 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -409,14 +409,12 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
struct tun_struct *tun;
- struct net_device *dev;
tun = rtnl_dereference(tfile->tun);
if (tun && !tfile->detached) {
u16 index = tfile->queue_index;
BUG_ON(index >= tun->numqueues);
- dev = tun->dev;
rcu_assign_pointer(tun->tfiles[index],
tun->tfiles[tun->numqueues - 1]);
@@ -1205,6 +1203,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
skb_reset_network_header(skb);
+ skb_probe_transport_header(skb, 0);
+
rxhash = skb_get_rxhash(skb);
netif_rx_ni(skb);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4709fa3497c..44a989cd9fb 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -362,8 +362,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
u8 iface_no;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (ctx == NULL)
- return -ENODEV;
+ if (!ctx)
+ return -ENOMEM;
hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 57ac4b0294b..f7d67e8eb1a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -154,7 +154,7 @@ struct padded_vnet_hdr {
*/
static int vq2txq(struct virtqueue *vq)
{
- return (virtqueue_get_queue_index(vq) - 1) / 2;
+ return (vq->index - 1) / 2;
}
static int txq2vq(int txq)
@@ -164,7 +164,7 @@ static int txq2vq(int txq)
static int vq2rxq(struct virtqueue *vq)
{
- return virtqueue_get_queue_index(vq) / 2;
+ return vq->index / 2;
}
static int rxq2vq(int rxq)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7cee7a3068e..62a4438c608 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -33,7 +33,7 @@
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
-#include <net/ipip.h>
+#include <net/ip_tunnels.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
@@ -81,31 +81,30 @@ struct vxlan_net {
struct hlist_head vni_list[VNI_HASH_SIZE];
};
+struct vxlan_rdst {
+ struct rcu_head rcu;
+ __be32 remote_ip;
+ __be16 remote_port;
+ u32 remote_vni;
+ u32 remote_ifindex;
+ struct vxlan_rdst *remote_next;
+};
+
/* Forwarding table entry */
struct vxlan_fdb {
struct hlist_node hlist; /* linked list of entries */
struct rcu_head rcu;
unsigned long updated; /* jiffies */
unsigned long used;
- __be32 remote_ip;
+ struct vxlan_rdst remote;
u16 state; /* see ndm_state */
u8 eth_addr[ETH_ALEN];
};
-/* Per-cpu network traffic stats */
-struct vxlan_stats {
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- struct u64_stats_sync syncp;
-};
-
/* Pseudo network device */
struct vxlan_dev {
struct hlist_node hlist;
struct net_device *dev;
- struct vxlan_stats __percpu *stats;
__u32 vni; /* virtual network id */
__be32 gaddr; /* multicast group */
__be32 saddr; /* source address */
@@ -157,7 +156,8 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
- u32 portid, u32 seq, int type, unsigned int flags)
+ u32 portid, u32 seq, int type, unsigned int flags,
+ const struct vxlan_rdst *rdst)
{
unsigned long now = jiffies;
struct nda_cacheinfo ci;
@@ -176,7 +176,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (type == RTM_GETNEIGH) {
ndm->ndm_family = AF_INET;
- send_ip = fdb->remote_ip != 0;
+ send_ip = rdst->remote_ip != htonl(INADDR_ANY);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
@@ -188,7 +188,17 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
- if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
+ if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
+ goto nla_put_failure;
+
+ if (rdst->remote_port && rdst->remote_port != vxlan_port &&
+ nla_put_be16(skb, NDA_PORT, rdst->remote_port))
+ goto nla_put_failure;
+ if (rdst->remote_vni != vxlan->vni &&
+ nla_put_be32(skb, NDA_VNI, rdst->remote_vni))
+ goto nla_put_failure;
+ if (rdst->remote_ifindex &&
+ nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
goto nla_put_failure;
ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
@@ -211,6 +221,9 @@ static inline size_t vxlan_nlmsg_size(void)
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(__be32)) /* NDA_DST */
+ + nla_total_size(sizeof(__be32)) /* NDA_PORT */
+ + nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
+ nla_total_size(sizeof(struct nda_cacheinfo));
}
@@ -225,7 +238,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
if (skb == NULL)
goto errout;
- err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
+ err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote);
if (err < 0) {
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -247,7 +260,8 @@ static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
memset(&f, 0, sizeof f);
f.state = NUD_STALE;
- f.remote_ip = ipa; /* goes to NDA_DST */
+ f.remote.remote_ip = ipa; /* goes to NDA_DST */
+ f.remote.remote_vni = VXLAN_N_VID;
vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
@@ -300,10 +314,38 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
return NULL;
}
+/* Add/update destinations for multicast */
+static int vxlan_fdb_append(struct vxlan_fdb *f,
+ __be32 ip, __u32 port, __u32 vni, __u32 ifindex)
+{
+ struct vxlan_rdst *rd_prev, *rd;
+
+ rd_prev = NULL;
+ for (rd = &f->remote; rd; rd = rd->remote_next) {
+ if (rd->remote_ip == ip &&
+ rd->remote_port == port &&
+ rd->remote_vni == vni &&
+ rd->remote_ifindex == ifindex)
+ return 0;
+ rd_prev = rd;
+ }
+ rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
+ if (rd == NULL)
+ return -ENOBUFS;
+ rd->remote_ip = ip;
+ rd->remote_port = port;
+ rd->remote_vni = vni;
+ rd->remote_ifindex = ifindex;
+ rd->remote_next = NULL;
+ rd_prev->remote_next = rd;
+ return 1;
+}
+
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, __be32 ip,
- __u16 state, __u16 flags)
+ __u16 state, __u16 flags,
+ __u32 port, __u32 vni, __u32 ifindex)
{
struct vxlan_fdb *f;
int notify = 0;
@@ -320,6 +362,14 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
+ if ((flags & NLM_F_APPEND) &&
+ is_multicast_ether_addr(f->eth_addr)) {
+ int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+
+ if (rc < 0)
+ return rc;
+ notify |= rc;
+ }
} else {
if (!(flags & NLM_F_CREATE))
return -ENOENT;
@@ -333,7 +383,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return -ENOMEM;
notify = 1;
- f->remote_ip = ip;
+ f->remote.remote_ip = ip;
+ f->remote.remote_port = port;
+ f->remote.remote_vni = vni;
+ f->remote.remote_ifindex = ifindex;
+ f->remote.remote_next = NULL;
f->state = state;
f->updated = f->used = jiffies;
memcpy(f->eth_addr, mac, ETH_ALEN);
@@ -349,6 +403,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return 0;
}
+void vxlan_fdb_free(struct rcu_head *head)
+{
+ struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
+
+ while (f->remote.remote_next) {
+ struct vxlan_rdst *rd = f->remote.remote_next;
+
+ f->remote.remote_next = rd->remote_next;
+ kfree(rd);
+ }
+ kfree(f);
+}
+
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
netdev_dbg(vxlan->dev,
@@ -358,7 +425,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
- kfree_rcu(f, rcu);
+ call_rcu(&f->rcu, vxlan_fdb_free);
}
/* Add static entry (via netlink) */
@@ -367,7 +434,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
const unsigned char *addr, u16 flags)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct net *net = dev_net(vxlan->dev);
__be32 ip;
+ u32 port, vni, ifindex;
int err;
if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -384,8 +453,36 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
ip = nla_get_be32(tb[NDA_DST]);
+ if (tb[NDA_PORT]) {
+ if (nla_len(tb[NDA_PORT]) != sizeof(u32))
+ return -EINVAL;
+ port = nla_get_u32(tb[NDA_PORT]);
+ } else
+ port = vxlan_port;
+
+ if (tb[NDA_VNI]) {
+ if (nla_len(tb[NDA_VNI]) != sizeof(u32))
+ return -EINVAL;
+ vni = nla_get_u32(tb[NDA_VNI]);
+ } else
+ vni = vxlan->vni;
+
+ if (tb[NDA_IFINDEX]) {
+ struct net_device *tdev;
+
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ return -EINVAL;
+ ifindex = nla_get_u32(tb[NDA_IFINDEX]);
+ tdev = dev_get_by_index(net, ifindex);
+ if (!tdev)
+ return -EADDRNOTAVAIL;
+ dev_put(tdev);
+ } else
+ ifindex = 0;
+
spin_lock_bh(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
+ err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, port,
+ vni, ifindex);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -423,18 +520,21 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
int err;
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
- if (idx < cb->args[0])
- goto skip;
-
- err = vxlan_fdb_info(skb, vxlan, f,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWNEIGH,
- NLM_F_MULTI);
- if (err < 0)
- break;
+ struct vxlan_rdst *rd;
+ for (rd = &f->remote; rd; rd = rd->remote_next) {
+ if (idx < cb->args[0])
+ goto skip;
+
+ err = vxlan_fdb_info(skb, vxlan, f,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI, rd);
+ if (err < 0)
+ break;
skip:
- ++idx;
+ ++idx;
+ }
}
}
@@ -454,22 +554,23 @@ static void vxlan_snoop(struct net_device *dev,
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
f->used = jiffies;
- if (likely(f->remote_ip == src_ip))
+ if (likely(f->remote.remote_ip == src_ip))
return;
if (net_ratelimit())
netdev_info(dev,
"%pM migrated from %pI4 to %pI4\n",
- src_mac, &f->remote_ip, &src_ip);
+ src_mac, &f->remote.remote_ip, &src_ip);
- f->remote_ip = src_ip;
+ f->remote.remote_ip = src_ip;
f->updated = jiffies;
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
err = vxlan_fdb_create(vxlan, src_mac, src_ip,
NUD_REACHABLE,
- NLM_F_EXCL|NLM_F_CREATE);
+ NLM_F_EXCL|NLM_F_CREATE,
+ vxlan_port, vxlan->vni, 0);
spin_unlock(&vxlan->hash_lock);
}
}
@@ -556,7 +657,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
struct iphdr *oip;
struct vxlanhdr *vxh;
struct vxlan_dev *vxlan;
- struct vxlan_stats *stats;
+ struct pcpu_tstats *stats;
__u32 vni;
int err;
@@ -632,7 +733,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
}
}
- stats = this_cpu_ptr(vxlan->stats);
+ stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
stats->rx_bytes += skb->len;
@@ -691,7 +792,6 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
n = neigh_lookup(&arp_tbl, &tip, dev);
if (n) {
- struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
struct sk_buff *reply;
@@ -701,7 +801,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
}
f = vxlan_find_mac(vxlan, n->ha);
- if (f && f->remote_ip == 0) {
+ if (f && f->remote.remote_ip == htonl(INADDR_ANY)) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
@@ -763,28 +863,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
return false;
}
-/* Extract dsfield from inner protocol */
-static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
- const struct sk_buff *skb)
-{
- if (skb->protocol == htons(ETH_P_IP))
- return iph->tos;
- else if (skb->protocol == htons(ETH_P_IPV6))
- return ipv6_get_dsfield((const struct ipv6hdr *)iph);
- else
- return 0;
-}
-
-/* Propogate ECN bits out */
-static inline u8 vxlan_ecn_encap(u8 tos,
- const struct iphdr *iph,
- const struct sk_buff *skb)
-{
- u8 inner = vxlan_get_dsfield(iph, skb);
-
- return INET_ECN_encapsulate(tos, inner);
-}
-
static void vxlan_sock_free(struct sk_buff *skb)
{
sock_put(skb->sk);
@@ -820,48 +898,40 @@ static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
return (((u64) hash * range) >> 32) + vxlan->port_min;
}
-/* Transmit local packets over Vxlan
- *
- * Outer IP header inherits ECN and DF from inner header.
- * Outer UDP destination is the VXLAN assigned port.
- * source port is based on hash of flow
- */
-static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+static int handle_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+
+ skb_shinfo(skb)->gso_type |= (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP);
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 0;
+}
+
+static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
+ struct vxlan_rdst *rdst, bool did_rsc)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct rtable *rt;
const struct iphdr *old_iph;
- struct ethhdr *eth;
struct iphdr *iph;
struct vxlanhdr *vxh;
struct udphdr *uh;
struct flowi4 fl4;
unsigned int pkt_len = skb->len;
__be32 dst;
- __u16 src_port;
+ __u16 src_port, dst_port;
+ u32 vni;
__be16 df = 0;
__u8 tos, ttl;
- int err;
- bool did_rsc = false;
- const struct vxlan_fdb *f;
-
- skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
- if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
- return arp_reduce(dev, skb);
- else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
- did_rsc = route_shortcircuit(dev, skb);
-
- f = vxlan_find_mac(vxlan, eth->h_dest);
- if (f == NULL) {
- did_rsc = false;
- dst = vxlan->gaddr;
- if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
- !is_multicast_ether_addr(eth->h_dest))
- vxlan_fdb_miss(vxlan, eth->h_dest);
- } else
- dst = f->remote_ip;
+ dst_port = rdst->remote_port ? rdst->remote_port : vxlan_port;
+ vni = rdst->remote_vni;
+ dst = rdst->remote_ip;
if (!dst) {
if (did_rsc) {
@@ -871,8 +941,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
/* short-circuited back to local bridge */
if (netif_rx(skb) == NET_RX_SUCCESS) {
- struct vxlan_stats *stats =
- this_cpu_ptr(vxlan->stats);
+ struct pcpu_tstats *stats = this_cpu_ptr(dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
@@ -904,12 +973,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
tos = vxlan->tos;
if (tos == 1)
- tos = vxlan_get_dsfield(old_iph, skb);
+ tos = ip_tunnel_get_dsfield(old_iph, skb);
src_port = vxlan_src_port(vxlan, skb);
memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = vxlan->link;
+ fl4.flowi4_oif = rdst->remote_ifindex;
fl4.flowi4_tos = RT_TOS(tos);
fl4.daddr = dst;
fl4.saddr = vxlan->saddr;
@@ -936,13 +1005,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = htonl(vxlan->vni << 8);
+ vxh->vx_vni = htonl(vni << 8);
__skb_push(skb, sizeof(*uh));
skb_reset_transport_header(skb);
uh = udp_hdr(skb);
- uh->dest = htons(vxlan_port);
+ uh->dest = htons(dst_port);
uh->source = htons(src_port);
uh->len = htons(skb->len);
@@ -955,7 +1024,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ihl = sizeof(struct iphdr) >> 2;
iph->frag_off = df;
iph->protocol = IPPROTO_UDP;
- iph->tos = vxlan_ecn_encap(tos, old_iph, skb);
+ iph->tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -965,22 +1034,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
vxlan_set_owner(dev, skb);
- /* See iptunnel_xmit() */
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- skb->ip_summed = CHECKSUM_NONE;
-
- err = ip_local_out(skb);
- if (likely(net_xmit_eval(err) == 0)) {
- struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
+ if (handle_offloads(skb))
+ goto drop;
- u64_stats_update_begin(&stats->syncp);
- stats->tx_packets++;
- stats->tx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- } else {
- dev->stats.tx_errors++;
- dev->stats.tx_aborted_errors++;
- }
+ iptunnel_xmit(skb, dev);
return NETDEV_TX_OK;
drop:
@@ -994,6 +1051,64 @@ tx_free:
return NETDEV_TX_OK;
}
+/* Transmit local packets over Vxlan
+ *
+ * Outer IP header inherits ECN and DF from inner header.
+ * Outer UDP destination is the VXLAN assigned port.
+ * source port is based on hash of flow
+ */
+static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct ethhdr *eth;
+ bool did_rsc = false;
+ struct vxlan_rdst group, *rdst0, *rdst;
+ struct vxlan_fdb *f;
+ int rc1, rc;
+
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
+ return arp_reduce(dev, skb);
+ else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
+ did_rsc = route_shortcircuit(dev, skb);
+
+ f = vxlan_find_mac(vxlan, eth->h_dest);
+ if (f == NULL) {
+ did_rsc = false;
+ group.remote_port = vxlan_port;
+ group.remote_vni = vxlan->vni;
+ group.remote_ip = vxlan->gaddr;
+ group.remote_ifindex = vxlan->link;
+ group.remote_next = NULL;
+ rdst0 = &group;
+
+ if (group.remote_ip == htonl(INADDR_ANY) &&
+ (vxlan->flags & VXLAN_F_L2MISS) &&
+ !is_multicast_ether_addr(eth->h_dest))
+ vxlan_fdb_miss(vxlan, eth->h_dest);
+ } else
+ rdst0 = &f->remote;
+
+ rc = NETDEV_TX_OK;
+
+ /* if there are multiple destinations, send copies */
+ for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
+ struct sk_buff *skb1;
+
+ skb1 = skb_clone(skb, GFP_ATOMIC);
+ rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
+ }
+
+ rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
+ if (rc == NETDEV_TX_OK)
+ rc = rc1;
+ return rc;
+}
+
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
@@ -1034,10 +1149,8 @@ static void vxlan_cleanup(unsigned long arg)
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
-
- vxlan->stats = alloc_percpu(struct vxlan_stats);
- if (!vxlan->stats)
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
return -ENOMEM;
return 0;
@@ -1093,49 +1206,6 @@ static int vxlan_stop(struct net_device *dev)
return 0;
}
-/* Merge per-cpu statistics */
-static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_stats tmp, sum = { 0 };
- unsigned int cpu;
-
- for_each_possible_cpu(cpu) {
- unsigned int start;
- const struct vxlan_stats *stats
- = per_cpu_ptr(vxlan->stats, cpu);
-
- do {
- start = u64_stats_fetch_begin_bh(&stats->syncp);
- memcpy(&tmp, stats, sizeof(tmp));
- } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
-
- sum.tx_bytes += tmp.tx_bytes;
- sum.tx_packets += tmp.tx_packets;
- sum.rx_bytes += tmp.rx_bytes;
- sum.rx_packets += tmp.rx_packets;
- }
-
- stats->tx_bytes = sum.tx_bytes;
- stats->tx_packets = sum.tx_packets;
- stats->rx_bytes = sum.rx_bytes;
- stats->rx_packets = sum.rx_packets;
-
- stats->multicast = dev->stats.multicast;
- stats->rx_length_errors = dev->stats.rx_length_errors;
- stats->rx_frame_errors = dev->stats.rx_frame_errors;
- stats->rx_errors = dev->stats.rx_errors;
-
- stats->tx_dropped = dev->stats.tx_dropped;
- stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
- stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
- stats->collisions = dev->stats.collisions;
- stats->tx_errors = dev->stats.tx_errors;
-
- return stats;
-}
-
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
@@ -1146,7 +1216,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
.ndo_open = vxlan_open,
.ndo_stop = vxlan_stop,
.ndo_start_xmit = vxlan_xmit,
- .ndo_get_stats64 = vxlan_stats64,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
.ndo_set_rx_mode = vxlan_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -1163,9 +1233,7 @@ static struct device_type vxlan_type = {
static void vxlan_free(struct net_device *dev)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
-
- free_percpu(vxlan->stats);
+ free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -1189,8 +1257,10 @@ static void vxlan_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -1555,6 +1625,7 @@ static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
unregister_pernet_device(&vxlan_net_ops);
+ rcu_barrier();
}
module_exit(vxlan_cleanup_module);
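For reference, a minimal userspace sketch of the destination-list scheme the vxlan hunks above introduce: the first remote lives embedded in the FDB entry, vxlan_fdb_append() skips exact duplicates and links extra remotes onto a singly linked list, and vxlan_xmit() fans one copy of the packet out per list entry. Types, field names and the main() harness below are simplified stand-ins, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct rdst {
	uint32_t ip;
	uint16_t port;
	uint32_t vni;
	struct rdst *next;
};

struct fdb {
	struct rdst remote;		/* first destination, embedded */
};

/* returns 1 if appended, 0 if an identical destination already exists */
static int fdb_append(struct fdb *f, uint32_t ip, uint16_t port, uint32_t vni)
{
	struct rdst *rd, *prev = NULL;

	for (rd = &f->remote; rd; rd = rd->next) {
		if (rd->ip == ip && rd->port == port && rd->vni == vni)
			return 0;
		prev = rd;
	}
	rd = malloc(sizeof(*rd));
	if (!rd)
		return -1;
	rd->ip = ip;
	rd->port = port;
	rd->vni = vni;
	rd->next = NULL;
	prev->next = rd;
	return 1;
}

int main(void)
{
	struct fdb f = { .remote = { .ip = 0x0a000001, .port = 8472, .vni = 10 } };

	fdb_append(&f, 0x0a000002, 8472, 10);
	fdb_append(&f, 0x0a000002, 8472, 10);	/* duplicate, ignored */

	/* transmit fan-out: one copy per destination, as in vxlan_xmit() */
	for (struct rdst *rd = &f.remote; rd; rd = rd->next)
		printf("send copy to %08x port %u vni %u\n",
		       (unsigned)rd->ip, (unsigned)rd->port, (unsigned)rd->vni);
	return 0;
}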
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 3150def1719..2d691b8b95b 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1523,7 +1523,8 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah);
/* EEPROM access functions */
int ath5k_eeprom_init(struct ath5k_hw *ah);
void ath5k_eeprom_detach(struct ath5k_hw *ah);
-
+int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel);
/* Protocol Control Unit Functions */
/* Helpers */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index b7e0258887e..94d34ee0226 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1779,7 +1779,8 @@ ath5k_eeprom_detach(struct ath5k_hw *ah)
}
int
-ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
+ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
{
switch (channel->hw_value) {
case AR5K_MODE_11A:
@@ -1789,6 +1790,7 @@ ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
case AR5K_MODE_11B:
return AR5K_EEPROM_MODE_11B;
default:
- return -1;
+ ATH5K_WARN(ah, "channel is not A/B/G!");
+ return AR5K_EEPROM_MODE_11A;
}
}
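A minimal sketch (not ath5k code) of the refactor above: rather than returning -1 and making every caller bail out, the helper warns and falls back to a default mode, which is what lets the later hunks drop the error handling at each call site:

#include <stdio.h>

enum mode { MODE_11A, MODE_11B, MODE_11G, MODE_UNKNOWN };
enum ee_mode { EE_MODE_11A, EE_MODE_11B, EE_MODE_11G };

static enum ee_mode ee_mode_from_channel(enum mode hw_mode)
{
	switch (hw_mode) {
	case MODE_11A: return EE_MODE_11A;
	case MODE_11B: return EE_MODE_11B;
	case MODE_11G: return EE_MODE_11G;
	default:
		fprintf(stderr, "warning: channel is not A/B/G, assuming 11A\n");
		return EE_MODE_11A;
	}
}

int main(void)
{
	printf("%d\n", ee_mode_from_channel(MODE_UNKNOWN));
	return 0;
}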
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 94a9bbea687..693296ee969 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -493,6 +493,3 @@ struct ath5k_eeprom_info {
/* Antenna raw switch tables */
u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
};
-
-int
-ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index a78afa98c65..d6bc7cb61bf 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1612,11 +1612,7 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
- ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
- if (WARN_ON(ee_mode < 0)) {
- ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
- return;
- }
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, ah->ah_current_channel);
/* completed NF calibration, test threshold */
nf = ath5k_hw_read_measured_noise_floor(ah);
@@ -2317,12 +2313,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
def_ant = ah->ah_def_ant;
- ee_mode = ath5k_eeprom_mode_from_channel(channel);
- if (ee_mode < 0) {
- ATH5K_ERR(ah,
- "invalid channel: %d\n", channel->center_freq);
- return;
- }
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
switch (ant_mode) {
case AR5K_ANTMODE_DEFAULT:
@@ -3622,12 +3613,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return -EINVAL;
}
- ee_mode = ath5k_eeprom_mode_from_channel(channel);
- if (ee_mode < 0) {
- ATH5K_ERR(ah,
- "invalid channel: %d\n", channel->center_freq);
- return -EINVAL;
- }
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
/* Initialize TX power table */
switch (ah->ah_radio) {
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index e2d8b2cf19e..a3399c4f13a 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -984,9 +984,7 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
if (ah->ah_version == AR5K_AR5210)
return;
- ee_mode = ath5k_eeprom_mode_from_channel(channel);
- if (WARN_ON(ee_mode < 0))
- return;
+ ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
/* Adjust power delta for channel 14 */
if (channel->center_freq == 2484)
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 630c83db056..e39e5860a2e 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -30,6 +30,15 @@ config ATH6KL_DEBUG
---help---
Enables debug support
+config ATH6KL_TRACING
+ bool "Atheros ath6kl tracing support"
+ depends on ATH6KL
+ depends on EVENT_TRACING
+ ---help---
+ Select this to enable the ath6kl tracing infrastructure.
+
+ If unsure, say Y to make it easier to debug problems.
+
config ATH6KL_REGDOMAIN
bool "Atheros ath6kl regdomain support"
depends on ATH6KL
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index cab0ec0d538..dc2b3b46781 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -35,10 +35,15 @@ ath6kl_core-y += txrx.o
ath6kl_core-y += wmi.o
ath6kl_core-y += core.o
ath6kl_core-y += recovery.o
+
ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
ath6kl_sdio-y += sdio.o
obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
ath6kl_usb-y += usb.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 752ffc4f416..5c9736a94e5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -402,7 +402,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
if (type == NL80211_IFTYPE_STATION ||
type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
for (i = 0; i < ar->vif_max; i++) {
- if ((ar->avail_idx_map >> i) & BIT(0)) {
+ if ((ar->avail_idx_map) & BIT(i)) {
*if_idx = i;
return true;
}
@@ -412,7 +412,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
if (type == NL80211_IFTYPE_P2P_CLIENT ||
type == NL80211_IFTYPE_P2P_GO) {
for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
- if ((ar->avail_idx_map >> i) & BIT(0)) {
+ if ((ar->avail_idx_map) & BIT(i)) {
*if_idx = i;
return true;
}
@@ -1535,7 +1535,9 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
+ rtnl_lock();
ath6kl_cfg80211_vif_cleanup(vif);
+ rtnl_unlock();
return 0;
}
@@ -2990,13 +2992,15 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
{
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_vif *vif = netdev_priv(dev);
+ int err;
if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
- /* Use this only for authorizing/unauthorizing a station */
- if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
- return -EOPNOTSUPP;
+ err = cfg80211_check_station_change(wiphy, params,
+ CFG80211_STA_AP_MLME_CLIENT);
+ if (err)
+ return err;
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
@@ -3659,7 +3663,6 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
vif->sme_state = SME_DISCONNECTED;
set_bit(WLAN_ENABLED, &vif->flags);
ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
- set_bit(NETDEV_REGISTERED, &vif->flags);
if (type == NL80211_IFTYPE_ADHOC)
ar->ibss_if_active = true;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 61b2f98b4e7..26b0f92424e 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -560,7 +560,6 @@ enum ath6kl_vif_state {
WMM_ENABLED,
NETQ_STOPPED,
DTIM_EXPIRED,
- NETDEV_REGISTERED,
CLEAR_BSSFILTER_ON_BEACON,
DTIM_PERIOD_AVAIL,
WLAN_ENABLED,
@@ -936,8 +935,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
u8 win_sz);
void ath6kl_wakeup_event(void *dev);
-void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
- bool wait_fot_compltn, bool cold_reset);
void ath6kl_init_control_info(struct ath6kl_vif *vif);
struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 15cfe30e54f..fe38b836cb2 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -56,6 +56,60 @@ int ath6kl_printk(const char *level, const char *fmt, ...)
}
EXPORT_SYMBOL(ath6kl_printk);
+int ath6kl_info(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
+ trace_ath6kl_log_info(&vaf);
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath6kl_info);
+
+int ath6kl_err(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
+ trace_ath6kl_log_err(&vaf);
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath6kl_err);
+
+int ath6kl_warn(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
+ trace_ath6kl_log_warn(&vaf);
+ va_end(args);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath6kl_warn);
+
#ifdef CONFIG_ATH6KL_DEBUG
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
@@ -63,15 +117,15 @@ void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
struct va_format vaf;
va_list args;
- if (!(debug_mask & mask))
- return;
-
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
+ if (debug_mask & mask)
+ ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
+
+ trace_ath6kl_log_dbg(mask, &vaf);
va_end(args);
}
@@ -87,6 +141,10 @@ void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
}
+
+ /* tracing code doesn't like null strings :/ */
+ trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
+ buf, len);
}
EXPORT_SYMBOL(ath6kl_dbg_dump);
@@ -1752,8 +1810,10 @@ int ath6kl_debug_init_fs(struct ath6kl *ar)
debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_tgt_stats);
- debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
- &fops_credit_dist_stats);
+ if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO)
+ debugfs_create_file("credit_dist_stats", S_IRUSR,
+ ar->debugfs_phy, ar,
+ &fops_credit_dist_stats);
debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
ar->debugfs_phy, ar, &fops_endpoint_stats);
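A userspace analogue of the new ath6kl_info()/ath6kl_err()/ath6kl_warn() wrappers: format the message once from the va_list and hand it to both the console path and a trace hook. The kernel version routes a struct va_format through %pV and a tracepoint; the sketch below uses va_copy() and stdio as stand-ins:

#include <stdarg.h>
#include <stdio.h>

/* stand-in for the trace_ath6kl_log_err() tracepoint */
static void my_trace_hook(const char *fmt, va_list args)
{
	vfprintf(stderr, fmt, args);
}

static int my_err(const char *fmt, ...)
{
	va_list args, args2;
	int ret;

	va_start(args, fmt);
	va_copy(args2, args);
	ret = vprintf(fmt, args);	/* console path (ath6kl_printk) */
	my_trace_hook(fmt, args2);	/* tracing path */
	va_end(args2);
	va_end(args);
	return ret;
}

int main(void)
{
	my_err("something failed: %d\n", -5);
	return 0;
}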
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index f97cd4ead54..74369de00fb 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -19,6 +19,7 @@
#define DEBUG_H
#include "hif.h"
+#include "trace.h"
enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_CREDIT = BIT(0),
@@ -51,13 +52,9 @@ enum ATH6K_DEBUG_MASK {
extern unsigned int debug_mask;
extern __printf(2, 3)
int ath6kl_printk(const char *level, const char *fmt, ...);
-
-#define ath6kl_info(fmt, ...) \
- ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__)
-#define ath6kl_err(fmt, ...) \
- ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
-#define ath6kl_warn(fmt, ...) \
- ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
+extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
+extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
+extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index a6b614421fa..fea7709b5dd 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -22,6 +22,7 @@
#include "target.h"
#include "hif-ops.h"
#include "debug.h"
+#include "trace.h"
#define MAILBOX_FOR_BLOCK_SIZE 1
@@ -436,6 +437,8 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
ath6kl_dump_registers(dev, &dev->irq_proc_reg,
&dev->irq_en_reg);
+ trace_ath6kl_sdio_irq(&dev->irq_en_reg,
+ sizeof(dev->irq_en_reg));
/* Update only those registers that are enabled */
host_int_status = dev->irq_proc_reg.host_int_status &
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index fbb78dfe078..65e5b719093 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -19,6 +19,8 @@
#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
+#include "trace.h"
+
#include <asm/unaligned.h>
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
@@ -537,6 +539,8 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
packet->buf, padded_len,
HIF_WR_ASYNC_BLOCK_INC, packet);
+ trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
+
return status;
}
@@ -757,7 +761,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
{
struct htc_target *target = endpoint->target;
struct hif_scatter_req *scat_req = NULL;
- int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
+ int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
+ struct htc_packet *packet;
int status;
u32 txb_mask;
u8 ac = WMM_NUM_AC;
@@ -832,6 +837,13 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc tx scatter bytes %d entries %d\n",
scat_req->len, scat_req->scat_entries);
+
+ for (i = 0; i < scat_req->scat_entries; i++) {
+ packet = scat_req->scat_list[i].packet;
+ trace_ath6kl_htc_tx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+ }
+
ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
if (status)
@@ -1903,6 +1915,7 @@ static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
ath6kl_dbg(ATH6KL_DBG_HTC,
"htc rx complete ep %d packet 0x%p\n",
endpoint->eid, packet);
+
endpoint->ep_cb.rx(endpoint->target, packet);
}
@@ -2011,6 +2024,9 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
ep = &target->endpoint[packet->endpoint];
+ trace_ath6kl_htc_rx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+
/* process header for each of the recv packet */
status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
n_lk_ahd);
@@ -2291,6 +2307,9 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
goto fail_ctrl_rx;
+ trace_ath6kl_htc_rx(packet->status, packet->endpoint,
+ packet->buf, packet->act_len);
+
/* process receive header */
packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 281390178e3..67aa924ed8b 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -988,8 +988,6 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
htc_hdr = (struct htc_frame_hdr *) netdata;
- ep = &target->endpoint[htc_hdr->eid];
-
if (htc_hdr->eid >= ENDPOINT_MAX) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"HTC Rx: invalid EndpointID=%d\n",
@@ -997,6 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
status = -EINVAL;
goto free_skb;
}
+ ep = &target->endpoint[htc_hdr->eid];
payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
@@ -1168,8 +1167,8 @@ static int htc_wait_recv_ctrl_message(struct htc_target *target)
}
if (count <= 0) {
- ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
- return -ECOMM;
+ ath6kl_warn("htc pipe control receive timeout!\n");
+ return -ETIMEDOUT;
}
return 0;
@@ -1582,16 +1581,16 @@ static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
return status;
if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
- ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
- target->pipe.ctrl_response_len);
+ ath6kl_warn("invalid htc pipe ready msg len: %d\n",
+ target->pipe.ctrl_response_len);
return -ECOMM;
}
ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
- ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
- ready_msg->ver2_0_info.msg_id);
+ ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
+ ready_msg->ver2_0_info.msg_id);
return -ECOMM;
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 5d434cf88f3..40ffee6184f 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -201,8 +201,8 @@ struct sk_buff *ath6kl_buf_alloc(int size)
u16 reserved;
/* Add chacheline space at front and back of buffer */
- reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
- sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES;
+ reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
+ sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
skb = dev_alloc_skb(size + reserved);
if (skb)
@@ -1549,10 +1549,89 @@ static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
return NULL;
}
+
+static const struct fw_capa_str_map {
+ int id;
+ const char *name;
+} fw_capa_map[] = {
+ { ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" },
+ { ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" },
+ { ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" },
+ { ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" },
+ { ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" },
+ { ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" },
+ { ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" },
+ { ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" },
+ { ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" },
+ { ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" },
+ { ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" },
+ { ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
+ { ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
+ { ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
+};
+
+static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) {
+ if (fw_capa_map[i].id == id)
+ return fw_capa_map[i].name;
+ }
+
+ return "<unknown>";
+}
+
+static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len)
+{
+ u8 *data = (u8 *) ar->fw_capabilities;
+ size_t trunc_len, len = 0;
+ int i, index, bit;
+ char *trunc = "...";
+
+ for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index >= sizeof(ar->fw_capabilities) * 4)
+ break;
+
+ if (buf_len - len < 4) {
+ ath6kl_warn("firmware capability buffer too small!\n");
+
+ /* add "..." to the end of string */
+ trunc_len = strlen(trunc) + 1;
+ strncpy(buf + buf_len - trunc_len, trunc, trunc_len);
+
+ return;
+ }
+
+ if (data[index] & (1 << bit)) {
+ len += scnprintf(buf + len, buf_len - len, "%s,",
+ ath6kl_init_get_fw_capa_name(i));
+ }
+ }
+
+ /* overwrite the last comma */
+ if (len > 0)
+ len--;
+
+ buf[len] = '\0';
+}
+
+static int ath6kl_init_hw_reset(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device");
+
+ return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,
+ cpu_to_le32(RESET_CONTROL_COLD_RST));
+}
+
static int __ath6kl_init_hw_start(struct ath6kl *ar)
{
long timeleft;
int ret, i;
+ char buf[200];
ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
@@ -1569,24 +1648,35 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
goto err_power_off;
/* Do we need to finish the BMI phase */
- /* FIXME: return error from ath6kl_bmi_done() */
- if (ath6kl_bmi_done(ar)) {
- ret = -EIO;
+ ret = ath6kl_bmi_done(ar);
+ if (ret)
goto err_power_off;
- }
/*
* The reason we have to wait for the target here is that the
* driver layer has to init BMI in order to set the host block
* size.
*/
- if (ath6kl_htc_wait_target(ar->htc_target)) {
- ret = -EIO;
+ ret = ath6kl_htc_wait_target(ar->htc_target);
+
+ if (ret == -ETIMEDOUT) {
+ /*
+ * Most likely USB target is in odd state after reboot and
+ * needs a reset. A cold reset makes the whole device
+ * disappear from USB bus and initialisation starts from
+ * beginning.
+ */
+ ath6kl_warn("htc wait target timed out, resetting device\n");
+ ath6kl_init_hw_reset(ar);
+ goto err_power_off;
+ } else if (ret) {
+ ath6kl_err("htc wait target failed: %d\n", ret);
goto err_power_off;
}
- if (ath6kl_init_service_ep(ar)) {
- ret = -EIO;
+ ret = ath6kl_init_service_ep(ar);
+ if (ret) {
+ ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
goto err_cleanup_scatter;
}
@@ -1617,6 +1707,8 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
ar->wiphy->fw_version,
ar->fw_api,
test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+ ath6kl_init_get_fwcaps(ar, buf, sizeof(buf));
+ ath6kl_info("firmware supports: %s\n", buf);
}
if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
@@ -1765,9 +1857,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
* Try to reset the device if we can. The driver may have been
* configure NOT to reset the target during a debug session.
*/
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "attempting to reset target on instance destroy\n");
- ath6kl_reset_device(ar, ar->target_type, true, true);
+ ath6kl_init_hw_reset(ar);
up(&ar->sem);
}
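A userspace sketch of the capability-string builder added above (ath6kl_init_get_fwcaps): walk a byte-array bitmap, append the name of each set bit, and trim the trailing comma. The capability names and buffer sizes below are illustrative only:

#include <stdio.h>
#include <stdint.h>

static const char *capa_names[] = { "host-p2p", "sched-scan", "regdomain" };

static void get_fwcaps(const uint8_t *bitmap, size_t nbits,
		       char *buf, size_t buf_len)
{
	size_t max = sizeof(capa_names) / sizeof(capa_names[0]);
	size_t len = 0;

	buf[0] = '\0';
	for (size_t i = 0; i < nbits && i < max; i++) {
		int n;

		if (!(bitmap[i / 8] & (1u << (i % 8))))
			continue;
		n = snprintf(buf + len, buf_len - len, "%s,", capa_names[i]);
		if (n < 0 || (size_t)n >= buf_len - len)
			break;		/* out of room; the driver appends "..." here */
		len += (size_t)n;
	}
	if (len > 0)
		buf[len - 1] = '\0';	/* overwrite the trailing comma */
}

int main(void)
{
	uint8_t caps[1] = { 0x05 };	/* bits 0 and 2 set */
	char buf[64];

	get_fwcaps(caps, 8, buf, sizeof(buf));
	printf("firmware supports: %s\n", buf);	/* host-p2p,regdomain */
	return 0;
}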
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index bd50b6b7b49..d4fcfcad57d 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -345,39 +345,6 @@ out:
return ret;
}
-/* FIXME: move to a better place, target.h? */
-#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
-#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
-
-void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
- bool wait_fot_compltn, bool cold_reset)
-{
- int status = 0;
- u32 address;
- __le32 data;
-
- if (target_type != TARGET_TYPE_AR6003 &&
- target_type != TARGET_TYPE_AR6004)
- return;
-
- data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
- cpu_to_le32(RESET_CONTROL_MBOX_RST);
-
- switch (target_type) {
- case TARGET_TYPE_AR6003:
- address = AR6003_RESET_CONTROL_ADDRESS;
- break;
- case TARGET_TYPE_AR6004:
- address = AR6004_RESET_CONTROL_ADDRESS;
- break;
- }
-
- status = ath6kl_diag_write32(ar, address, data);
-
- if (status)
- ath6kl_err("failed to reset target\n");
-}
-
static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
{
u8 index;
@@ -1327,9 +1294,11 @@ void init_netdev(struct net_device *dev)
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
dev->needed_headroom = ETH_HLEN;
- dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) +
- sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH
- + WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES;
+ dev->needed_headroom += roundup(sizeof(struct ath6kl_llc_snap_hdr) +
+ sizeof(struct wmi_data_hdr) +
+ HTC_HDR_LENGTH +
+ WMI_MAX_TX_META_SZ +
+ ATH6KL_HTC_ALIGN_BYTES, 4);
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index d111980d44c..fb141454c6d 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -28,6 +28,7 @@
#include "target.h"
#include "debug.h"
#include "cfg80211.h"
+#include "trace.h"
struct ath6kl_sdio {
struct sdio_func *func;
@@ -179,6 +180,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
+ trace_ath6kl_sdio(addr, request, buf, len);
+
return ret;
}
@@ -309,6 +312,13 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
sdio_claim_host(ar_sdio->func);
mmc_set_data_timeout(&data, ar_sdio->func->card);
+
+ trace_ath6kl_sdio_scat(scat_req->addr,
+ scat_req->req,
+ scat_req->len,
+ scat_req->scat_entries,
+ scat_req->scat_list);
+
/* synchronous call to process request */
mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
@@ -1123,10 +1133,12 @@ static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
HIF_WR_SYNC_BYTE_INC);
- if (ret)
+ if (ret) {
ath6kl_err("unable to send the bmi data to the device\n");
+ return ret;
+ }
- return ret;
+ return 0;
}
static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a98c12ba70c..a580a629a0d 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -25,7 +25,7 @@
#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0
-#define RESET_CONTROL_ADDRESS 0x00000000
+#define RESET_CONTROL_ADDRESS 0x00004000
#define RESET_CONTROL_COLD_RST 0x00000100
#define RESET_CONTROL_MBOX_RST 0x00000004
diff --git a/drivers/net/wireless/ath/ath6kl/trace.c b/drivers/net/wireless/ath/ath6kl/trace.c
new file mode 100644
index 00000000000..e7d64b1285c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.c
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio);
+EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio_scat);
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
new file mode 100644
index 00000000000..1a1ea7881b4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -0,0 +1,332 @@
+#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <net/cfg80211.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+#include "wmi.h"
+#include "hif.h"
+
+#if !defined(_ATH6KL_TRACE_H)
+static inline unsigned int ath6kl_get_wmi_id(void *buf, size_t buf_len)
+{
+ struct wmi_cmd_hdr *hdr = buf;
+
+ if (buf_len < sizeof(*hdr))
+ return 0;
+
+ return le16_to_cpu(hdr->cmd_id);
+}
+#endif /* _ATH6KL_TRACE_H */
+
+#define _ATH6KL_TRACE_H
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH6KL_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH6KL_TRACING || __CHECKER__ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath6kl
+
+TRACE_EVENT(ath6kl_wmi_cmd,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = ath6kl_get_wmi_id(buf, buf_len);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id %d len %zd",
+ __entry->id, __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_wmi_event,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->id = ath6kl_get_wmi_id(buf, buf_len);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "id %d len %zd",
+ __entry->id, __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_sdio,
+ TP_PROTO(unsigned int addr, int flags,
+ void *buf, size_t buf_len),
+
+ TP_ARGS(addr, flags, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, tx)
+ __field(unsigned int, addr)
+ __field(int, flags)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->flags = flags;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+
+ if (flags & HIF_WRITE)
+ __entry->tx = 1;
+ else
+ __entry->tx = 0;
+ ),
+
+ TP_printk(
+ "%s addr 0x%x flags 0x%x len %zd\n",
+ __entry->tx ? "tx" : "rx",
+ __entry->addr,
+ __entry->flags,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_sdio_scat,
+ TP_PROTO(unsigned int addr, int flags, unsigned int total_len,
+ unsigned int entries, struct hif_scatter_item *list),
+
+ TP_ARGS(addr, flags, total_len, entries, list),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, tx)
+ __field(unsigned int, addr)
+ __field(int, flags)
+ __field(unsigned int, entries)
+ __field(size_t, total_len)
+ __dynamic_array(unsigned int, len_array, entries)
+ __dynamic_array(u8, data, total_len)
+ ),
+
+ TP_fast_assign(
+ unsigned int *len_array;
+ int i, offset = 0;
+ size_t len;
+
+ __entry->addr = addr;
+ __entry->flags = flags;
+ __entry->entries = entries;
+ __entry->total_len = total_len;
+
+ if (flags & HIF_WRITE)
+ __entry->tx = 1;
+ else
+ __entry->tx = 0;
+
+ len_array = __get_dynamic_array(len_array);
+
+ for (i = 0; i < entries; i++) {
+ len = list[i].len;
+
+ memcpy((u8 *) __get_dynamic_array(data) + offset,
+ list[i].buf, len);
+
+ len_array[i] = len;
+ offset += len;
+ }
+ ),
+
+ TP_printk(
+ "%s addr 0x%x flags 0x%x entries %d total_len %zd\n",
+ __entry->tx ? "tx" : "rx",
+ __entry->addr,
+ __entry->flags,
+ __entry->entries,
+ __entry->total_len
+ )
+);
+
+TRACE_EVENT(ath6kl_sdio_irq,
+ TP_PROTO(void *buf, size_t buf_len),
+
+ TP_ARGS(buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "irq len %zd\n", __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_htc_rx,
+ TP_PROTO(int status, int endpoint, void *buf,
+ size_t buf_len),
+
+ TP_ARGS(status, endpoint, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(int, endpoint)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->endpoint = endpoint;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "status %d endpoint %d len %zd\n",
+ __entry->status,
+ __entry->endpoint,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath6kl_htc_tx,
+ TP_PROTO(int status, int endpoint, void *buf,
+ size_t buf_len),
+
+ TP_ARGS(status, endpoint, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __field(int, status)
+ __field(int, endpoint)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->endpoint = endpoint;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "status %d endpoint %d len %zd\n",
+ __entry->status,
+ __entry->endpoint,
+ __entry->buf_len
+ )
+);
+
+#define ATH6KL_MSG_MAX 200
+
+DECLARE_EVENT_CLASS(ath6kl_log_event,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ ),
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH6KL_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH6KL_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(ath6kl_log_event, ath6kl_log_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath6kl_log_event, ath6kl_log_warn,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(ath6kl_log_event, ath6kl_log_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+TRACE_EVENT(ath6kl_log_dbg,
+ TP_PROTO(unsigned int level, struct va_format *vaf),
+ TP_ARGS(level, vaf),
+ TP_STRUCT__entry(
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH6KL_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH6KL_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH6KL_MSG_MAX);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(ath6kl_log_dbg_dump,
+ TP_PROTO(const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s/%s\n", __get_str(prefix), __get_str(msg)
+ )
+);
+
+#endif /* _ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
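The top of trace.h compiles the tracepoints away when CONFIG_ATH6KL_TRACING is off by redefining TRACE_EVENT into an empty inline stub. A self-contained sketch of that macro pattern (MYDRV_TRACING and my_event are made-up names; the real macros also take the TP_STRUCT/TP_fast_assign/TP_printk blocks as extra arguments and wrap multi-argument prototypes in TP_PROTO()):

#include <stdio.h>

#if !defined(MYDRV_TRACING)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto) \
static inline void trace_ ## name(proto) {}
#endif

/* one-parameter prototype keeps the sketch simple */
TRACE_EVENT(my_event, int value)

int main(void)
{
	trace_my_event(42);	/* compiles to an empty inline stub */
	printf("tracing disabled: the call above recorded nothing\n");
	return 0;
}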
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 78b36928657..ebb24045a8a 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -20,6 +20,7 @@
#include "core.h"
#include "debug.h"
#include "htc-ops.h"
+#include "trace.h"
/*
* tid - tid_mux0..tid_mux3
@@ -288,6 +289,8 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
int status = 0;
struct ath6kl_cookie *cookie = NULL;
+ trace_ath6kl_wmi_cmd(skb->data, skb->len);
+
if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
dev_kfree_skb(skb);
return -EACCES;
@@ -1324,7 +1327,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
__func__, ar, ept, skb, packet->buf,
packet->act_len, status);
- if (status || !(skb->data + HTC_HDR_LENGTH)) {
+ if (status || packet->act_len < HTC_HDR_LENGTH) {
dev_kfree_skb(skb);
return;
}
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 5fcd342762d..bed0d337712 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -856,11 +856,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
int ret;
if (size > 0) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
-
- memcpy(buf, data, size);
}
/* note: if successful returns number of bytes transfered */
@@ -872,8 +870,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
size, 1000);
if (ret < 0) {
- ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
- __func__, ret);
+ ath6kl_warn("Failed to submit usb control message: %d\n", ret);
+ kfree(buf);
+ return ret;
}
kfree(buf);
@@ -903,8 +902,9 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
size, 2 * HZ);
if (ret < 0) {
- ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n",
- __func__, ret);
+ ath6kl_warn("Failed to read usb control message: %d\n", ret);
+ kfree(buf);
+ return ret;
}
memcpy((u8 *) data, buf, size);
@@ -961,8 +961,10 @@ static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
ar_usb->diag_resp_buffer, &resp_len);
- if (ret)
+ if (ret) {
+ ath6kl_warn("diag read32 failed: %d\n", ret);
return ret;
+ }
resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
ar_usb->diag_resp_buffer;
@@ -976,6 +978,7 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
{
struct ath6kl_usb *ar_usb = ar->hif_priv;
struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
+ int ret;
cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
@@ -984,12 +987,17 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
cmd->address = cpu_to_le32(address);
cmd->value = data;
- return ath6kl_usb_ctrl_msg_exchange(ar_usb,
- ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
- (u8 *) cmd,
- sizeof(*cmd),
- 0, NULL, NULL);
+ ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
+ ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *) cmd,
+ sizeof(*cmd),
+ 0, NULL, NULL);
+ if (ret) {
+ ath6kl_warn("diag_write32 failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
}
static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
@@ -1001,7 +1009,7 @@ static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
ret = ath6kl_usb_submit_ctrl_in(ar_usb,
ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
0, 0, buf, len);
- if (ret != 0) {
+ if (ret) {
ath6kl_err("Unable to read the bmi data from the device: %d\n",
ret);
return ret;
@@ -1019,7 +1027,7 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
ret = ath6kl_usb_submit_ctrl_out(ar_usb,
ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
0, 0, buf, len);
- if (ret != 0) {
+ if (ret) {
ath6kl_err("unable to send the bmi data to the device: %d\n",
ret);
return ret;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index d76b5bd81a0..87aefb4c4c2 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -20,6 +20,7 @@
#include "core.h"
#include "debug.h"
#include "testmode.h"
+#include "trace.h"
#include "../regd.h"
#include "../regd_common.h"
@@ -2028,6 +2029,9 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
if (!sband)
continue;
+ if (WARN_ON(band >= ATH6KL_NUM_BANDS))
+ break;
+
ratemask = rates[band];
supp_rates = sc->supp_rates[band].rates;
num_rates = 0;
@@ -4086,6 +4090,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
return -EINVAL;
}
+ trace_ath6kl_wmi_event(skb->data, skb->len);
+
return ath6kl_wmi_proc_events(wmi, skb);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 881e989ea47..e6b92ff265f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3606,6 +3606,12 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
+ if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
+ value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
+ REG_RMW_FIELD(ah, switch_chain_reg[0],
+ AR_SWITCH_TABLE_ALL, value);
+ }
+
for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
if ((ah->rxchainmask & BIT(chain)) ||
(ah->txchainmask & BIT(chain))) {
@@ -3772,6 +3778,17 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
AR_PHY_EXT_ATTEN_CTL_2,
};
+ if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
+ value = ar9003_hw_atten_chain_get(ah, 1, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[0],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
+
+ value = ar9003_hw_atten_chain_get_margin(ah, 1, chan);
+ REG_RMW_FIELD(ah, ext_atten_reg[0],
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+ value);
+ }
+
/* Test value. if 0 then attenuation is unused. Don't load anything. */
for (i = 0; i < 3; i++) {
if (ah->txchainmask & BIT(i)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index ccc42a71b43..999ab08c34e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -37,28 +37,28 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18253ede},
{0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003580c},
+ {0x00018c08, 0x0003780c},
};
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
- {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
{0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
- {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -82,9 +82,9 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
{0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
- {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
{0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
{0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
{0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
@@ -363,14 +363,14 @@ static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18213ede},
{0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003580c},
+ {0x00018c08, 0x0003780c},
};
static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
/* Addr allmodes */
{0x00018c00, 0x18212ede},
{0x00018c04, 0x000801d8},
- {0x00018c08, 0x0003580c},
+ {0x00018c08, 0x0003780c},
};
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
@@ -775,7 +775,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x00009fc0, 0x803e4788},
{0x00009fc4, 0x0001efb5},
{0x00009fcc, 0x40000014},
- {0x00009fd0, 0x01193b93},
+ {0x00009fd0, 0x0a193b93},
{0x0000a20c, 0x00000000},
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
@@ -850,7 +850,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a7cc, 0x00000000},
{0x0000a7d0, 0x00000000},
{0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000001},
+ {0x0000a7dc, 0x00000000},
{0x0000a7f0, 0x80000000},
{0x0000a8d0, 0x004b6a8e},
{0x0000a8d4, 0x00000820},
@@ -886,7 +886,7 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
{0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
+ {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
@@ -906,20 +906,20 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
+ {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
+ {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
+ {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
+ {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
+ {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1053,7 +1053,6 @@ static const u32 ar9462_2p0_mac_core[][2] = {
{0x00008044, 0x00000000},
{0x00008048, 0x00000000},
{0x0000804c, 0xffffffff},
- {0x00008050, 0xffffffff},
{0x00008054, 0x00000000},
{0x00008058, 0x00000000},
{0x0000805c, 0x000fc78f},
@@ -1117,9 +1116,9 @@ static const u32 ar9462_2p0_mac_core[][2] = {
{0x000081f8, 0x00000000},
{0x000081fc, 0x00000000},
{0x00008240, 0x00100000},
- {0x00008244, 0x0010f424},
+ {0x00008244, 0x0010f400},
{0x00008248, 0x00000800},
- {0x0000824c, 0x0001e848},
+ {0x0000824c, 0x0001e800},
{0x00008250, 0x00000000},
{0x00008254, 0x00000000},
{0x00008258, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 1e8508530e9..7bdd726c7a8 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -369,7 +369,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
struct ieee80211_channel *c = chan->chan;
struct ath9k_hw_cal_data *caldata = ah->caldata;
- chan->channelFlags &= (~CHANNEL_CW_INT);
if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
ath_dbg(common, CALIBRATE,
"NF did not complete in calibration window\n");
@@ -384,7 +383,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
ath_dbg(common, CALIBRATE,
"noise floor failed detected; detected %d, threshold %d\n",
nf, nfThresh);
- chan->channelFlags |= CHANNEL_CW_INT;
}
if (!caldata) {
@@ -410,7 +408,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
int i, j;
ah->caldata->channel = chan->channel;
- ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
+ ah->caldata->channelFlags = chan->channelFlags;
ah->caldata->chanmode = chan->chanmode;
h = ah->caldata->nfCalHist;
default_nf = ath9k_hw_get_default_nf(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 050ca4a4850..6102476a65d 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -40,7 +40,7 @@
x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
} while (0)
#define ATH_EP_RND(x, mul) \
- ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
+ (((x) + ((mul)/2)) / (mul))
int ath9k_cmn_padpos(__le16 frame_control);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
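The simplified ATH_EP_RND() above rounds a non-negative running average to the nearest multiple of the divisor, replacing the remainder test of the old form. A minimal standalone sketch of that behaviour; the sample values are made up, only the macro comes from the patch:

	#include <stdio.h>

	#define ATH_EP_RND(x, mul) \
		(((x) + ((mul)/2)) / (mul))

	int main(void)
	{
		/* adding mul/2 before the integer divide rounds to the
		 * nearest multiple for non-negative x instead of truncating */
		printf("%d\n", ATH_EP_RND(5, 4));	/* 1.25 -> 1 */
		printf("%d\n", ATH_EP_RND(6, 4));	/* 1.50 -> 2 */
		printf("%d\n", ATH_EP_RND(7, 4));	/* 1.75 -> 2 */
		return 0;
	}
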
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3714b971d18..67a2a4b3b88 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -537,6 +537,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
PR("AMPDUs Completed:", a_completed);
PR("AMPDUs Retried: ", a_retries);
PR("AMPDUs XRetried: ", a_xretries);
+ PR("TXERR Filtered: ", txerr_filtered);
PR("FIFO Underrun: ", fifo_underrun);
PR("TXOP Exceeded: ", xtxop);
PR("TXTIMER Expiry: ", timer_exp);
@@ -756,6 +757,8 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
TX_STAT_INC(qnum, completed);
}
+ if (ts->ts_status & ATH9K_TXERR_FILT)
+ TX_STAT_INC(qnum, txerr_filtered);
if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
@@ -1909,6 +1912,7 @@ static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
AMKSTR(d_tx_desc_cfg_err),
AMKSTR(d_tx_data_underrun),
AMKSTR(d_tx_delim_underrun),
+ "d_rx_crc_err",
"d_rx_decrypt_crc_err",
"d_rx_phy_err",
"d_rx_mic_err",
@@ -1989,6 +1993,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
AWDATA(data_underrun);
AWDATA(delim_underrun);
+ AWDATA_RX(crc_err);
AWDATA_RX(decrypt_crc_err);
AWDATA_RX(phy_err);
AWDATA_RX(mic_err);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 410d6d8f1aa..794a7ec83a2 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -142,6 +142,7 @@ struct ath_interrupt_stats {
* @a_completed: Total AMPDUs completed
* @a_retries: No. of AMPDUs retried (SW)
* @a_xretries: No. of AMPDUs dropped due to xretries
+ * @txerr_filtered: No. of frames with TXERR_FILT flag set.
* @fifo_underrun: FIFO underrun occurrences
Valid only for:
- non-aggregate condition.
@@ -168,6 +169,7 @@ struct ath_tx_stats {
u32 a_completed;
u32 a_retries;
u32 a_xretries;
+ u32 txerr_filtered;
u32 fifo_underrun;
u32 xtxop;
u32 timer_exp;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 07e25260c31..4fa2bb16705 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1669,6 +1669,104 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_check_alive);
+static void ath9k_hw_init_mfp(struct ath_hw *ah)
+{
+ /* Setup MFP options for CCMP */
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
+ * frames when constructing CCMP AAD. */
+ REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
+ 0xc7ff);
+ ah->sw_mgmt_crypto = false;
+ } else if (AR_SREV_9160_10_OR_LATER(ah)) {
+ /* Disable hardware crypto for management frames */
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
+ REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
+ AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
+ ah->sw_mgmt_crypto = true;
+ } else {
+ ah->sw_mgmt_crypto = true;
+ }
+}
+
+static void ath9k_hw_reset_opmode(struct ath_hw *ah,
+ u32 macStaId1, u32 saveDefAntenna)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
+ REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
+ | macStaId1
+ | AR_STA_ID1_RTS_USE_DEF
+ | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
+ | ah->sta_id1_defaults);
+ ath_hw_setbssidmask(common);
+ REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
+ ath9k_hw_write_associd(ah);
+ REG_WRITE(ah, AR_ISR, ~0);
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ ath9k_hw_set_operating_mode(ah, ah->opmode);
+}
+
+static void ath9k_hw_init_queues(struct ath_hw *ah)
+{
+ int i;
+
+ ENABLE_REGWRITE_BUFFER(ah);
+
+ for (i = 0; i < AR_NUM_DCU; i++)
+ REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
+
+ REGWRITE_BUFFER_FLUSH(ah);
+
+ ah->intr_txqs = 0;
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ ath9k_hw_resettxqueue(ah, i);
+}
+
+/*
+ * For big endian systems turn on swapping for descriptors
+ */
+static void ath9k_hw_init_desc(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (AR_SREV_9100(ah)) {
+ u32 mask;
+ mask = REG_READ(ah, AR_CFG);
+ if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
+ ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
+ mask);
+ } else {
+ mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
+ REG_WRITE(ah, AR_CFG, mask);
+ ath_dbg(common, RESET, "Setting CFG 0x%x\n",
+ REG_READ(ah, AR_CFG));
+ }
+ } else {
+ if (common->bus_ops->ath_bus_type == ATH_USB) {
+ /* Configure AR9271 target WLAN */
+ if (AR_SREV_9271(ah))
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
+ else
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+ }
+#ifdef __BIG_ENDIAN
+ else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9550(ah))
+ REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
+ else
+ REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
+#endif
+ }
+}
+
/*
* Fast channel change:
* (Change synthesizer based on channel freq without resetting chip)
@@ -1746,7 +1844,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
- int i, r;
+ int r;
bool start_mci_reset = false;
bool save_fullsleep = ah->chip_fullsleep;
@@ -1763,10 +1861,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_getnf(ah, ah->curchan);
ah->caldata = caldata;
- if (caldata &&
- (chan->channel != caldata->channel ||
- (chan->channelFlags & ~CHANNEL_CW_INT) !=
- (caldata->channelFlags & ~CHANNEL_CW_INT))) {
+ if (caldata && (chan->channel != caldata->channel ||
+ chan->channelFlags != caldata->channelFlags)) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
@@ -1853,22 +1949,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_settsf64(ah, tsf);
}
- /* Setup MFP options for CCMP */
- if (AR_SREV_9280_20_OR_LATER(ah)) {
- /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
- * frames when constructing CCMP AAD. */
- REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
- 0xc7ff);
- ah->sw_mgmt_crypto = false;
- } else if (AR_SREV_9160_10_OR_LATER(ah)) {
- /* Disable hardware crypto for management frames */
- REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
- REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
- AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
- ah->sw_mgmt_crypto = true;
- } else
- ah->sw_mgmt_crypto = true;
+ ath9k_hw_init_mfp(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
@@ -1876,24 +1957,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
- ENABLE_REGWRITE_BUFFER(ah);
-
- REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
- REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
- | macStaId1
- | AR_STA_ID1_RTS_USE_DEF
- | (ah->config.
- ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
- | ah->sta_id1_defaults);
- ath_hw_setbssidmask(common);
- REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
- ath9k_hw_write_associd(ah);
- REG_WRITE(ah, AR_ISR, ~0);
- REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
-
- REGWRITE_BUFFER_FLUSH(ah);
-
- ath9k_hw_set_operating_mode(ah, ah->opmode);
+ ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna);
r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
@@ -1901,17 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_set_clockrate(ah);
- ENABLE_REGWRITE_BUFFER(ah);
-
- for (i = 0; i < AR_NUM_DCU; i++)
- REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
-
- REGWRITE_BUFFER_FLUSH(ah);
-
- ah->intr_txqs = 0;
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- ath9k_hw_resettxqueue(ah, i);
-
+ ath9k_hw_init_queues(ah);
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
ath9k_hw_ani_cache_ini_regs(ah);
ath9k_hw_init_qos(ah);
@@ -1966,38 +2020,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REGWRITE_BUFFER_FLUSH(ah);
- /*
- * For big endian systems turn on swapping for descriptors
- */
- if (AR_SREV_9100(ah)) {
- u32 mask;
- mask = REG_READ(ah, AR_CFG);
- if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
- ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
- mask);
- } else {
- mask =
- INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
- REG_WRITE(ah, AR_CFG, mask);
- ath_dbg(common, RESET, "Setting CFG 0x%x\n",
- REG_READ(ah, AR_CFG));
- }
- } else {
- if (common->bus_ops->ath_bus_type == ATH_USB) {
- /* Configure AR9271 target WLAN */
- if (AR_SREV_9271(ah))
- REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
- else
- REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
- }
-#ifdef __BIG_ENDIAN
- else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9550(ah))
- REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
- else
- REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
-#endif
- }
+ ath9k_hw_init_desc(ah);
if (ath9k_hw_btcoex_is_enabled(ah))
ath9k_hw_btcoex_enable(ah);
@@ -2010,7 +2033,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
-
ar9003_hw_disable_phy_restart(ah);
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 784e81ccb90..30e62d92d46 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -363,7 +363,6 @@ enum ath9k_int {
ATH9K_INT_NOCARD = 0xffffffff
};
-#define CHANNEL_CW_INT 0x00002
#define CHANNEL_CCK 0x00020
#define CHANNEL_OFDM 0x00040
#define CHANNEL_2GHZ 0x00080
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 9c0b150d5b8..c61cafa2665 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -387,8 +387,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
u8 tid;
if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
- txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
- (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
+ txinfo->flags & IEEE80211_TX_CTL_INJECTED)
return;
rcu_read_lock();
@@ -981,30 +980,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
txc->s.ampdu_settings, factor);
-
- for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
- txrate = &info->control.rates[i];
- if (txrate->idx >= 0) {
- txc->s.ri[i] =
- CARL9170_TX_SUPER_RI_AMPDU;
-
- if (WARN_ON(!(txrate->flags &
- IEEE80211_TX_RC_MCS))) {
- /*
- * Not sure if it's even possible
- * to aggregate non-ht rates with
- * this HW.
- */
- goto err_out;
- }
- continue;
- }
-
- txrate->idx = 0;
- txrate->count = ar->hw->max_rate_tries;
- }
-
- mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
}
/*
@@ -1012,11 +987,31 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
* taken from mac_control. For all fallback rate, the firmware
* updates the mac_control flags from the rate info field.
*/
- for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
+ for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
+ __le32 phy_set;
txrate = &info->control.rates[i];
if (txrate->idx < 0)
break;
+ phy_set = carl9170_tx_physet(ar, info, txrate);
+ if (i == 0) {
+ /* first rate - part of the hw's frame header */
+ txc->f.phy_control = phy_set;
+
+ if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+ if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
+ else if (carl9170_tx_cts_check(ar, txrate))
+ mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
+
+ } else {
+ /* fallback rates are stored in the firmware's
+ * retry rate set array.
+ */
+ txc->s.rr[i - 1] = phy_set;
+ }
+
SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
txrate->count);
@@ -1027,21 +1022,13 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
- txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
+ if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
+ txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
}
- txrate = &info->control.rates[0];
- SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
-
- if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
- mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
- else if (carl9170_tx_cts_check(ar, txrate))
- mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
-
txc->s.len = cpu_to_le16(skb->len);
txc->f.length = cpu_to_le16(len + FCS_LEN);
txc->f.mac_control = mac_tmp;
- txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
arinfo = (void *)info->rate_driver_data;
arinfo->timeout = jiffies;
@@ -1381,9 +1368,9 @@ static void carl9170_tx(struct ar9170 *ar)
}
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
- struct ieee80211_sta *sta, struct sk_buff *skb)
+ struct ieee80211_sta *sta, struct sk_buff *skb,
+ struct ieee80211_tx_info *txinfo)
{
- struct _carl9170_tx_superframe *super = (void *) skb->data;
struct carl9170_sta_info *sta_info;
struct carl9170_sta_tid *agg;
struct sk_buff *iter;
@@ -1450,7 +1437,7 @@ err_unlock:
err_unlock_rcu:
rcu_read_unlock();
- super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
+ txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
carl9170_tx_status(ar, skb, false);
ar->tx_dropped++;
return false;
@@ -1492,7 +1479,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
* sta == NULL checks are redundant in this
* special case.
*/
- run = carl9170_tx_ampdu_queue(ar, sta, skb);
+ run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
if (run)
carl9170_tx_ampdu(ar);
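The reworked carl9170_tx_prepare() loop above stores the first rate in the frame header (phy_control) and every fallback rate in the firmware's retry set. A driver-independent sketch of that split; the structure and field names below are illustrative only, not carl9170 API:

	#include <stdio.h>

	#define MAX_RATES 4

	struct hdr  { int phy_control; };		/* illustrative frame header */
	struct fw_s { int rr[MAX_RATES - 1]; };		/* illustrative retry rate set */

	int main(void)
	{
		int rates[MAX_RATES] = { 7, 5, 3, -1 };	/* idx < 0 terminates */
		struct hdr f = { 0 };
		struct fw_s s = { { 0 } };

		for (int i = 0; i < MAX_RATES; i++) {
			if (rates[i] < 0)
				break;
			if (i == 0)
				f.phy_control = rates[i];	/* first rate: frame header */
			else
				s.rr[i - 1] = rates[i];		/* fallbacks: retry set */
		}
		printf("hdr=%d rr={%d,%d,%d}\n",
		       f.phy_control, s.rr[0], s.rr[1], s.rr[2]);
		return 0;
	}
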
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 9396dc9fe3c..d288eea0a26 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,5 +9,7 @@ wil6210-objs += wmi.o
wil6210-objs += interrupt.o
wil6210-objs += txrx.o
-subdir-ccflags-y += -Werror
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ subdir-ccflags-y += -Werror
+endif
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9ecc1968262..c5d4a87abaa 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -14,16 +14,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/slab.h>
-#include <linux/version.h>
-#include <net/cfg80211.h>
-
#include "wil6210.h"
#include "wmi.h"
@@ -292,7 +282,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
/* WMI_CONNECT_CMD */
memset(&conn, 0, sizeof(conn));
- switch (bss->capability & 0x03) {
+ switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
case WLAN_CAPABILITY_DMG_TYPE_AP:
conn.network_type = WMI_NETTYPE_INFRA;
break;
@@ -437,17 +427,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
if (rc)
return rc;
- rc = wmi_set_channel(wil, channel->hw_value);
- if (rc)
- return rc;
-
/* MAC address - pre-requisite for other commands */
wmi_set_mac_address(wil, ndev->dev_addr);
/* IE's */
/* bcon 'head IE's are not relevant for 60g band */
- wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
- bcon->beacon_ies);
+ /*
+	 * FW does not form a regular beacon, so beacon IE's are not set.
+	 * For the DMG beacon, when it is supported, the beacon IE's will
+	 * be reused; add something like:
+ * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+ * bcon->beacon_ies);
+ */
wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
bcon->proberesp_ies);
wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
@@ -455,7 +446,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
wil->secure_pcp = info->privacy;
- rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype);
+ rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
+ channel->hw_value);
if (rc)
return rc;
@@ -472,11 +464,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
{
int rc = 0;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
- u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
- /* To stop beaconing, set BI to 0 */
- rc = wmi_set_bcon(wil, 0, wmi_nettype);
+ rc = wmi_pcp_stop(wil);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
deleted file mode 100644
index e5712f026c4..00000000000
--- a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef WIL_DBG_HEXDUMP_H_
-#define WIL_DBG_HEXDUMP_H_
-
-#include <linux/printk.h>
-#include <linux/dynamic_debug.h>
-
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
- dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii)
-
-#else /* defined(CONFIG_DYNAMIC_DEBUG) */
-#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii) \
- print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
- groupsize, buf, len, ascii)
-#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
-
-#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 65fc9683bfd..4be07f5e22b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -312,14 +312,6 @@ static const struct file_operations fops_memread = {
.llseek = seq_lseek,
};
-static int wil_default_open(struct inode *inode, struct file *file)
-{
- if (inode->i_private)
- file->private_data = inode->i_private;
-
- return 0;
-}
-
static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -361,7 +353,7 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
static const struct file_operations fops_ioblob = {
.read = wil_read_file_ioblob,
- .open = wil_default_open,
+ .open = simple_open,
.llseek = default_llseek,
};
@@ -396,7 +388,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
static const struct file_operations fops_reset = {
.write = wil_write_file_reset,
- .open = wil_default_open,
+ .open = simple_open,
};
/*---------Tx descriptor------------*/
@@ -526,7 +518,50 @@ static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
static const struct file_operations fops_ssid = {
.read = wil_read_file_ssid,
.write = wil_write_file_ssid,
- .open = wil_default_open,
+ .open = simple_open,
+};
+
+/*---------temp------------*/
+static void print_temp(struct seq_file *s, const char *prefix, u32 t)
+{
+ switch (t) {
+ case 0:
+ case ~(u32)0:
+ seq_printf(s, "%s N/A\n", prefix);
+ break;
+ default:
+ seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
+ break;
+ }
+}
+
+static int wil_temp_debugfs_show(struct seq_file *s, void *data)
+{
+ struct wil6210_priv *wil = s->private;
+ u32 t_m, t_r;
+
+ int rc = wmi_get_temperature(wil, &t_m, &t_r);
+ if (rc) {
+ seq_printf(s, "Failed\n");
+ return 0;
+ }
+
+ print_temp(s, "MAC temperature :", t_m);
+ print_temp(s, "Radio temperature :", t_r);
+
+ return 0;
+}
+
+static int wil_temp_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wil_temp_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations fops_temp = {
+ .open = wil_temp_seq_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
};
/*----------------*/
@@ -563,6 +598,7 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
+ debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp);
wil->rgf_blob.data = (void * __force)wil->csr + 0;
wil->rgf_blob.size = 0xa000;
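The new "temp" debugfs entry above prints firmware temperature readings delivered in thousandths of a degree, with 0 and ~0 treated as "not available". A small user-space sketch of the same formatting; the sample readings are hypothetical:

	#include <stdio.h>
	#include <stdint.h>

	static void print_temp(const char *prefix, uint32_t t)
	{
		/* 0 and all-ones mean the sensor reading is not available */
		if (t == 0 || t == ~(uint32_t)0) {
			printf("%s N/A\n", prefix);
			return;
		}
		/* value is in 1/1000 of a degree: split into integer and
		 * fractional parts for display */
		printf("%s %u.%03u\n", prefix, t / 1000, t % 1000);
	}

	int main(void)
	{
		print_temp("MAC temperature   :", 42500);	/* -> 42.500 */
		print_temp("Radio temperature :", 0);		/* -> N/A */
		return 0;
	}
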
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index dc97e7b2609..e3c1e7684f9 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -240,6 +240,15 @@ static void wil_notify_fw_error(struct wil6210_priv *wil)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
+static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+{
+	/* make a shadow copy of registers that should not change at run time */
+ wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
+ sizeof(struct wil6210_mbox_ctl));
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
+ wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+}
+
static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
@@ -257,14 +266,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
wil6210_mask_irq_misc(wil);
if (isr & ISR_MISC_FW_ERROR) {
- wil_dbg_irq(wil, "IRQ: Firmware error\n");
+ wil_err(wil, "Firmware error detected\n");
clear_bit(wil_status_fwready, &wil->status);
- wil_notify_fw_error(wil);
- isr &= ~ISR_MISC_FW_ERROR;
+ /*
+		 * do not clear @isr here - the second part runs in the
+		 * threaded handler, where user space gets notified; that
+		 * has to happen in non-atomic context
+ */
}
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
+ wil_cache_mbox_regs(wil);
+ set_bit(wil_status_reset_done, &wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
@@ -289,6 +303,11 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
+ if (isr & ISR_MISC_FW_ERROR) {
+ wil_notify_fw_error(wil);
+ isr &= ~ISR_MISC_FW_ERROR;
+ }
+
if (isr & ISR_MISC_MBOX_EVT) {
wil_dbg_irq(wil, "MBOX event\n");
wmi_recv_cmd(wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 761c389586d..a0478e2f686 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -14,12 +14,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/ieee80211.h>
-#include <linux/wireless.h>
-#include <linux/slab.h>
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
@@ -109,13 +103,24 @@ static void wil_connect_timer_fn(ulong x)
schedule_work(&wil->disconnect_worker);
}
-static void wil_cache_mbox_regs(struct wil6210_priv *wil)
+static void wil_connect_worker(struct work_struct *work)
{
- /* make shadow copy of registers that should not change on run time */
- wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
- sizeof(struct wil6210_mbox_ctl));
- wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
- wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
+ int rc;
+ struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
+ connect_worker);
+ int cid = wil->pending_connect_cid;
+
+ if (cid < 0) {
+ wil_err(wil, "No connection pending\n");
+ return;
+ }
+
+ wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
+
+ rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0);
+ wil->pending_connect_cid = -1;
+ if (rc == 0)
+ wil_link_on(wil);
}
int wil_priv_init(struct wil6210_priv *wil)
@@ -130,7 +135,7 @@ int wil_priv_init(struct wil6210_priv *wil)
wil->pending_connect_cid = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
- INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker);
+ INIT_WORK(&wil->connect_worker, wil_connect_worker);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
@@ -147,8 +152,6 @@ int wil_priv_init(struct wil6210_priv *wil)
return -EAGAIN;
}
- wil_cache_mbox_regs(wil);
-
return 0;
}
@@ -185,15 +188,11 @@ static void wil_target_reset(struct wil6210_priv *wil)
W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
- msleep(100);
-
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
- msleep(100);
-
W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
@@ -203,12 +202,6 @@ static void wil_target_reset(struct wil6210_priv *wil)
W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
- msleep(2000);
-
- W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
-
- msleep(2000);
-
wil_dbg_misc(wil, "Reset completed\n");
#undef W
@@ -265,8 +258,6 @@ int wil_reset(struct wil6210_priv *wil)
wil->pending_connect_cid = -1;
INIT_COMPLETION(wil->wmi_ready);
- wil_cache_mbox_regs(wil);
-
/* TODO: release MAC reset */
wil6210_enable_irq(wil);
@@ -352,9 +343,9 @@ static int __wil_up(struct wil6210_priv *wil)
wil_err(wil, "SSID not set\n");
return -EINVAL;
}
- wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
- if (channel)
- wmi_set_channel(wil, channel->hw_value);
+ rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
+ if (rc)
+ return rc;
break;
default:
break;
@@ -364,9 +355,12 @@ static int __wil_up(struct wil6210_priv *wil)
wmi_set_mac_address(wil, ndev->dev_addr);
/* Set up beaconing if required. */
- rc = wmi_set_bcon(wil, bi, wmi_nettype);
- if (rc)
- return rc;
+ if (bi > 0) {
+ rc = wmi_pcp_start(wil, bi, wmi_nettype,
+ (channel ? channel->hw_value : 0));
+ if (rc)
+ return rc;
+ }
/* Rx VRING. After MAC and beacon */
wil_rx_init(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 8ce2e33dce2..098a8ec6b84 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -14,10 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/slab.h>
#include "wil6210.h"
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 81c35c6e383..eb1dc7ad80f 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -14,10 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d1315b44237..79d4e3271b0 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -14,10 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/hardirq.h>
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
@@ -83,8 +80,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
*/
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
if (!vring->va) {
- wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
- vring->size);
kfree(vring->ctx);
vring->ctx = NULL;
return -ENOMEM;
@@ -560,7 +555,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
if (rc)
goto out_free;
- if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) {
+ if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
wil_err(wil, "Tx config failed, status 0x%02x\n",
reply.cmd.status);
rc = -EINVAL;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index aea961ff8f0..8f76ecd8a7e 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -21,8 +21,6 @@
#include <linux/wireless.h>
#include <net/cfg80211.h>
-#include "dbg_hexdump.h"
-
#define WIL_NAME "wil6210"
/**
@@ -188,6 +186,7 @@ enum { /* for wil6210_priv.status */
wil_status_fwready = 0,
wil_status_fwconnected,
wil_status_dontscan,
+ wil_status_reset_done,
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
};
@@ -210,6 +209,8 @@ struct wil6210_priv {
struct wireless_dev *wdev;
void __iomem *csr;
ulong status;
+ u32 fw_version;
+ u8 n_mids; /* number of additional MIDs as reported by FW */
/* profile */
u32 monitor_flags;
u32 secure_pcp; /* create secure PCP? */
@@ -227,7 +228,7 @@ struct wil6210_priv {
struct workqueue_struct *wmi_wq; /* for deferred calls */
struct work_struct wmi_event_worker;
struct workqueue_struct *wmi_wq_conn; /* for connect worker */
- struct work_struct wmi_connect_worker;
+ struct work_struct connect_worker;
struct work_struct disconnect_worker;
struct timer_list connect_timer;
int pending_connect_cid;
@@ -277,13 +278,13 @@ struct wil6210_priv {
#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
- wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\
+ print_hex_dump_debug("DBG[TXRX]" prefix_str,\
prefix_type, rowsize, \
groupsize, buf, len, ascii)
#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
- wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\
+ print_hex_dump_debug("DBG[ WMI]" prefix_str,\
prefix_type, rowsize, \
groupsize, buf, len, ascii)
@@ -313,7 +314,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
void wmi_recv_cmd(struct wil6210_priv *wil);
int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
u16 reply_id, void *reply, u8 reply_size, int to_msec);
-void wmi_connect_worker(struct work_struct *work);
void wmi_event_worker(struct work_struct *work);
void wmi_event_flush(struct wil6210_priv *wil);
int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
@@ -328,6 +328,8 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wil6210_init_irq(struct wil6210_priv *wil, int irq);
void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
@@ -341,7 +343,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev);
void wil_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
-int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype);
+int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
+int wmi_pcp_stop(struct wil6210_priv *wil);
void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
int wil_rx_init(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0bb3b76b4b5..45b04e383f9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -14,9 +14,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/list.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
@@ -272,16 +269,18 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil->wdev;
struct wmi_ready_event *evt = d;
- u32 ver = le32_to_cpu(evt->sw_version);
+ wil->fw_version = le32_to_cpu(evt->sw_version);
+ wil->n_mids = evt->numof_additional_mids;
- wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac);
+ wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
+ evt->mac, wil->n_mids);
if (!is_valid_ether_addr(ndev->dev_addr)) {
memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
}
snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
- "%d", ver);
+ "%d", wil->fw_version);
}
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
@@ -324,17 +323,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
struct cfg80211_bss *bss;
- u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp);
- u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info);
- u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int);
- const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
- size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
- wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
-
- bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
- tsf, cap, bi, ie_buf, ie_len,
- signal, GFP_KERNEL);
+
+ bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
+ d_len, signal, GFP_KERNEL);
if (bss) {
wil_dbg_wmi(wil, "Added BSS %pM\n",
rx_mgmt_frame->bssid);
@@ -342,6 +333,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
} else {
wil_err(wil, "cfg80211_inform_bss() failed\n");
}
+ } else {
+ cfg80211_rx_mgmt(wil->wdev, freq, signal,
+ (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
}
}
@@ -443,7 +437,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
wil->pending_connect_cid = evt->cid;
- queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker);
+ queue_work(wil->wmi_wq_conn, &wil->connect_worker);
}
static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
@@ -528,6 +522,37 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
}
}
+static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_data_port_open_event *evt = d;
+
+ wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
+
+ netif_carrier_on(ndev);
+}
+
+static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
+{
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wmi_wbe_link_down_event *evt = d;
+
+ wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
+ evt->cid, le32_to_cpu(evt->reason));
+
+ netif_carrier_off(ndev);
+}
+
+static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
+ int len)
+{
+ struct wmi_vring_ba_status_event *evt = d;
+
+ wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
+ evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
+ __le16_to_cpu(evt->ba_timeout));
+}
+
static const struct {
int eventid;
void (*handler)(struct wil6210_priv *wil, int eventid,
@@ -541,6 +566,9 @@ static const struct {
{WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
{WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
{WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
+ {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
+ {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
+ {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
};
/*
@@ -559,6 +587,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
void __iomem *src;
ulong flags;
+ if (!test_bit(wil_status_reset_done, &wil->status)) {
+ wil_err(wil, "Reset not completed\n");
+ return;
+ }
+
for (;;) {
u16 len;
@@ -683,18 +716,39 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
}
-int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype)
+int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
{
- struct wmi_bcon_ctrl_cmd cmd = {
+ int rc;
+
+ struct wmi_pcp_start_cmd cmd = {
.bcon_interval = cpu_to_le16(bi),
.network_type = wmi_nettype,
.disable_sec_offload = 1,
+ .channel = chan,
};
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_pcp_started_event evt;
+ } __packed reply;
if (!wil->secure_pcp)
cmd.disable_sec = 1;
- return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd));
+ rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
+ WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ return rc;
+
+ if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int wmi_pcp_stop(struct wil6210_priv *wil)
+{
+ return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
+ WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
}
int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
@@ -765,6 +819,16 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
return 0;
}
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
+{
+ struct wmi_p2p_cfg_cmd cmd = {
+ .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
+ .channel = channel - 1,
+ };
+
+ return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
+}
+
int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
{
struct wmi_eapol_tx_cmd *cmd;
@@ -843,7 +907,7 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
/* BUG: FW API define ieLen as u8. Will fix FW */
cmd->ie_len = cpu_to_le16(ie_len);
memcpy(cmd->ie_info, ie, ie_len);
- rc = wmi_send(wil, WMI_SET_APPIE_CMDID, &cmd, len);
+ rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
kfree(cmd);
return rc;
@@ -898,6 +962,31 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
return rc;
}
+int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
+{
+ int rc;
+ struct wmi_temp_sense_cmd cmd = {
+ .measure_marlon_m_en = cpu_to_le32(!!t_m),
+ .measure_marlon_r_en = cpu_to_le32(!!t_r),
+ };
+ struct {
+ struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_temp_sense_done_event evt;
+ } __packed reply;
+
+ rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd),
+ WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
+ if (rc)
+ return rc;
+
+ if (t_m)
+ *t_m = le32_to_cpu(reply.evt.marlon_m_t1000);
+ if (t_r)
+ *t_r = le32_to_cpu(reply.evt.marlon_r_t1000);
+
+ return 0;
+}
+
void wmi_event_flush(struct wil6210_priv *wil)
{
struct pending_wmi_event *evt, *t;
@@ -997,24 +1086,3 @@ void wmi_event_worker(struct work_struct *work)
kfree(evt);
}
}
-
-void wmi_connect_worker(struct work_struct *work)
-{
- int rc;
- struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
- wmi_connect_worker);
-
- if (wil->pending_connect_cid < 0) {
- wil_err(wil, "No connection pending\n");
- return;
- }
-
- wil_dbg_wmi(wil, "Configure for connection CID %d\n",
- wil->pending_connect_cid);
-
- rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
- wil->pending_connect_cid, 0);
- wil->pending_connect_cid = -1;
- if (rc == 0)
- wil_link_on(wil);
-}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 3bbf87572b0..50b8528394f 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -36,6 +36,7 @@
enum wmi_command_id {
WMI_CONNECT_CMDID = 0x0001,
WMI_DISCONNECT_CMDID = 0x0003,
+ WMI_DISCONNECT_STA_CMDID = 0x0004,
WMI_START_SCAN_CMDID = 0x0007,
WMI_SET_BSS_FILTER_CMDID = 0x0009,
WMI_SET_PROBED_SSID_CMDID = 0x000a,
@@ -44,7 +45,6 @@ enum wmi_command_id {
WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
WMI_SET_APPIE_CMDID = 0x003f,
- WMI_GET_APPIE_CMDID = 0x0040,
WMI_SET_WSC_STATUS_CMDID = 0x0041,
WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
@@ -55,11 +55,11 @@ enum wmi_command_id {
WMI_DEEP_ECHO_CMDID = 0x0804,
WMI_CONFIG_MAC_CMDID = 0x0805,
WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
- WMI_ADD_STATION_CMDID = 0x0807,
WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
WMI_FS_TUNE_CMDID = 0x080a,
WMI_CORR_MEASURE_CMDID = 0x080b,
+ WMI_READ_RSSI_CMDID = 0x080c,
WMI_TEMP_SENSE_CMDID = 0x080e,
WMI_DC_CALIB_CMDID = 0x080f,
WMI_SEND_TONE_CMDID = 0x0810,
@@ -75,9 +75,9 @@ enum wmi_command_id {
MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
+ WMI_RF_RX_TEST_CMDID = 0x081e,
WMI_CFG_RX_CHAIN_CMDID = 0x0820,
WMI_VRING_CFG_CMDID = 0x0821,
- WMI_RX_ON_CMDID = 0x0822,
WMI_VRING_BA_EN_CMDID = 0x0823,
WMI_VRING_BA_DIS_CMDID = 0x0824,
WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
@@ -87,7 +87,6 @@ enum wmi_command_id {
WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
WMI_SW_TX_REQ_CMDID = 0x082b,
- WMI_RX_OFF_CMDID = 0x082c,
WMI_READ_MAC_RXQ_CMDID = 0x0830,
WMI_READ_MAC_TXQ_CMDID = 0x0831,
WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
@@ -112,6 +111,18 @@ enum wmi_command_id {
WMI_FLASH_READ_CMDID = 0x0902,
WMI_FLASH_WRITE_CMDID = 0x0903,
WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
+ /*P2P*/
+ WMI_P2P_CFG_CMDID = 0x0910,
+ WMI_PORT_ALLOCATE_CMDID = 0x0911,
+ WMI_PORT_DELETE_CMDID = 0x0912,
+ WMI_POWER_MGMT_CFG_CMDID = 0x0913,
+ WMI_START_LISTEN_CMDID = 0x0914,
+ WMI_START_SEARCH_CMDID = 0x0915,
+ WMI_DISCOVERY_START_CMDID = 0x0916,
+ WMI_DISCOVERY_STOP_CMDID = 0x0917,
+ WMI_PCP_START_CMDID = 0x0918,
+ WMI_PCP_STOP_CMDID = 0x0919,
+ WMI_GET_PCP_FACTOR_CMDID = 0x091b,
WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
WMI_ABORT_SCAN_CMDID = 0xf007,
@@ -132,18 +143,6 @@ enum wmi_command_id {
*/
/*
- * Frame Types
- */
-enum wmi_mgmt_frame_type {
- WMI_FRAME_BEACON = 0,
- WMI_FRAME_PROBE_REQ = 1,
- WMI_FRAME_PROBE_RESP = 2,
- WMI_FRAME_ASSOC_REQ = 3,
- WMI_FRAME_ASSOC_RESP = 4,
- WMI_NUM_MGMT_FRAME,
-};
-
-/*
* WMI_CONNECT_CMDID
*/
enum wmi_network_type {
@@ -184,7 +183,7 @@ enum wmi_crypto_type {
enum wmi_connect_ctrl_flag_bits {
WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
WMI_CONNECT_SEND_REASSOC = 0x0002,
- WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004,
+ WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004,
WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
@@ -212,6 +211,13 @@ struct wmi_connect_cmd {
u8 reserved1[2];
} __packed;
+/*
+ * WMI_DISCONNECT_STA_CMDID
+ */
+struct wmi_disconnect_sta_cmd {
+ u8 dst_mac[WMI_MAC_LEN];
+ __le16 disconnect_reason;
+} __packed;
/*
* WMI_RECONNECT_CMDID
@@ -289,10 +295,12 @@ struct wmi_delete_cipher_key_cmd {
enum wmi_scan_type {
WMI_LONG_SCAN = 0,
WMI_SHORT_SCAN = 1,
+ WMI_PBC_SCAN = 2,
};
struct wmi_start_scan_cmd {
u8 reserved[8];
+
__le32 home_dwell_time; /* Max duration in the home channel(ms) */
__le32 force_scan_interval; /* Time interval between scans (ms)*/
u8 scan_type; /* wmi_scan_type */
@@ -309,7 +317,7 @@ struct wmi_start_scan_cmd {
/*
* WMI_SET_PROBED_SSID_CMDID
*/
-#define MAX_PROBED_SSID_INDEX (15)
+#define MAX_PROBED_SSID_INDEX (3)
enum wmi_ssid_flag {
WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
@@ -328,6 +336,20 @@ struct wmi_probed_ssid_cmd {
* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
*/
+#define WMI_MAX_IE_LEN (1024)
+
+/*
+ * Frame Types
+ */
+enum wmi_mgmt_frame_type {
+ WMI_FRAME_BEACON = 0,
+ WMI_FRAME_PROBE_REQ = 1,
+ WMI_FRAME_PROBE_RESP = 2,
+ WMI_FRAME_ASSOC_REQ = 3,
+ WMI_FRAME_ASSOC_RESP = 4,
+ WMI_NUM_MGMT_FRAME,
+};
+
struct wmi_set_appie_cmd {
u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
u8 reserved;
@@ -335,13 +357,18 @@ struct wmi_set_appie_cmd {
u8 ie_info[0];
} __packed;
-#define WMI_MAX_IE_LEN (1024)
+/*
+ * WMI_PXMT_RANGE_CFG_CMDID
+ */
struct wmi_pxmt_range_cfg_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 range;
} __packed;
+/*
+ * WMI_PXMT_SNR2_RANGE_CFG_CMDID
+ */
struct wmi_pxmt_snr2_range_cfg_cmd {
s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
} __packed;
@@ -359,6 +386,23 @@ struct wmi_rf_mgmt_cmd {
__le32 rf_mgmt_type;
} __packed;
+
+/*
+ * WMI_RF_RX_TEST_CMDID
+ */
+struct wmi_rf_rx_test_cmd {
+ __le32 sector;
+} __packed;
+
+/*
+ * WMI_CORR_MEASURE_CMDID
+ */
+struct wmi_corr_measure_cmd {
+ s32 freq_mhz;
+ __le32 length_samples;
+ __le32 iterations;
+} __packed;
+
/*
* WMI_SET_SSID_CMDID
*/
@@ -388,6 +432,74 @@ struct wmi_bcon_ctrl_cmd {
u8 disable_sec;
} __packed;
+
+/******* P2P ***********/
+
+/*
+ * WMI_PORT_ALLOCATE_CMDID
+ */
+enum wmi_port_role {
+ WMI_PORT_STA = 0,
+ WMI_PORT_PCP = 1,
+ WMI_PORT_AP = 2,
+ WMI_PORT_P2P_DEV = 3,
+ WMI_PORT_P2P_CLIENT = 4,
+ WMI_PORT_P2P_GO = 5,
+};
+
+struct wmi_port_allocate_cmd {
+ u8 mac[WMI_MAC_LEN];
+ u8 port_role;
+ u8 midid;
+} __packed;
+
+/*
+ * WMI_PORT_DELETE_CMDID
+ */
+struct wmi_delete_port_cmd {
+ u8 mid;
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_P2P_CFG_CMDID
+ */
+enum wmi_discovery_mode {
+ WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
+ WMI_DISCOVERY_MODE_OFFLOAD = 1,
+};
+
+struct wmi_p2p_cfg_cmd {
+ u8 discovery_mode; /* wmi_discovery_mode */
+ u8 channel;
+	__le16 bcon_interval; /* base for listen/search duration calculation */
+} __packed;
+
+/*
+ * WMI_POWER_MGMT_CFG_CMDID
+ */
+enum wmi_power_source_type {
+ WMI_POWER_SOURCE_BATTERY = 0,
+ WMI_POWER_SOURCE_OTHER = 1,
+};
+
+struct wmi_power_mgmt_cfg_cmd {
+ u8 power_source; /* wmi_power_source_type */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PCP_START_CMDID
+ */
+struct wmi_pcp_start_cmd {
+ __le16 bcon_interval;
+ u8 reserved0[10];
+ u8 network_type;
+ u8 channel;
+ u8 disable_sec_offload;
+ u8 disable_sec;
+} __packed;
+
/*
* WMI_SW_TX_REQ_CMDID
*/
@@ -435,16 +547,17 @@ enum wmi_vring_cfg_schd_params_priority {
WMI_SCH_PRIO_HIGH = 1,
};
+#define CIDXTID_CID_POS (0)
+#define CIDXTID_CID_LEN (4)
+#define CIDXTID_CID_MSK (0xF)
+#define CIDXTID_TID_POS (4)
+#define CIDXTID_TID_LEN (4)
+#define CIDXTID_TID_MSK (0xF0)
+
struct wmi_vring_cfg {
struct wmi_sw_ring_cfg tx_sw_ring;
u8 ringid; /* 0-23 vrings */
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
u8 encap_trans_type;
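The CIDXTID_* definitions consolidated above describe a single byte carrying a 4-bit connection id in the low nibble and a 4-bit TID in the high nibble. A sketch of packing and unpacking such a field; the helper name is hypothetical, only the position/mask values come from the header:

	#include <stdio.h>
	#include <stdint.h>

	#define CIDXTID_CID_POS (0)
	#define CIDXTID_CID_MSK (0xF)
	#define CIDXTID_TID_POS (4)
	#define CIDXTID_TID_MSK (0xF0)

	static uint8_t cidxtid_pack(uint8_t cid, uint8_t tid)
	{
		return ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
		       ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
	}

	int main(void)
	{
		uint8_t v = cidxtid_pack(3, 5);		/* -> 0x53 */

		printf("cidxtid=0x%02x cid=%u tid=%u\n", v,
		       (v & CIDXTID_CID_MSK) >> CIDXTID_CID_POS,
		       (v & CIDXTID_TID_MSK) >> CIDXTID_TID_POS);
		return 0;
	}
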
@@ -501,8 +614,14 @@ struct wmi_vring_ba_dis_cmd {
*/
struct wmi_notify_req_cmd {
u8 cid;
- u8 reserved[3];
+ u8 year;
+ u8 month;
+ u8 day;
__le32 interval_usec;
+ u8 hour;
+ u8 minute;
+ u8 second;
+ u8 miliseconds;
} __packed;
/*
@@ -548,6 +667,11 @@ enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
};
+enum wmi_cfg_rx_chain_cmd_reorder_type {
+ WMI_RX_HW_REORDER = 0,
+ WMI_RX_SW_REORDER = 1,
+};
+
struct wmi_cfg_rx_chain_cmd {
__le32 action;
struct wmi_sw_ring_cfg rx_sw_ring;
@@ -596,7 +720,8 @@ struct wmi_cfg_rx_chain_cmd {
__le16 wb_thrsh;
__le32 itr_value;
__le16 host_thrsh;
- u8 reserved[2];
+ u8 reorder_type;
+ u8 reserved;
struct wmi_sniffer_cfg sniffer_cfg;
} __packed;
@@ -604,15 +729,7 @@ struct wmi_cfg_rx_chain_cmd {
* WMI_RCP_ADDBA_RESP_CMDID
*/
struct wmi_rcp_addba_resp_cmd {
-
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
-
u8 dialog_token;
__le16 status_code;
__le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
@@ -623,15 +740,7 @@ struct wmi_rcp_addba_resp_cmd {
* WMI_RCP_DELBA_CMDID
*/
struct wmi_rcp_delba_cmd {
-
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
-
u8 reserved;
__le16 reason;
} __packed;
@@ -640,15 +749,7 @@ struct wmi_rcp_delba_cmd {
* WMI_RCP_ADDBA_REQ_CMDID
*/
struct wmi_rcp_addba_req_cmd {
-
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
-
u8 dialog_token;
/* ieee80211_ba_parameterset field as it received */
__le16 ba_param_set;
@@ -665,7 +766,6 @@ struct wmi_set_mac_address_cmd {
u8 reserved[2];
} __packed;
-
/*
* WMI_EAPOL_TX_CMDID
*/
@@ -692,6 +792,17 @@ struct wmi_echo_cmd {
} __packed;
/*
+ * WMI_TEMP_SENSE_CMDID
+ *
+ * Measure MAC and radio temperatures
+ */
+struct wmi_temp_sense_cmd {
+ __le32 measure_marlon_m_en;
+ __le32 measure_marlon_r_en;
+} __packed;
+
+
+/*
* WMI Events
*/
@@ -699,7 +810,6 @@ struct wmi_echo_cmd {
* List of Events (target to host)
*/
enum wmi_event_id {
- WMI_IMM_RSP_EVENTID = 0x0000,
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
@@ -709,13 +819,9 @@ enum wmi_event_id {
WMI_FW_READY_EVENTID = 0x1801,
WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
WMI_ECHO_RSP_EVENTID = 0x1803,
- WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
- WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
- WMI_ADD_STATION_DONE_EVENTID = 0x1807,
- WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
- WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
WMI_FS_TUNE_DONE_EVENTID = 0x180a,
- WMI_CORR_MEASURE_DONE_EVENTID = 0x180b,
+ WMI_CORR_MEASURE_EVENTID = 0x180b,
+ WMI_READ_RSSI_EVENTID = 0x180c,
WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
WMI_DC_CALIB_DONE_EVENTID = 0x180f,
WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
@@ -727,10 +833,9 @@ enum wmi_event_id {
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
-
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181e,
WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
- WMI_RX_ON_DONE_EVENTID = 0x1822,
WMI_BA_STATUS_EVENTID = 0x1823,
WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
@@ -738,7 +843,6 @@ enum wmi_event_id {
WMI_GET_SSID_EVENTID = 0x1828,
WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
- WMI_RX_OFF_DONE_EVENTID = 0x182c,
WMI_READ_MAC_RXQ_EVENTID = 0x1830,
WMI_READ_MAC_TXQ_EVENTID = 0x1831,
@@ -765,7 +869,16 @@ enum wmi_event_id {
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
-
+ /* P2P */
+ WMI_PORT_ALLOCATED_EVENTID = 0x1911,
+ WMI_PORT_DELETED_EVENTID = 0x1912,
+ WMI_LISTEN_STARTED_EVENTID = 0x1914,
+ WMI_SEARCH_STARTED_EVENTID = 0x1915,
+ WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
+ WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
+ WMI_PCP_STARTED_EVENTID = 0x1918,
+ WMI_PCP_STOPPED_EVENTID = 0x1919,
+ WMI_PCP_FACTOR_EVENTID = 0x191a,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -777,6 +890,12 @@ enum wmi_event_id {
* Events data structures
*/
+
+enum wmi_fw_status {
+ WMI_FW_STATUS_SUCCESS,
+ WMI_FW_STATUS_FAILURE,
+};
+
/*
* WMI_RF_MGMT_STATUS_EVENTID
*/
@@ -857,7 +976,7 @@ struct wmi_ready_event {
__le32 abi_version;
u8 mac[WMI_MAC_LEN];
u8 phy_capability; /* enum wmi_phy_capability */
- u8 reserved;
+ u8 numof_additional_mids;
} __packed;
/*
@@ -876,6 +995,8 @@ struct wmi_notify_req_done_event {
__le16 other_rx_sector;
__le16 other_tx_sector;
__le16 range;
+ u8 sqi;
+ u8 reserved[3];
} __packed;
/*
@@ -951,27 +1072,15 @@ struct wmi_vring_ba_status_event {
* WMI_DELBA_EVENTID
*/
struct wmi_delba_event {
-
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
-
u8 from_initiator;
__le16 reason;
} __packed;
+
/*
* WMI_VRING_CFG_DONE_EVENTID
*/
-enum wmi_vring_cfg_done_event_status {
- WMI_VRING_CFG_SUCCESS = 0,
- WMI_VRING_CFG_FAILURE = 1,
-};
-
struct wmi_vring_cfg_done_event {
u8 ringid;
u8 status;
@@ -982,21 +1091,8 @@ struct wmi_vring_cfg_done_event {
/*
* WMI_ADDBA_RESP_SENT_EVENTID
*/
-enum wmi_rcp_addba_resp_sent_event_status {
- WMI_ADDBA_SUCCESS = 0,
- WMI_ADDBA_FAIL = 1,
-};
-
struct wmi_rcp_addba_resp_sent_event {
-
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
-
u8 reserved;
__le16 status;
} __packed;
@@ -1005,15 +1101,7 @@ struct wmi_rcp_addba_resp_sent_event {
* WMI_RCP_ADDBA_REQ_EVENTID
*/
struct wmi_rcp_addba_req_event {
-
- #define CIDXTID_CID_POS (0)
- #define CIDXTID_CID_LEN (4)
- #define CIDXTID_CID_MSK (0xF)
- #define CIDXTID_TID_POS (4)
- #define CIDXTID_TID_LEN (4)
- #define CIDXTID_TID_MSK (0xF0)
u8 cidxtid;
-
u8 dialog_token;
__le16 ba_param_set; /* ieee80211_ba_parameterset as it received */
__le16 ba_timeout;
@@ -1055,6 +1143,7 @@ struct wmi_data_port_open_event {
u8 reserved[3];
} __packed;
+
/*
* WMI_GET_PCP_CHANNEL_EVENTID
*/
@@ -1063,6 +1152,54 @@ struct wmi_get_pcp_channel_event {
u8 reserved[3];
} __packed;
+
+/*
+* WMI_PORT_ALLOCATED_EVENTID
+*/
+struct wmi_port_allocated_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+* WMI_PORT_DELETED_EVENTID
+*/
+struct wmi_port_deleted_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_LISTEN_STARTED_EVENTID
+ */
+struct wmi_listen_started_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_SEARCH_STARTED_EVENTID
+ */
+struct wmi_search_started_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PCP_STARTED_EVENTID
+ */
+struct wmi_pcp_started_event {
+ u8 status; /* wmi_fw_status */
+ u8 reserved[3];
+} __packed;
+
+/*
+ * WMI_PCP_FACTOR_EVENTID
+ */
+struct wmi_pcp_factor_event {
+ __le32 pcp_factor;
+} __packed;
+
/*
* WMI_SW_TX_COMPLETE_EVENTID
*/
@@ -1078,6 +1215,23 @@ struct wmi_sw_tx_complete_event {
} __packed;
/*
+ * WMI_CORR_MEASURE_EVENTID
+ */
+struct wmi_corr_measure_event {
+ s32 i;
+ s32 q;
+ s32 image_i;
+ s32 image_q;
+} __packed;
+
+/*
+ * WMI_READ_RSSI_EVENTID
+ */
+struct wmi_read_rssi_event {
+ __le32 ina_rssi_adc_dbm;
+} __packed;
+
+/*
* WMI_GET_SSID_EVENTID
*/
struct wmi_get_ssid_event {
@@ -1091,7 +1245,8 @@ struct wmi_get_ssid_event {
struct wmi_rx_mgmt_info {
u8 mcs;
s8 snr;
- __le16 range;
+ u8 range;
+ u8 sqi;
__le16 stype;
__le16 status;
__le32 len;
@@ -1113,4 +1268,14 @@ struct wmi_echo_event {
__le32 echoed_value;
} __packed;
+/*
+ * WMI_TEMP_SENSE_DONE_EVENTID
+ *
+ * Measure MAC and radio temperatures
+ */
+struct wmi_temp_sense_done_event {
+ __le32 marlon_m_t1000;
+ __le32 marlon_r_t1000;
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 287c6b670a3..078e6f3477a 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -131,7 +131,7 @@ config B43_PHY_LP
config B43_PHY_HT
bool "Support for HT-PHY (high throughput) devices"
- depends on B43
+ depends on B43 && B43_BCMA
---help---
Support for the HT-PHY.
@@ -166,8 +166,8 @@ config B43_DEBUG
Broadcom 43xx debugging.
This adds additional runtime sanity checks and statistics to the driver.
- These checks and statistics might me expensive and hurt runtime performance
- of your system.
+ These checks and statistics might be expensive and hurt the runtime
+ performance of your system.
This also adds the b43 debugfs interface.
Do not enable this, unless you are debugging the driver.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 10e288d470e..fe4a77ee05c 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -473,6 +473,12 @@ enum {
#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */
#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */
+/* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */
+#define B43_BCMA_CLKCTLST_80211_PLL_REQ 0x00000100
+#define B43_BCMA_CLKCTLST_PHY_PLL_REQ 0x00000200
+#define B43_BCMA_CLKCTLST_80211_PLL_ST 0x01000000
+#define B43_BCMA_CLKCTLST_PHY_PLL_ST 0x02000000
+
/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */
#define B43_BCMA_IOCTL_PHY_CLKEN 0x00000004 /* PHY Clock Enable */
#define B43_BCMA_IOCTL_PHY_RESET 0x00000008 /* PHY Reset */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 122146943bf..523355b8765 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -419,8 +419,6 @@ static inline
static int alloc_ringmemory(struct b43_dmaring *ring)
{
- gfp_t flags = GFP_KERNEL;
-
/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
* alignment and 8K buffers for 64-bit DMA with 8K alignment.
* In practice we could use smaller buffers for the latter, but the
@@ -435,12 +433,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
ring_mem_size, &(ring->dmabase),
- flags);
- if (!ring->descbase) {
- b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ring->descbase)
return -ENOMEM;
- }
- memset(ring->descbase, 0, ring_mem_size);
return 0;
}
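For context on the alloc_ringmemory() hunk above: instead of allocating with GFP_KERNEL and clearing the descriptor ring with a separate memset(), the coherent buffer is now requested pre-zeroed. A hedged kernel-context sketch of the pattern this hunk switches to (not tied to b43 specifics; the function name and parameters are placeholders):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: allocate a zeroed coherent descriptor ring. */
static void *alloc_zeroed_ring(struct device *dma_dev, size_t ring_mem_size,
			       dma_addr_t *dmabase)
{
	/*
	 * GFP_KERNEL | __GFP_ZERO replaces the former
	 * dma_alloc_coherent(..., GFP_KERNEL) + memset(..., 0, size) pair.
	 */
	return dma_alloc_coherent(dma_dev, ring_mem_size, dmabase,
				  GFP_KERNEL | __GFP_ZERO);
}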
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 05682736e46..c4d0cc58255 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1189,10 +1189,15 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
{
+ u32 req = B43_BCMA_CLKCTLST_80211_PLL_REQ |
+ B43_BCMA_CLKCTLST_PHY_PLL_REQ;
+ u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
+ B43_BCMA_CLKCTLST_PHY_PLL_ST;
+
b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
b43_bcma_phy_reset(dev);
- bcma_core_pll_ctl(dev->dev->bdev, 0x300, 0x3000000, true);
+ bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
}
#endif
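The b43_bcma_wireless_core_reset() hunk above replaces the magic bcma_core_pll_ctl() arguments 0x300 and 0x3000000 with the named B43_BCMA_CLKCTLST_* bits added to b43.h earlier in this patch. A small standalone compile-time check, using the values from that b43.h hunk, confirming the named bits OR together to the old literals:

/* Values copied from the b43.h hunk in this patch. */
#define B43_BCMA_CLKCTLST_80211_PLL_REQ	0x00000100
#define B43_BCMA_CLKCTLST_PHY_PLL_REQ	0x00000200
#define B43_BCMA_CLKCTLST_80211_PLL_ST	0x01000000
#define B43_BCMA_CLKCTLST_PHY_PLL_ST	0x02000000

/* C11 compile-time checks: the named bits are exactly the old arguments. */
_Static_assert((B43_BCMA_CLKCTLST_80211_PLL_REQ |
		B43_BCMA_CLKCTLST_PHY_PLL_REQ) == 0x300,
	       "request bits match the old literal");
_Static_assert((B43_BCMA_CLKCTLST_80211_PLL_ST |
		B43_BCMA_CLKCTLST_PHY_PLL_ST) == 0x3000000,
	       "status bits match the old literal");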
diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c
index 7416c5e9154..b8667706fc2 100644
--- a/drivers/net/wireless/b43/phy_ht.c
+++ b/drivers/net/wireless/b43/phy_ht.c
@@ -154,9 +154,84 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
}
/**************************************************
+ * RF
+ **************************************************/
+
+static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
+{
+ u8 i;
+
+ u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
+ b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
+
+ b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
+ for (i = 0; i < 200; i++) {
+ if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
+ i = 0;
+ break;
+ }
+ msleep(1);
+ }
+ if (i)
+ b43err(dev->wl, "Forcing RF sequence timeout\n");
+
+ b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
+}
+
+static void b43_phy_ht_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_ht *htphy = dev->phy.ht;
+ static const u16 regs[3] = { B43_PHY_HT_RF_CTL_INT_C1,
+ B43_PHY_HT_RF_CTL_INT_C2,
+ B43_PHY_HT_RF_CTL_INT_C3 };
+ int i;
+
+ if (enable) {
+ for (i = 0; i < 3; i++)
+ b43_phy_write(dev, regs[i], htphy->rf_ctl_int_save[i]);
+ } else {
+ for (i = 0; i < 3; i++)
+ htphy->rf_ctl_int_save[i] = b43_phy_read(dev, regs[i]);
+ /* TODO: Does 5GHz band use different value (not 0x0400)? */
+ for (i = 0; i < 3; i++)
+ b43_phy_write(dev, regs[i], 0x0400);
+ }
+}
+
+/**************************************************
* Various PHY ops
**************************************************/
+static u16 b43_phy_ht_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+ u16 allowed = B43_PHY_HT_CLASS_CTL_CCK_EN |
+ B43_PHY_HT_CLASS_CTL_OFDM_EN |
+ B43_PHY_HT_CLASS_CTL_WAITED_EN;
+
+ tmp = b43_phy_read(dev, B43_PHY_HT_CLASS_CTL);
+ tmp &= allowed;
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_PHY_HT_CLASS_CTL, ~allowed, tmp);
+
+ return tmp;
+}
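b43_phy_ht_classifier() above is a read-modify-write restricted to the three "allowed" classifier bits: the current register value is masked to the allowed set, the caller's mask is cleared, the caller's value is OR-ed in, and the final maskset leaves everything outside the allowed set untouched. A standalone model of that update logic (the register read/write is replaced by a plain variable; names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CLASS_CTL_CCK_EN	0x0001
#define CLASS_CTL_OFDM_EN	0x0002
#define CLASS_CTL_WAITED_EN	0x0004

/* Model of the classifier update: only bits inside 'allowed' may change. */
static uint16_t classifier_update(uint16_t reg, uint16_t mask, uint16_t val)
{
	uint16_t allowed = CLASS_CTL_CCK_EN | CLASS_CTL_OFDM_EN |
			   CLASS_CTL_WAITED_EN;
	uint16_t tmp = reg & allowed;

	tmp &= ~mask;
	tmp |= (val & mask);
	/* like b43_phy_maskset(..., ~allowed, tmp): keep bits outside 'allowed' */
	return (reg & ~allowed) | tmp;
}

int main(void)
{
	uint16_t reg = 0xFF00 | CLASS_CTL_CCK_EN | CLASS_CTL_OFDM_EN;

	/* Disable OFDM only, as done for channel 14 later in this patch. */
	reg = classifier_update(reg, CLASS_CTL_OFDM_EN, 0);
	printf("0x%04x\n", reg);	/* CCK still set, OFDM cleared */
	return 0;
}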
+
+static void b43_phy_ht_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_phy_force_clock(dev, true);
+ bbcfg = b43_phy_read(dev, B43_PHY_HT_BBCFG);
+ b43_phy_write(dev, B43_PHY_HT_BBCFG, bbcfg | B43_PHY_HT_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_PHY_HT_BBCFG, bbcfg & ~B43_PHY_HT_BBCFG_RSTCCA);
+ b43_phy_force_clock(dev, false);
+
+ b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
+}
+
static void b43_phy_ht_zero_extg(struct b43_wldev *dev)
{
u8 i, j;
@@ -176,10 +251,10 @@ static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
{
u8 i;
- const u16 ctl_regs[3][2] = {
- { B43_PHY_HT_AFE_CTL1, B43_PHY_HT_AFE_CTL2 },
- { B43_PHY_HT_AFE_CTL3, B43_PHY_HT_AFE_CTL4 },
- { B43_PHY_HT_AFE_CTL5, B43_PHY_HT_AFE_CTL6},
+ static const u16 ctl_regs[3][2] = {
+ { B43_PHY_HT_AFE_C1_OVER, B43_PHY_HT_AFE_C1 },
+ { B43_PHY_HT_AFE_C2_OVER, B43_PHY_HT_AFE_C2 },
+ { B43_PHY_HT_AFE_C3_OVER, B43_PHY_HT_AFE_C3},
};
for (i = 0; i < 3; i++) {
@@ -193,27 +268,6 @@ static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
}
}
-static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
-{
- u8 i;
-
- u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
- b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
-
- b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
- for (i = 0; i < 200; i++) {
- if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
- i = 0;
- break;
- }
- msleep(1);
- }
- if (i)
- b43err(dev->wl, "Forcing RF sequence timeout\n");
-
- b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
-}
-
static void b43_phy_ht_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
{
clip_st[0] = b43_phy_read(dev, B43_PHY_HT_C1_CLIP1THRES);
@@ -240,15 +294,426 @@ static void b43_phy_ht_bphy_init(struct b43_wldev *dev)
}
/**************************************************
+ * Samples
+ **************************************************/
+
+static void b43_phy_ht_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_ht *phy_ht = dev->phy.ht;
+ u16 tmp;
+ int i;
+
+ tmp = b43_phy_read(dev, B43_PHY_HT_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, B43_PHY_HT_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, 0x7FFF);
+
+ b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0x0004);
+
+ for (i = 0; i < 3; i++) {
+ if (phy_ht->bb_mult_save[i] >= 0) {
+ b43_httab_write(dev, B43_HTTAB16(13, 0x63 + i * 4),
+ phy_ht->bb_mult_save[i]);
+ b43_httab_write(dev, B43_HTTAB16(13, 0x67 + i * 4),
+ phy_ht->bb_mult_save[i]);
+ }
+ }
+}
+
+static u16 b43_phy_ht_load_samples(struct b43_wldev *dev)
+{
+ int i;
+ u16 len = 20 << 3;
+
+ b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, 0x4400);
+
+ for (i = 0; i < len; i++) {
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, 0);
+ b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, 0);
+ }
+
+ return len;
+}
+
+static void b43_phy_ht_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
+ u16 wait)
+{
+ struct b43_phy_ht *phy_ht = dev->phy.ht;
+ u16 save_seq_mode;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (phy_ht->bb_mult_save[i] < 0)
+ phy_ht->bb_mult_save[i] = b43_httab_read(dev, B43_HTTAB16(13, 0x63 + i * 4));
+ }
+
+ b43_phy_write(dev, B43_PHY_HT_SAMP_DEP_CNT, samps - 1);
+ if (loops != 0xFFFF)
+ loops--;
+ b43_phy_write(dev, B43_PHY_HT_SAMP_LOOP_CNT, loops);
+ b43_phy_write(dev, B43_PHY_HT_SAMP_WAIT_CNT, wait);
+
+ save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
+ b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE,
+ B43_PHY_HT_RF_SEQ_MODE_CA_OVER);
+
+ /* TODO: find out mask bits! Do we need more function arguments? */
+ b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
+ b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
+ b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, ~0);
+ b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, 0x1);
+
+ for (i = 0; i < 100; i++) {
+ if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & 1)) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ b43err(dev->wl, "run samples timeout\n");
+
+ b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
+}
+
+static void b43_phy_ht_tx_tone(struct b43_wldev *dev)
+{
+ u16 samp;
+
+ samp = b43_phy_ht_load_samples(dev);
+ b43_phy_ht_run_samples(dev, samp, 0xFFFF, 0);
+}
+
+/**************************************************
+ * RSSI
+ **************************************************/
+
+static void b43_phy_ht_rssi_select(struct b43_wldev *dev, u8 core_sel,
+ u8 rssi_type)
+{
+ static const u16 ctl_regs[3][2] = {
+ { B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER, },
+ { B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER, },
+ { B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER, },
+ };
+ static const u16 radio_r[] = { R2059_SYN, R2059_TXRX0, R2059_RXRX1, };
+ int core;
+
+ if (core_sel == 0) {
+ b43err(dev->wl, "RSSI selection for core off not implemented yet\n");
+ } else {
+ for (core = 0; core < 3; core++) {
+ /* Check if the caller requested one specific core */
+ if ((core_sel == 1 && core != 0) ||
+ (core_sel == 2 && core != 1) ||
+ (core_sel == 3 && core != 2))
+ continue;
+
+ switch (rssi_type) {
+ case 4:
+ b43_phy_set(dev, ctl_regs[core][0], 0x3 << 8);
+ b43_phy_set(dev, ctl_regs[core][0], 0x3 << 10);
+ b43_phy_set(dev, ctl_regs[core][1], 0x1 << 9);
+ b43_phy_set(dev, ctl_regs[core][1], 0x1 << 10);
+
+ b43_radio_set(dev, R2059_RXRX1 | 0xbf, 0x1);
+ b43_radio_write(dev, radio_r[core] | 0x159,
+ 0x11);
+ break;
+ default:
+ b43err(dev->wl, "RSSI selection for type %d not implemented yet\n",
+ rssi_type);
+ }
+ }
+ }
+}
+
+static void b43_phy_ht_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
+ u8 nsamp)
+{
+ u16 phy_regs_values[12];
+ static const u16 phy_regs_to_save[] = {
+ B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER,
+ 0x848, 0x841,
+ B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER,
+ 0x868, 0x861,
+ B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER,
+ 0x888, 0x881,
+ };
+ u16 tmp[3];
+ int i;
+
+ for (i = 0; i < 12; i++)
+ phy_regs_values[i] = b43_phy_read(dev, phy_regs_to_save[i]);
+
+ b43_phy_ht_rssi_select(dev, 5, type);
+
+ for (i = 0; i < 6; i++)
+ buf[i] = 0;
+
+ for (i = 0; i < nsamp; i++) {
+ tmp[0] = b43_phy_read(dev, B43_PHY_HT_RSSI_C1);
+ tmp[1] = b43_phy_read(dev, B43_PHY_HT_RSSI_C2);
+ tmp[2] = b43_phy_read(dev, B43_PHY_HT_RSSI_C3);
+
+ buf[0] += ((s8)((tmp[0] & 0x3F) << 2)) >> 2;
+ buf[1] += ((s8)(((tmp[0] >> 8) & 0x3F) << 2)) >> 2;
+ buf[2] += ((s8)((tmp[1] & 0x3F) << 2)) >> 2;
+ buf[3] += ((s8)(((tmp[1] >> 8) & 0x3F) << 2)) >> 2;
+ buf[4] += ((s8)((tmp[2] & 0x3F) << 2)) >> 2;
+ buf[5] += ((s8)(((tmp[2] >> 8) & 0x3F) << 2)) >> 2;
+ }
+
+ for (i = 0; i < 12; i++)
+ b43_phy_write(dev, phy_regs_to_save[i], phy_regs_values[i]);
+}
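The buf[] accumulation in b43_phy_ht_poll_rssi() above extracts a 6-bit two's-complement RSSI field from each 16-bit register and sign-extends it: shifting the 6-bit value left by 2 puts its sign bit into bit 7 of an s8, and the arithmetic right shift by 2 brings it back with the sign propagated. A standalone illustration; it assumes, like the driver code, an arithmetic right shift of signed values and two's-complement narrowing, which gcc/clang provide:

#include <stdint.h>
#include <stdio.h>

/* Sign-extend a 6-bit two's-complement field, as the RSSI poll loop does. */
static int sign_extend6(uint16_t raw)
{
	return ((int8_t)((raw & 0x3F) << 2)) >> 2;
}

int main(void)
{
	/* 0x3F is -1, 0x20 is -32, 0x1F is +31 in 6-bit two's complement. */
	printf("%d %d %d\n", sign_extend6(0x3F), sign_extend6(0x20),
	       sign_extend6(0x1F));	/* prints -1 -32 31 */
	return 0;
}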
+
+/**************************************************
+ * Tx/Rx
+ **************************************************/
+
+static void b43_phy_ht_tx_power_fix(struct b43_wldev *dev)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ u16 mask;
+ u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8));
+
+ if (0) /* FIXME */
+ mask = 0x2 << (i * 4);
+ else
+ mask = 0;
+ b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
+
+ b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16);
+ b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
+ tmp & 0xFF);
+ b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
+ tmp & 0xFF);
+ }
+}
+
+static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_ht *phy_ht = dev->phy.ht;
+ u16 en_bits = B43_PHY_HT_TXPCTL_CMD_C1_COEFF |
+ B43_PHY_HT_TXPCTL_CMD_C1_HWPCTLEN |
+ B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN;
+ static const u16 cmd_regs[3] = { B43_PHY_HT_TXPCTL_CMD_C1,
+ B43_PHY_HT_TXPCTL_CMD_C2,
+ B43_PHY_HT_TXPCTL_CMD_C3 };
+ int i;
+
+ if (!enable) {
+ if (b43_phy_read(dev, B43_PHY_HT_TXPCTL_CMD_C1) & en_bits) {
+ /* We are disabling enabled TX pwr ctl; save its state */
+ /*
+ * TODO: find the registers. On N-PHY they were 0x1ed
+ * and 0x1ee; we need 3 such registers for HT-PHY
+ */
+ }
+ b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, ~en_bits);
+ } else {
+ b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ for (i = 0; i < 3; i++)
+ b43_phy_write(dev, cmd_regs[i], 0x32);
+ }
+
+ for (i = 0; i < 3; i++)
+ if (phy_ht->tx_pwr_idx[i] <=
+ B43_PHY_HT_TXPCTL_CMD_C1_INIT)
+ b43_phy_write(dev, cmd_regs[i],
+ phy_ht->tx_pwr_idx[i]);
+ }
+
+ phy_ht->tx_pwr_ctl = enable;
+}
+
+static void b43_phy_ht_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
+{
+ struct b43_phy_ht *phy_ht = dev->phy.ht;
+ s32 rssi_buf[6];
+
+ /* TODO */
+
+ b43_phy_ht_tx_tone(dev);
+ udelay(20);
+ b43_phy_ht_poll_rssi(dev, 4, rssi_buf, 1);
+ b43_phy_ht_stop_playback(dev);
+ b43_phy_ht_reset_cca(dev);
+
+ phy_ht->idle_tssi[0] = rssi_buf[0] & 0xff;
+ phy_ht->idle_tssi[1] = rssi_buf[2] & 0xff;
+ phy_ht->idle_tssi[2] = rssi_buf[4] & 0xff;
+
+ /* TODO */
+}
+
+static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_ht *phy_ht = dev->phy.ht;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ u8 *idle = phy_ht->idle_tssi;
+ u8 target[3];
+ s16 a1[3], b0[3], b1[3];
+
+ u16 freq = dev->phy.channel_freq;
+ int i, c;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ for (c = 0; c < 3; c++) {
+ target[c] = sprom->core_pwr_info[c].maxpwr_2g;
+ a1[c] = sprom->core_pwr_info[c].pa_2g[0];
+ b0[c] = sprom->core_pwr_info[c].pa_2g[1];
+ b1[c] = sprom->core_pwr_info[c].pa_2g[2];
+ }
+ } else if (freq >= 4900 && freq < 5100) {
+ for (c = 0; c < 3; c++) {
+ target[c] = sprom->core_pwr_info[c].maxpwr_5gl;
+ a1[c] = sprom->core_pwr_info[c].pa_5gl[0];
+ b0[c] = sprom->core_pwr_info[c].pa_5gl[1];
+ b1[c] = sprom->core_pwr_info[c].pa_5gl[2];
+ }
+ } else if (freq >= 5100 && freq < 5500) {
+ for (c = 0; c < 3; c++) {
+ target[c] = sprom->core_pwr_info[c].maxpwr_5g;
+ a1[c] = sprom->core_pwr_info[c].pa_5g[0];
+ b0[c] = sprom->core_pwr_info[c].pa_5g[1];
+ b1[c] = sprom->core_pwr_info[c].pa_5g[2];
+ }
+ } else if (freq >= 5500) {
+ for (c = 0; c < 3; c++) {
+ target[c] = sprom->core_pwr_info[c].maxpwr_5gh;
+ a1[c] = sprom->core_pwr_info[c].pa_5gh[0];
+ b0[c] = sprom->core_pwr_info[c].pa_5gh[1];
+ b1[c] = sprom->core_pwr_info[c].pa_5gh[2];
+ }
+ } else {
+ target[0] = target[1] = target[2] = 52;
+ a1[0] = a1[1] = a1[2] = -424;
+ b0[0] = b0[1] = b0[2] = 5612;
+ b1[0] = b1[1] = b1[2] = -1393;
+ }
+
+ b43_phy_set(dev, B43_PHY_HT_TSSIMODE, B43_PHY_HT_TSSIMODE_EN);
+ b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1,
+ ~B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN & 0xFFFF);
+
+ /* TODO: Does it depend on sprom->fem.ghz2.tssipos? */
+ b43_phy_set(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI, 0x4000);
+
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1,
+ ~B43_PHY_HT_TXPCTL_CMD_C1_INIT, 0x19);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C2,
+ ~B43_PHY_HT_TXPCTL_CMD_C2_INIT, 0x19);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C3,
+ ~B43_PHY_HT_TXPCTL_CMD_C3_INIT, 0x19);
+
+ b43_phy_set(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
+ B43_PHY_HT_TXPCTL_IDLE_TSSI_BINF);
+
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
+ ~B43_PHY_HT_TXPCTL_IDLE_TSSI_C1,
+ idle[0] << B43_PHY_HT_TXPCTL_IDLE_TSSI_C1_SHIFT);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
+ ~B43_PHY_HT_TXPCTL_IDLE_TSSI_C2,
+ idle[1] << B43_PHY_HT_TXPCTL_IDLE_TSSI_C2_SHIFT);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI2,
+ ~B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3,
+ idle[2] << B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3_SHIFT);
+
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_N, ~B43_PHY_HT_TXPCTL_N_TSSID,
+ 0xf0);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_N, ~B43_PHY_HT_TXPCTL_N_NPTIL2,
+ 0x3 << B43_PHY_HT_TXPCTL_N_NPTIL2_SHIFT);
+#if 0
+ /* TODO: what to mask/set? */
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0x800, 0)
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0x400, 0)
+#endif
+
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR,
+ ~B43_PHY_HT_TXPCTL_TARG_PWR_C1,
+ target[0] << B43_PHY_HT_TXPCTL_TARG_PWR_C1_SHIFT);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR,
+ ~B43_PHY_HT_TXPCTL_TARG_PWR_C2 & 0xFFFF,
+ target[1] << B43_PHY_HT_TXPCTL_TARG_PWR_C2_SHIFT);
+ b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR2,
+ ~B43_PHY_HT_TXPCTL_TARG_PWR2_C3,
+ target[2] << B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT);
+
+ for (c = 0; c < 3; c++) {
+ s32 num, den, pwr;
+ u32 regval[64];
+
+ for (i = 0; i < 64; i++) {
+ num = 8 * (16 * b0[c] + b1[c] * i);
+ den = 32768 + a1[c] * i;
+ pwr = max((4 * num + den / 2) / den, -8);
+ regval[i] = pwr;
+ }
+ b43_httab_write_bulk(dev, B43_HTTAB16(26 + c, 0), 64, regval);
+ }
+}
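The final loop of b43_phy_ht_tx_power_ctl_setup() above converts the per-core PA parameters (a1, b0, b1) into a 64-entry estimated-power table with fixed-point arithmetic: num = 8 * (16 * b0 + b1 * i), den = 32768 + a1 * i, and pwr = max((4 * num + den / 2) / den, -8), where den / 2 provides rounding. A standalone sketch of that table computation using the fallback coefficients from the else-branch above (a1 = -424, b0 = 5612, b1 = -1393); purely illustrative, not a claim about which hardware takes the fallback path:

#include <stdio.h>

int main(void)
{
	/* Fallback PA coefficients from the hunk above. */
	const int a1 = -424, b0 = 5612, b1 = -1393;
	int i;

	for (i = 0; i < 64; i++) {
		int num = 8 * (16 * b0 + b1 * i);
		int den = 32768 + a1 * i;
		int pwr = (4 * num + den / 2) / den;

		if (pwr < -8)	/* same clamp as max(..., -8) in the driver */
			pwr = -8;
		printf("regval[%2d] = %d\n", i, pwr);
	}
	return 0;
}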
+
+/**************************************************
* Channel switching ops.
**************************************************/
+static void b43_phy_ht_spur_avoid(struct b43_wldev *dev,
+ struct ieee80211_channel *new_channel)
+{
+ struct bcma_device *core = dev->dev->bdev;
+ int spuravoid = 0;
+ u16 tmp;
+
+ /* Checking for channels 13 and 14 is just a guess; we don't have enough logs. */
+ if (new_channel->hw_value == 13 || new_channel->hw_value == 14)
+ spuravoid = 1;
+ bcma_core_pll_ctl(core, B43_BCMA_CLKCTLST_PHY_PLL_REQ, 0, false);
+ bcma_pmu_spuravoid_pllupdate(&core->bus->drv_cc, spuravoid);
+ bcma_core_pll_ctl(core,
+ B43_BCMA_CLKCTLST_80211_PLL_REQ |
+ B43_BCMA_CLKCTLST_PHY_PLL_REQ,
+ B43_BCMA_CLKCTLST_80211_PLL_ST |
+ B43_BCMA_CLKCTLST_PHY_PLL_ST, false);
+
+ /* Values have been taken from the wlc_bmac_switch_macfreq comments */
+ switch (spuravoid) {
+ case 2: /* 126MHz */
+ tmp = 0x2082;
+ break;
+ case 1: /* 123MHz */
+ tmp = 0x5341;
+ break;
+ default: /* 120MHz */
+ tmp = 0x8889;
+ }
+
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, tmp);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+
+ /* TODO: reset PLL */
+
+ if (spuravoid)
+ b43_phy_set(dev, B43_PHY_HT_BBCFG, B43_PHY_HT_BBCFG_RSTRX);
+ else
+ b43_phy_mask(dev, B43_PHY_HT_BBCFG,
+ ~B43_PHY_HT_BBCFG_RSTRX & 0xFFFF);
+
+ b43_phy_ht_reset_cca(dev);
+}
+
static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
const struct b43_phy_ht_channeltab_e_phy *e,
struct ieee80211_channel *new_channel)
{
bool old_band_5ghz;
- u8 i;
old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */
if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
@@ -264,25 +729,20 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5);
b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6);
- /* TODO: some ops on PHY regs 0x0B0 and 0xC0A */
+ if (new_channel->hw_value == 14) {
+ b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN, 0);
+ b43_phy_set(dev, B43_PHY_HT_TEST, 0x0800);
+ } else {
+ b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
+ B43_PHY_HT_CLASS_CTL_OFDM_EN);
+ if (new_channel->band == IEEE80211_BAND_2GHZ)
+ b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
+ }
- /* TODO: separated function? */
- for (i = 0; i < 3; i++) {
- u16 mask;
- u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8));
+ if (1) /* TODO: On N it's for early devices only, what about HT? */
+ b43_phy_ht_tx_power_fix(dev);
- if (0) /* FIXME */
- mask = 0x2 << (i * 4);
- else
- mask = 0;
- b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
-
- b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16);
- b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
- tmp & 0xFF);
- b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
- tmp & 0xFF);
- }
+ b43_phy_ht_spur_avoid(dev, new_channel);
b43_phy_write(dev, 0x017e, 0x3830);
}
@@ -337,14 +797,29 @@ static void b43_phy_ht_op_prepare_structs(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_ht *phy_ht = phy->ht;
+ int i;
memset(phy_ht, 0, sizeof(*phy_ht));
+
+ phy_ht->tx_pwr_ctl = true;
+ for (i = 0; i < 3; i++)
+ phy_ht->tx_pwr_idx[i] = B43_PHY_HT_TXPCTL_CMD_C1_INIT + 1;
+
+ for (i = 0; i < 3; i++)
+ phy_ht->bb_mult_save[i] = -1;
}
static int b43_phy_ht_op_init(struct b43_wldev *dev)
{
+ struct b43_phy_ht *phy_ht = dev->phy.ht;
u16 tmp;
u16 clip_state[3];
+ bool saved_tx_pwr_ctl;
+
+ if (dev->dev->bus_type != B43_BUS_BCMA) {
+ b43err(dev->wl, "HT-PHY is supported only on BCMA bus!\n");
+ return -EOPNOTSUPP;
+ }
b43_phy_ht_tables_init(dev);
@@ -357,9 +832,9 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0);
b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20);
b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20);
@@ -371,8 +846,11 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
if (0) /* TODO: condition */
; /* TODO: PHY op on reg 0x217 */
- b43_phy_read(dev, 0xb0); /* TODO: what for? */
- b43_phy_set(dev, 0xb0, 0x1);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
+ else
+ b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
+ B43_PHY_HT_CLASS_CTL_CCK_EN);
b43_phy_set(dev, 0xb1, 0x91);
b43_phy_write(dev, 0x32f, 0x0003);
@@ -448,12 +926,13 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_mac_phy_clock_set(dev, true);
+ b43_phy_ht_pa_override(dev, false);
b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX);
b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
-
- /* TODO: PHY op on reg 0xb0 */
+ b43_phy_ht_pa_override(dev, true);
/* TODO: Should we restore it? Or store it in global PHY info? */
+ b43_phy_ht_classifier(dev, 0, 0);
b43_phy_ht_read_clip_detection(dev, clip_state);
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -462,6 +941,13 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late);
+ saved_tx_pwr_ctl = phy_ht->tx_pwr_ctl;
+ b43_phy_ht_tx_power_fix(dev);
+ b43_phy_ht_tx_power_ctl(dev, false);
+ b43_phy_ht_tx_power_ctl_idle_tssi(dev);
+ b43_phy_ht_tx_power_ctl_setup(dev);
+ b43_phy_ht_tx_power_ctl(dev, saved_tx_pwr_ctl);
+
return 0;
}
@@ -506,19 +992,19 @@ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
static void b43_phy_ht_op_switch_analog(struct b43_wldev *dev, bool on)
{
if (on) {
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00cd);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x0000);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00cd);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x0000);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00cd);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x0000);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C1, 0x00cd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0x0000);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C2, 0x00cd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0x0000);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C3, 0x00cd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0x0000);
} else {
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x07ff);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00fd);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x07ff);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00fd);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x07ff);
- b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00fd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0x07ff);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C1, 0x00fd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0x07ff);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C2, 0x00fd);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0x07ff);
+ b43_phy_write(dev, B43_PHY_HT_AFE_C3, 0x00fd);
}
}
diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/b43/phy_ht.h
index 6544c4293b3..9b2408efb22 100644
--- a/drivers/net/wireless/b43/phy_ht.h
+++ b/drivers/net/wireless/b43/phy_ht.h
@@ -12,18 +12,60 @@
#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */
#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */
#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */
+#define B43_PHY_HT_CLASS_CTL 0x0B0 /* Classifier control */
+#define B43_PHY_HT_CLASS_CTL_CCK_EN 0x0001 /* CCK enable */
+#define B43_PHY_HT_CLASS_CTL_OFDM_EN 0x0002 /* OFDM enable */
+#define B43_PHY_HT_CLASS_CTL_WAITED_EN 0x0004 /* Waited enable */
+#define B43_PHY_HT_IQLOCAL_CMDGCTL 0x0C2 /* I/Q LO cal command G control */
+#define B43_PHY_HT_SAMP_CMD 0x0C3 /* Sample command */
+#define B43_PHY_HT_SAMP_CMD_STOP 0x0002 /* Stop */
+#define B43_PHY_HT_SAMP_LOOP_CNT 0x0C4 /* Sample loop count */
+#define B43_PHY_HT_SAMP_WAIT_CNT 0x0C5 /* Sample wait count */
+#define B43_PHY_HT_SAMP_DEP_CNT 0x0C6 /* Sample depth count */
+#define B43_PHY_HT_SAMP_STAT 0x0C7 /* Sample status */
+#define B43_PHY_HT_TSSIMODE 0x122 /* TSSI mode */
+#define B43_PHY_HT_TSSIMODE_EN 0x0001 /* TSSI enable */
+#define B43_PHY_HT_TSSIMODE_PDEN 0x0002 /* Power det enable */
#define B43_PHY_HT_BW1 0x1CE
#define B43_PHY_HT_BW2 0x1CF
#define B43_PHY_HT_BW3 0x1D0
#define B43_PHY_HT_BW4 0x1D1
#define B43_PHY_HT_BW5 0x1D2
#define B43_PHY_HT_BW6 0x1D3
+#define B43_PHY_HT_TXPCTL_CMD_C1 0x1E7 /* TX power control command */
+#define B43_PHY_HT_TXPCTL_CMD_C1_INIT 0x007F /* Init */
+#define B43_PHY_HT_TXPCTL_CMD_C1_COEFF 0x2000 /* Power control coefficients */
+#define B43_PHY_HT_TXPCTL_CMD_C1_HWPCTLEN 0x4000 /* Hardware TX power control enable */
+#define B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN 0x8000 /* TX power control enable */
+#define B43_PHY_HT_TXPCTL_N 0x1E8 /* TX power control N num */
+#define B43_PHY_HT_TXPCTL_N_TSSID 0x00FF /* N TSSI delay */
+#define B43_PHY_HT_TXPCTL_N_TSSID_SHIFT 0
+#define B43_PHY_HT_TXPCTL_N_NPTIL2 0x0700 /* N PT integer log2 */
+#define B43_PHY_HT_TXPCTL_N_NPTIL2_SHIFT 8
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI 0x1E9 /* TX power control idle TSSI */
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C1 0x003F
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C1_SHIFT 0
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C2 0x3F00
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C2_SHIFT 8
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI_BINF 0x8000 /* Raw TSSI offset bin format */
+#define B43_PHY_HT_TXPCTL_TARG_PWR 0x1EA /* TX power control target power */
+#define B43_PHY_HT_TXPCTL_TARG_PWR_C1 0x00FF /* Power 0 */
+#define B43_PHY_HT_TXPCTL_TARG_PWR_C1_SHIFT 0
+#define B43_PHY_HT_TXPCTL_TARG_PWR_C2 0xFF00 /* Power 1 */
+#define B43_PHY_HT_TXPCTL_TARG_PWR_C2_SHIFT 8
+#define B43_PHY_HT_TXPCTL_CMD_C2 0x222
+#define B43_PHY_HT_TXPCTL_CMD_C2_INIT 0x007F
+#define B43_PHY_HT_RSSI_C1 0x219
+#define B43_PHY_HT_RSSI_C2 0x21A
+#define B43_PHY_HT_RSSI_C3 0x21B
#define B43_PHY_HT_C1_CLIP1THRES B43_PHY_OFDM(0x00E)
#define B43_PHY_HT_C2_CLIP1THRES B43_PHY_OFDM(0x04E)
#define B43_PHY_HT_C3_CLIP1THRES B43_PHY_OFDM(0x08E)
#define B43_PHY_HT_RF_SEQ_MODE B43_PHY_EXTG(0x000)
+#define B43_PHY_HT_RF_SEQ_MODE_CA_OVER 0x0001 /* Core active override */
+#define B43_PHY_HT_RF_SEQ_MODE_TR_OVER 0x0002 /* Trigger override */
#define B43_PHY_HT_RF_SEQ_TRIG B43_PHY_EXTG(0x003)
#define B43_PHY_HT_RF_SEQ_TRIG_RX2TX 0x0001 /* RX2TX */
#define B43_PHY_HT_RF_SEQ_TRIG_TX2RX 0x0002 /* TX2RX */
@@ -36,12 +78,27 @@
#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010)
-#define B43_PHY_HT_AFE_CTL1 B43_PHY_EXTG(0x110)
-#define B43_PHY_HT_AFE_CTL2 B43_PHY_EXTG(0x111)
-#define B43_PHY_HT_AFE_CTL3 B43_PHY_EXTG(0x114)
-#define B43_PHY_HT_AFE_CTL4 B43_PHY_EXTG(0x115)
-#define B43_PHY_HT_AFE_CTL5 B43_PHY_EXTG(0x118)
-#define B43_PHY_HT_AFE_CTL6 B43_PHY_EXTG(0x119)
+#define B43_PHY_HT_RF_CTL_INT_C1 B43_PHY_EXTG(0x04c)
+#define B43_PHY_HT_RF_CTL_INT_C2 B43_PHY_EXTG(0x06c)
+#define B43_PHY_HT_RF_CTL_INT_C3 B43_PHY_EXTG(0x08c)
+
+#define B43_PHY_HT_AFE_C1_OVER B43_PHY_EXTG(0x110)
+#define B43_PHY_HT_AFE_C1 B43_PHY_EXTG(0x111)
+#define B43_PHY_HT_AFE_C2_OVER B43_PHY_EXTG(0x114)
+#define B43_PHY_HT_AFE_C2 B43_PHY_EXTG(0x115)
+#define B43_PHY_HT_AFE_C3_OVER B43_PHY_EXTG(0x118)
+#define B43_PHY_HT_AFE_C3 B43_PHY_EXTG(0x119)
+
+#define B43_PHY_HT_TXPCTL_CMD_C3 B43_PHY_EXTG(0x164)
+#define B43_PHY_HT_TXPCTL_CMD_C3_INIT 0x007F
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI2 B43_PHY_EXTG(0x165) /* TX power control idle TSSI */
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3 0x003F
+#define B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3_SHIFT 0
+#define B43_PHY_HT_TXPCTL_TARG_PWR2 B43_PHY_EXTG(0x166) /* TX power control target power */
+#define B43_PHY_HT_TXPCTL_TARG_PWR2_C3 0x00FF
+#define B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT 0
+
+#define B43_PHY_HT_TEST B43_PHY_N_BMODE(0x00A)
/* Values for PHY registers used on channel switching */
@@ -56,6 +113,14 @@ struct b43_phy_ht_channeltab_e_phy {
struct b43_phy_ht {
+ u16 rf_ctl_int_save[3];
+
+ bool tx_pwr_ctl;
+ u8 tx_pwr_idx[3];
+
+ s32 bb_mult_save[3];
+
+ u8 idle_tssi[3];
};
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 2d3c6644f82..faeafe219c5 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -334,13 +334,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
B43legacy_DMA_RINGMEMSIZE,
&(ring->dmabase),
- GFP_KERNEL);
- if (!ring->descbase) {
- b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
- " failed\n");
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ring->descbase)
return -ENOMEM;
- }
- memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 1d92d874ebb..747e9317dab 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -12,8 +12,9 @@ config BRCMSMAC
select CORDIC
---help---
This module adds support for PCIe wireless adapters based on Broadcom
- IEEE802.11n SoftMAC chipsets. If you choose to build a module, it'll
- be called brcmsmac.ko.
+ IEEE802.11n SoftMAC chipsets. It also has WLAN LED support, which will
+ be available if you select BCMA_DRIVER_GPIO. If you choose to build a
+ module, the driver will be called brcmsmac.ko.
config BRCMFMAC
tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 756e19fc279..598c8e2f8d2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -26,6 +26,7 @@ brcmfmac-objs += \
wl_cfg80211.o \
fwil.o \
fweh.o \
+ fwsignal.o \
p2p.o \
dhd_cdc.o \
dhd_common.o \
@@ -39,3 +40,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
dhd_dbg.o
+brcmfmac-$(CONFIG_BRCM_TRACING) += \
+ tracepoint.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index ef6f23be6d3..c7fa20846b3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -501,6 +501,7 @@ struct brcmf_dcmd {
/* Forward decls for struct brcmf_pub (see below) */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
+struct brcmf_fws_info; /* firmware signalling info */
/* Common structure for module and instance linkage */
struct brcmf_pub {
@@ -527,6 +528,10 @@ struct brcmf_pub {
unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
struct brcmf_fweh_info fweh;
+
+ bool fw_signals;
+ struct brcmf_fws_info *fws;
+ spinlock_t fws_spinlock;
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
@@ -582,7 +587,7 @@ extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
void *buf, uint len);
/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
+extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
struct sk_buff *rxp);
extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index ad25c3408b5..883ef9063e8 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -134,7 +134,7 @@ extern void brcmf_dev_reset(struct device *dev);
/* Indication from bus module to change flow-control state */
extern void brcmf_txflowblock(struct device *dev, bool state);
-/* Notify tx completion */
+/* Notify that the bus has transferred the tx packet to the firmware */
extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
bool success);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a2354d951dd..e224bcb9002 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -28,6 +28,7 @@
#include "dhd.h"
#include "dhd_proto.h"
#include "dhd_bus.h"
+#include "fwsignal.h"
#include "dhd_dbg.h"
struct brcmf_proto_cdc_dcmd {
@@ -71,13 +72,26 @@ struct brcmf_proto_cdc_dcmd {
((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
((idx) << BDC_FLAG2_IF_SHIFT)))
+/**
+ * struct brcmf_proto_bdc_header - BDC header format
+ *
+ * @flags: flags contain protocol and checksum info.
+ * @priority: 802.1d priority and USB flow control info (bit 4:7).
+ * @flags2: additional flags containing dongle interface index.
+ * @data_offset: start of packet data. The header is followed by firmware signal data.
+ */
struct brcmf_proto_bdc_header {
u8 flags;
- u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
+ u8 priority;
u8 flags2;
u8 data_offset;
};
+/*
+ * maximum length of firmware signal data between
+ * the BDC header and packet data in the tx path.
+ */
+#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
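With firmware signalling enabled, the tx path reserves up to BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES of TLV space between the 4-byte BDC header and the payload, and data_offset records that gap in 32-bit words (hence the << 2 in brcmf_proto_hdrpull() below). A standalone sketch of locating the payload in a raw BDC frame; the struct mirrors the one above and the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

struct demo_bdc_header {
	uint8_t flags;
	uint8_t priority;	/* 802.1d priority, bits 4:7 carry USB flow control */
	uint8_t flags2;		/* carries the dongle interface index */
	uint8_t data_offset;	/* signal-TLV area length in 32-bit words */
};

/* Illustrative: skip the BDC header plus any firmware-signal TLVs. */
static const uint8_t *bdc_payload(const uint8_t *frame)
{
	const struct demo_bdc_header *h = (const void *)frame;

	return frame + sizeof(*h) + ((size_t)h->data_offset << 2);
}

int main(void)
{
	uint8_t frame[64] = { 0 };

	frame[3] = 2;	/* data_offset: 2 words = 8 bytes of signal TLVs */
	printf("payload at offset %ld\n", (long)(bdc_payload(frame) - frame));
	return 0;
}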
#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
#define BUS_HEADER_LEN (16+64) /* Must be at least SDPCM_RESERVE
@@ -258,7 +272,7 @@ static void pkt_set_sum_good(struct sk_buff *skb, bool x)
skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
}
-void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
+void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
@@ -266,7 +280,6 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
brcmf_dbg(CDC, "Enter\n");
/* Push BDC header used to convey priority for buses that don't */
-
skb_push(pktbuf, BDC_HEADER_LEN);
h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
@@ -277,11 +290,11 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
- h->data_offset = 0;
+ h->data_offset = offset;
BDC_SET_IF_IDX(h, ifidx);
}
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
@@ -328,7 +341,10 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
skb_pull(pktbuf, BDC_HEADER_LEN);
- skb_pull(pktbuf, h->data_offset << 2);
+ if (do_fws)
+ brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
+ else
+ skb_pull(pktbuf, h->data_offset << 2);
if (pktbuf->len == 0)
return -ENODATA;
@@ -350,7 +366,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
}
drvr->prot = cdc;
- drvr->hdrlen += BDC_HEADER_LEN;
+ drvr->hdrlen += BDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 4544342a042..be0787cab24 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -24,6 +24,7 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "fwil.h"
+#include "tracepoint.h"
#define PKTFILTER_BUF_SIZE 128
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
@@ -373,3 +374,35 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
done:
return err;
}
+
+#ifdef CONFIG_BRCM_TRACING
+void __brcmf_err(const char *func, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ pr_err("%s: %pV", func, &vaf);
+ trace_brcmf_err(func, &vaf);
+ va_end(args);
+}
+#endif
+#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
+void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ if (brcmf_msg_level & level)
+ pr_debug("%s %pV", func, &vaf);
+ trace_brcmf_dbg(level, func, &vaf);
+ va_end(args);
+}
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 57671eddf79..ac792499b46 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -22,6 +22,7 @@
#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
+#include "tracepoint.h"
static struct dentry *root_folder;
@@ -123,3 +124,44 @@ void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
debugfs_create_file("counters", S_IRUGO, dentry,
sdcnt, &brcmf_debugfs_sdio_counter_ops);
}
+
+static
+ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_fws_stats *fwstats = f->private_data;
+ char buf[100];
+ int res;
+
+ /* only allow read from start */
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf),
+ "header_pulls: %u\n"
+ "header_only_pkt: %u\n"
+ "tlv_parse_failed: %u\n"
+ "tlv_invalid_type: %u\n",
+ fwstats->header_pulls,
+ fwstats->header_only_pkt,
+ fwstats->tlv_parse_failed,
+ fwstats->tlv_invalid_type);
+
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcmf_debugfs_fws_stats_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_debugfs_fws_stats_read
+};
+
+void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
+ struct brcmf_fws_stats *stats)
+{
+ struct dentry *dentry = drvr->dbgfs_dir;
+
+ if (!IS_ERR_OR_NULL(dentry))
+ debugfs_create_file("fws_stats", S_IRUGO, dentry,
+ stats, &brcmf_debugfs_fws_stats_ops);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index bc013cbe06f..4bc646bde16 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -43,6 +43,7 @@
* debugging is not selected. When debugging the driver error
* messages are as important as other tracing or even more so.
*/
+#ifndef CONFIG_BRCM_TRACING
#ifdef CONFIG_BRCMDBG
#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
#else
@@ -52,15 +53,21 @@
pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#endif
+#else
+__printf(2, 3)
+void __brcmf_err(const char *func, const char *fmt, ...);
+#define brcmf_err(fmt, ...) \
+ __brcmf_err(__func__, fmt, ##__VA_ARGS__)
+#endif
-#if defined(DEBUG)
-
+#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
+__printf(3, 4)
+void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...);
#define brcmf_dbg(level, fmt, ...) \
do { \
- if (brcmf_msg_level & BRCMF_##level##_VAL) \
- pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+ __brcmf_dbg(BRCMF_##level##_VAL, __func__, \
+ fmt, ##__VA_ARGS__); \
} while (0)
-
#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL)
#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
@@ -69,7 +76,7 @@ do { \
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
-#else /* (defined DEBUG) || (defined DEBUG) */
+#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
@@ -81,7 +88,7 @@ do { \
#define BRCMF_EVENT_ON() 0
#define BRCMF_FIL_ON() 0
-#endif /* defined(DEBUG) */
+#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
#define brcmf_dbg_hex_dump(test, data, len, fmt, ...) \
do { \
@@ -125,6 +132,13 @@ struct brcmf_sdio_count {
ulong rx_readahead_cnt; /* packets where header read-ahead was used */
};
+struct brcmf_fws_stats {
+ u32 tlv_parse_failed;
+ u32 tlv_invalid_type;
+ u32 header_only_pkt;
+ u32 header_pulls;
+};
+
struct brcmf_pub;
#ifdef DEBUG
void brcmf_debugfs_init(void);
@@ -134,6 +148,8 @@ void brcmf_debugfs_detach(struct brcmf_pub *drvr);
struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
struct brcmf_sdio_count *sdcnt);
+void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
+ struct brcmf_fws_stats *stats);
#else
static inline void brcmf_debugfs_init(void)
{
@@ -148,6 +164,10 @@ static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
{
}
+static inline void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
+ struct brcmf_fws_stats *stats)
+{
+}
#endif
#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index c06cea88df0..fa5a2af04d4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -30,17 +30,18 @@
#include "p2p.h"
#include "wl_cfg80211.h"
#include "fwil.h"
+#include "fwsignal.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11 WLAN fullmac cards");
MODULE_LICENSE("Dual BSD/GPL");
#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
/* Error bits */
int brcmf_msg_level;
-module_param(brcmf_msg_level, int, 0);
+module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(debug, "level of debug output");
/* P2P0 enable */
static int brcmf_p2p_enable;
@@ -230,7 +231,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
atomic_inc(&ifp->pend_8021x_cnt);
/* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifp->ifidx, skb);
+ brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
/* Use bus module to send data frame */
ret = brcmf_bus_txdata(drvr->bus_if, skb);
@@ -283,7 +284,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
skb_unlink(skb, skb_list);
/* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, &ifidx, skb);
+ ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
ifp = drvr->iflist[ifidx];
if (ret || !ifp || !ifp->ndev) {
@@ -357,23 +358,29 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_if *ifp;
+ int res;
- brcmf_proto_hdrpull(drvr, &ifidx, txp);
+ res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
ifp = drvr->iflist[ifidx];
if (!ifp)
- return;
+ goto done;
- eh = (struct ethhdr *)(txp->data);
- type = ntohs(eh->h_proto);
+ if (res == 0) {
+ eh = (struct ethhdr *)(txp->data);
+ type = ntohs(eh->h_proto);
- if (type == ETH_P_PAE) {
- atomic_dec(&ifp->pend_8021x_cnt);
- if (waitqueue_active(&ifp->pend_8021x_wait))
- wake_up(&ifp->pend_8021x_wait);
+ if (type == ETH_P_PAE) {
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
+ }
}
if (!success)
ifp->stats.tx_errors++;
+
+done:
+ brcmu_pkt_buf_free_skb(txp);
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
@@ -873,6 +880,9 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
+ drvr->fw_signals = true;
+ (void)brcmf_fws_init(drvr);
+
drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
if (drvr->config == NULL) {
ret = -ENOMEM;
@@ -889,6 +899,8 @@ fail:
brcmf_err("failed: %d\n", ret);
if (drvr->config)
brcmf_cfg80211_detach(drvr->config);
+ if (drvr->fws)
+ brcmf_fws_deinit(drvr);
free_netdev(ifp->ndev);
drvr->iflist[0] = NULL;
if (p2p_ifp) {
@@ -952,6 +964,9 @@ void brcmf_detach(struct device *dev)
if (drvr->prot)
brcmf_proto_detach(drvr);
+ if (drvr->fws)
+ brcmf_fws_deinit(drvr);
+
brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 48fa7030219..ef917988374 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -33,7 +33,7 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
/* Add any protocol-specific data header.
* Caller must reserve prot_hdrlen prepend space.
*/
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
+extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
struct sk_buff *txp);
/* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 4469321c0eb..9a2edd3f0a5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -1546,7 +1546,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
struct sk_buff_head pktlist; /* needed for bus interface */
u16 pad; /* Number of pad bytes to read */
uint rxleft = 0; /* Remaining number of frames allowed */
- int sdret; /* Return code from calls */
+ int ret; /* Return code from calls */
uint rxcount = 0; /* Total frames read */
struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
u8 head_read = 0;
@@ -1577,15 +1577,15 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* read header first for unknown frame length */
sdio_claim_host(bus->sdiodev->func[1]);
if (!rd->len) {
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
+ ret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC,
bus->rxhdr,
BRCMF_FIRSTREAD);
bus->sdcnt.f2rxhdrs++;
- if (sdret < 0) {
+ if (ret < 0) {
brcmf_err("RXHEADER FAILED: %d\n",
- sdret);
+ ret);
bus->sdcnt.rx_hdrfail++;
brcmf_sdbrcm_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
@@ -1637,14 +1637,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
skb_pull(pkt, head_read);
pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
- sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
bus->sdcnt.f2rxdata++;
sdio_release_host(bus->sdiodev->func[1]);
- if (sdret < 0) {
+ if (ret < 0) {
brcmf_err("read %d bytes from channel %d failed: %d\n",
- rd->len, rd->channel, sdret);
+ rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
sdio_claim_host(bus->sdiodev->func[1]);
brcmf_sdbrcm_rxfail(bus, true,
@@ -1775,7 +1775,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
- uint chan, bool free_pkt)
+ uint chan)
{
int ret;
u8 *frame;
@@ -1805,10 +1805,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
pkt_align(new, pkt->len, BRCMF_SDALIGN);
memcpy(new->data, pkt->data, pkt->len);
- if (free_pkt)
- brcmu_pkt_buf_free_skb(pkt);
- /* free the pkt if canned one is not used */
- free_pkt = true;
+ brcmu_pkt_buf_free_skb(pkt);
pkt = new;
frame = (u8 *) (pkt->data);
/* precondition: (frame % BRCMF_SDALIGN) == 0) */
@@ -1901,10 +1898,6 @@ done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
-
- if (free_pkt)
- brcmu_pkt_buf_free_skb(pkt);
-
return ret;
}
@@ -1932,7 +1925,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
spin_unlock_bh(&bus->txqlock);
datalen = pkt->len - SDPCM_HDRLEN;
- ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
+ ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -2343,7 +2336,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
skb_pull(pkt, SDPCM_HDRLEN);
brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
- brcmu_pkt_buf_free_skb(pkt);
brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
} else {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
new file mode 100644
index 00000000000..071d55f9cd4
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2010 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/err.h>
+#include <uapi/linux/nl80211.h>
+
+#include <brcmu_utils.h>
+#include <brcmu_wifi.h>
+#include "dhd.h"
+#include "dhd_dbg.h"
+#include "fwil.h"
+#include "fweh.h"
+#include "fwsignal.h"
+
+/**
+ * DOC: Firmware Signalling
+ *
+ * Firmware can send signals to host and vice versa, which are passed in the
+ * data packets using TLV based header. This signalling layer is on top of the
+ * BDC bus protocol layer.
+ */
+
+/*
+ * Single definition list for the firmware-driver flow control TLVs.
+ *
+ * Each TLV is specified by BRCMF_FWS_TLV_DEF(name, ID, length).
+ * A length value of 0 indicates a variable-length TLV.
+ */
+#define BRCMF_FWS_TLV_DEFLIST \
+ BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \
+ BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \
+ BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \
+ BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \
+ BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \
+ BRCMF_FWS_TLV_DEF(MACDESC_ADD, 6, 8) \
+ BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \
+ BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \
+ BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \
+ BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \
+ BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 8) \
+ BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \
+ BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \
+ BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \
+ BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \
+ BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \
+ BRCMF_FWS_TLV_DEF(FILLER, 255, 0)
+
+/**
+ * enum brcmf_fws_tlv_type - definition of tlv identifiers.
+ */
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+ BRCMF_FWS_TYPE_ ## name = id,
+enum brcmf_fws_tlv_type {
+ BRCMF_FWS_TLV_DEFLIST
+ BRCMF_FWS_TYPE_INVALID
+};
+#undef BRCMF_FWS_TLV_DEF
+
+/**
+ * enum brcmf_fws_tlv_len - length values for tlvs.
+ */
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+ BRCMF_FWS_TYPE_ ## name ## _LEN = len,
+enum brcmf_fws_tlv_len {
+ BRCMF_FWS_TLV_DEFLIST
+};
+#undef BRCMF_FWS_TLV_DEF
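To make the X-macro pattern concrete, the first entries of BRCMF_FWS_TLV_DEFLIST expand, after preprocessing, roughly to:

	enum brcmf_fws_tlv_type {
		BRCMF_FWS_TYPE_MAC_OPEN = 1,
		BRCMF_FWS_TYPE_MAC_CLOSE = 2,
		/* ... remaining entries ... */
		BRCMF_FWS_TYPE_INVALID
	};

	enum brcmf_fws_tlv_len {
		BRCMF_FWS_TYPE_MAC_OPEN_LEN = 1,
		BRCMF_FWS_TYPE_MAC_CLOSE_LEN = 1,
		/* ... remaining entries ... */
	};

Keeping a single list and expanding it several times keeps the identifiers, their lengths and the debug names from drifting apart.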
+
+#ifdef DEBUG
+/**
+ * brcmf_fws_tlv_names - array of tlv names.
+ */
+#define BRCMF_FWS_TLV_DEF(name, id, len) \
+ { id, #name },
+static struct {
+ enum brcmf_fws_tlv_type id;
+ const char *name;
+} brcmf_fws_tlv_names[] = {
+ BRCMF_FWS_TLV_DEFLIST
+};
+#undef BRCMF_FWS_TLV_DEF
+
+static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++)
+ if (brcmf_fws_tlv_names[i].id == id)
+ return brcmf_fws_tlv_names[i].name;
+
+ return "INVALID";
+}
+#else
+static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
+{
+ return "NODEBUG";
+}
+#endif /* DEBUG */
+
+/**
+ * flags used to enable tlv signalling from firmware.
+ */
+#define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001
+#define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002
+#define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
+#define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
+#define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
+#define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
+#define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040
+
+#define BRCMF_FWS_HANGER_MAXITEMS 1024
+#define BRCMF_FWS_HANGER_ITEM_STATE_FREE 1
+#define BRCMF_FWS_HANGER_ITEM_STATE_INUSE 2
+#define BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3
+
+#define BRCMF_FWS_STATE_OPEN 1
+#define BRCMF_FWS_STATE_CLOSE 2
+
+#define BRCMF_FWS_FCMODE_NONE 0
+#define BRCMF_FWS_FCMODE_IMPLIED_CREDIT 1
+#define BRCMF_FWS_FCMODE_EXPLICIT_CREDIT 2
+
+#define BRCMF_FWS_MAC_DESC_TABLE_SIZE 32
+#define BRCMF_FWS_MAX_IFNUM 16
+#define BRCMF_FWS_MAC_DESC_ID_INVALID 0xff
+
+#define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF 0
+#define BRCMF_FWS_HOSTIF_FLOWSTATE_ON 1
+
+/**
+ * FWFC packet identifier
+ *
+ * 32-bit packet identifier used in PKTTAG tlv from host to dongle.
+ *
+ * - Generated at the host (e.g. dhd)
+ * - Seen as a generic sequence number by wlc except the flags field
+ *
+ * Generation : b[31] => generation number for this packet [host->fw]
+ * OR, current generation number [fw->host]
+ * Flags : b[30:27] => command, status flags
+ * FIFO-AC : b[26:24] => AC-FIFO id
+ * h-slot : b[23:8] => hanger-slot
+ * freerun : b[7:0] => A free running counter
+ */
+#define BRCMF_FWS_PKTTAG_GENERATION_MASK 0x80000000
+#define BRCMF_FWS_PKTTAG_GENERATION_SHIFT 31
+#define BRCMF_FWS_PKTTAG_FLAGS_MASK 0x78000000
+#define BRCMF_FWS_PKTTAG_FLAGS_SHIFT 27
+#define BRCMF_FWS_PKTTAG_FIFO_MASK 0x07000000
+#define BRCMF_FWS_PKTTAG_FIFO_SHIFT 24
+#define BRCMF_FWS_PKTTAG_HSLOT_MASK 0x00ffff00
+#define BRCMF_FWS_PKTTAG_HSLOT_SHIFT 8
+#define BRCMF_FWS_PKTTAG_FREERUN_MASK 0x000000ff
+#define BRCMF_FWS_PKTTAG_FREERUN_SHIFT 0
+
+#define brcmf_fws_pkttag_set_field(var, field, value) \
+ brcmu_maskset32((var), BRCMF_FWS_PKTTAG_ ## field ## _MASK, \
+ BRCMF_FWS_PKTTAG_ ## field ## _SHIFT, (value))
+#define brcmf_fws_pkttag_get_field(var, field) \
+ brcmu_maskget32((var), BRCMF_FWS_PKTTAG_ ## field ## _MASK, \
+ BRCMF_FWS_PKTTAG_ ## field ## _SHIFT)
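A minimal sketch of how the PKTTAG helpers compose and decompose the 32-bit identifier described above; the field values are hypothetical:

	u32 pkttag = 0;

	brcmf_fws_pkttag_set_field(&pkttag, FIFO, 2);      /* AC-FIFO id */
	brcmf_fws_pkttag_set_field(&pkttag, HSLOT, 0x123); /* hanger slot */
	brcmf_fws_pkttag_set_field(&pkttag, FREERUN, 7);   /* free-running counter */

	/* pkttag is now 0x02012307 */
	WARN_ON(brcmf_fws_pkttag_get_field(pkttag, HSLOT) != 0x123);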
+
+struct brcmf_fws_info {
+ struct brcmf_pub *drvr;
+ struct brcmf_fws_stats stats;
+};
+
+static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
+{
+ brcmf_dbg(CTL, "rssi %d\n", rssi);
+ return 0;
+}
+
+static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
+{
+ __le32 timestamp;
+
+ memcpy(&timestamp, &data[2], sizeof(timestamp));
+ brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1],
+ le32_to_cpu(timestamp));
+ return 0;
+}
+
+/* using macro so sparse checking does not complain
+ * about locking imbalance.
+ */
+#define brcmf_fws_lock(drvr, flags) \
+do { \
+ flags = 0; \
+ spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
+} while (0)
+
+/* using macro so sparse checking does not complain
+ * about locking imbalance.
+ */
+#define brcmf_fws_unlock(drvr, flags) \
+ spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
+
+int brcmf_fws_init(struct brcmf_pub *drvr)
+{
+ u32 tlv;
+ int rc;
+
+ /* enable rssi signals */
+ tlv = drvr->fw_signals ? BRCMF_FWS_FLAGS_RSSI_SIGNALS : 0;
+
+ spin_lock_init(&drvr->fws_spinlock);
+
+ drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
+ if (!drvr->fws) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* enable proptxstatus signaling by default */
+ rc = brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv);
+ if (rc < 0) {
+ brcmf_err("failed to set bdcv2 tlv signaling\n");
+ goto fail;
+ }
+ /* set linkage back */
+ drvr->fws->drvr = drvr;
+
+ /* create debugfs file for statistics */
+ brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
+
+ /* TODO: remove upon feature delivery */
+ brcmf_err("%s bdcv2 tlv signaling [%x]\n",
+ drvr->fw_signals ? "enabled" : "disabled", tlv);
+ return 0;
+
+fail:
+ /* disable flow control entirely */
+ drvr->fw_signals = false;
+ brcmf_fws_deinit(drvr);
+ return rc;
+}
+
+void brcmf_fws_deinit(struct brcmf_pub *drvr)
+{
+ /* free top structure */
+ kfree(drvr->fws);
+ drvr->fws = NULL;
+}
+
+int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
+ struct sk_buff *skb)
+{
+ struct brcmf_fws_info *fws = drvr->fws;
+ ulong flags;
+ u8 *signal_data;
+ s16 data_len;
+ u8 type;
+ u8 len;
+ u8 *data;
+
+ brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n",
+ ifidx, skb->len, signal_len);
+
+ WARN_ON(signal_len > skb->len);
+
+ /* if flow control disabled, skip to packet data and leave */
+ if (!signal_len || !drvr->fw_signals) {
+ skb_pull(skb, signal_len);
+ return 0;
+ }
+
+ /* lock during tlv parsing */
+ brcmf_fws_lock(drvr, flags);
+
+ fws->stats.header_pulls++;
+ data_len = signal_len;
+ signal_data = skb->data;
+
+ while (data_len > 0) {
+ /* extract tlv info */
+ type = signal_data[0];
+
+ /* FILLER type is actually not a TLV, but
+ * a single byte that can be skipped.
+ */
+ if (type == BRCMF_FWS_TYPE_FILLER) {
+ signal_data += 1;
+ data_len -= 1;
+ continue;
+ }
+ len = signal_data[1];
+ data = signal_data + 2;
+
+ /* abort parsing when length invalid */
+ if (data_len < len + 2)
+ break;
+
+ brcmf_dbg(INFO, "tlv type=%d (%s), len=%d\n", type,
+ brcmf_fws_get_tlv_name(type), len);
+ switch (type) {
+ case BRCMF_FWS_TYPE_MAC_OPEN:
+ case BRCMF_FWS_TYPE_MAC_CLOSE:
+ WARN_ON(len != BRCMF_FWS_TYPE_MAC_OPEN_LEN);
+ break;
+ case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
+ WARN_ON(len != BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT_LEN);
+ break;
+ case BRCMF_FWS_TYPE_TXSTATUS:
+ WARN_ON(len != BRCMF_FWS_TYPE_TXSTATUS_LEN);
+ break;
+ case BRCMF_FWS_TYPE_PKTTAG:
+ WARN_ON(len != BRCMF_FWS_TYPE_PKTTAG_LEN);
+ break;
+ case BRCMF_FWS_TYPE_MACDESC_ADD:
+ case BRCMF_FWS_TYPE_MACDESC_DEL:
+ WARN_ON(len != BRCMF_FWS_TYPE_MACDESC_ADD_LEN);
+ break;
+ case BRCMF_FWS_TYPE_RSSI:
+ WARN_ON(len != BRCMF_FWS_TYPE_RSSI_LEN);
+ brcmf_fws_rssi_indicate(fws, *(s8 *)data);
+ break;
+ case BRCMF_FWS_TYPE_INTERFACE_OPEN:
+ case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
+ WARN_ON(len != BRCMF_FWS_TYPE_INTERFACE_OPEN_LEN);
+ break;
+ case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
+ WARN_ON(len != BRCMF_FWS_TYPE_FIFO_CREDITBACK_LEN);
+ break;
+ case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
+ WARN_ON(len != BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN);
+ break;
+ case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
+ WARN_ON(len != BRCMF_FWS_TYPE_MAC_REQUEST_PACKET_LEN);
+ break;
+ case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
+ WARN_ON(len != BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS_LEN);
+ break;
+ case BRCMF_FWS_TYPE_TRANS_ID:
+ WARN_ON(len != BRCMF_FWS_TYPE_TRANS_ID_LEN);
+ brcmf_fws_dbg_seqnum_check(fws, data);
+ break;
+ case BRCMF_FWS_TYPE_COMP_TXSTATUS:
+ WARN_ON(len != BRCMF_FWS_TYPE_COMP_TXSTATUS_LEN);
+ break;
+ default:
+ fws->stats.tlv_invalid_type++;
+ break;
+ }
+
+ signal_data += len + 2;
+ data_len -= len + 2;
+ }
+
+ if (data_len != 0)
+ fws->stats.tlv_parse_failed++;
+
+ /* signalling processing result does
+ * not affect the actual ethernet packet.
+ */
+ skb_pull(skb, signal_len);
+
+ /* this may be a signal-only packet
+ */
+ if (skb->len == 0)
+ fws->stats.header_only_pkt++;
+
+ brcmf_fws_unlock(drvr, flags);
+ return 0;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
new file mode 100644
index 00000000000..e728eea72bb
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef FWSIGNAL_H_
+#define FWSIGNAL_H_
+
+int brcmf_fws_init(struct brcmf_pub *drvr);
+void brcmf_fws_deinit(struct brcmf_pub *drvr);
+int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
+ struct sk_buff *skb);
+#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
new file mode 100644
index 00000000000..b505db48c60
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h> /* bug in tracepoint.h, it should include this */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "tracepoint.h"
+#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
new file mode 100644
index 00000000000..35efc7a6764
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#if !defined(BRCMF_TRACEPOINT_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define BRCMF_TRACEPOINT_H_
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#ifndef CONFIG_BRCM_TRACING
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+
+#endif /* CONFIG_BRCM_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM brcmfmac
+
+#define MAX_MSG_LEN 100
+
+TRACE_EVENT(brcmf_err,
+ TP_PROTO(const char *func, struct va_format *vaf),
+ TP_ARGS(func, vaf),
+ TP_STRUCT__entry(
+ __string(func, func)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __assign_str(func, func);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+
+TRACE_EVENT(brcmf_dbg,
+ TP_PROTO(u32 level, const char *func, struct va_format *vaf),
+ TP_ARGS(level, func, vaf),
+ TP_STRUCT__entry(
+ __field(u32, level)
+ __string(func, func)
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+ TP_fast_assign(
+ __entry->level = level;
+ __assign_str(func, func);
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+ TP_printk("%s: %s", __get_str(func), __get_str(msg))
+);
+
+#ifdef CONFIG_BRCM_TRACING
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE tracepoint
+
+#include <trace/define_trace.h>
+
+#endif /* CONFIG_BRCM_TRACING */
+
+#endif /* BRCMF_TRACEPOINT_H_ */
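A sketch of how a debug wrapper could feed these tracepoints through a struct va_format, matching the TP_PROTO declarations above; the wrapper name is hypothetical and not part of this patch:

	static void brcmf_dbg_trace(u32 level, const char *func,
				    const char *fmt, ...)
	{
		struct va_format vaf = {
			.fmt = fmt,
		};
		va_list args;

		va_start(args, fmt);
		vaf.va = &args;
		trace_brcmf_dbg(level, func, &vaf);
		va_end(args);
	}

When CONFIG_BRCM_TRACING is not set, the stub macros above turn trace_brcmf_dbg() into an empty static inline, so such a wrapper compiles away without ifdefs at the call site.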
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 42289e9ea88..01aed7ad6be 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -112,11 +112,6 @@ struct brcmf_usbdev_info {
static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
struct brcmf_usbreq *req);
-MODULE_AUTHOR("Broadcom Corporation");
-MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac usb driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac usb cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -422,8 +417,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
brcmf_usb_del_fromq(devinfo, req);
brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
-
- brcmu_pkt_buf_free_skb(req->skb);
req->skb = NULL;
brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
@@ -577,15 +570,17 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
int ret;
brcmf_dbg(USB, "Enter, skb=%p\n", skb);
- if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP)
- return -EIO;
+ if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
+ ret = -EIO;
+ goto fail;
+ }
req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
&devinfo->tx_freecount);
if (!req) {
- brcmu_pkt_buf_free_skb(skb);
brcmf_err("no req to send\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
req->skb = skb;
@@ -598,18 +593,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
if (ret) {
brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
brcmf_usb_del_fromq(devinfo, req);
- brcmu_pkt_buf_free_skb(req->skb);
req->skb = NULL;
brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
- &devinfo->tx_freecount);
- } else {
- if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
- !devinfo->tx_flowblock) {
- brcmf_txflowblock(dev, true);
- devinfo->tx_flowblock = true;
- }
+ &devinfo->tx_freecount);
+ goto fail;
}
+ if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
+ !devinfo->tx_flowblock) {
+ brcmf_txflowblock(dev, true);
+ devinfo->tx_flowblock = true;
+ }
+ return 0;
+
+fail:
+ brcmf_txcomplete(dev, skb, false);
return ret;
}
@@ -1485,6 +1483,7 @@ static struct usb_device_id brcmf_usb_devid_table[] = {
{ USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
{ }
};
+
MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 2af9c0f0798..804473fc5c5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3052,16 +3052,16 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
int i;
int ret = 0;
- brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
+ brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
request->n_match_sets, request->n_ssids);
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
return -EAGAIN;
}
- if (!request || !request->n_ssids || !request->n_match_sets) {
+ if (!request->n_ssids || !request->n_match_sets) {
brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
- request ? request->n_ssids : 0);
+ request->n_ssids);
return -EINVAL;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index d3d4151c3ed..cba19d839b7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -43,6 +43,10 @@ BRCMSMAC_OFILES := \
brcms_trace_events.o \
debug.o
+ifdef CONFIG_BCMA_DRIVER_GPIO
+BRCMSMAC_OFILES += led.o
+endif
+
MODULEPFX := brcmsmac
obj-$(CONFIG_BRCMSMAC) += $(MODULEPFX).o
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.c b/drivers/net/wireless/brcm80211/brcmsmac/led.c
new file mode 100644
index 00000000000..74b17cecb18
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/led.c
@@ -0,0 +1,126 @@
+#include <net/mac80211.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/gpio.h>
+
+#include "mac80211_if.h"
+#include "pub.h"
+#include "main.h"
+#include "led.h"
+
+ /* number of leds */
+#define BRCMS_LED_NO 4
+ /* behavior mask */
+#define BRCMS_LED_BEH_MASK 0x7f
+ /* activelow (polarity) bit */
+#define BRCMS_LED_AL_MASK 0x80
+ /* radio enabled */
+#define BRCMS_LED_RADIO 3
+
+static void brcms_radio_led_ctrl(struct brcms_info *wl, bool state)
+{
+ if (wl->radio_led.gpio == -1)
+ return;
+
+ if (wl->radio_led.active_low)
+ state = !state;
+
+ if (state)
+ gpio_set_value(wl->radio_led.gpio, 1);
+ else
+ gpio_set_value(wl->radio_led.gpio, 0);
+}
+
+
+/* Callback from the LED subsystem. */
+static void brcms_led_brightness_set(struct led_classdev *led_dev,
+ enum led_brightness brightness)
+{
+ struct brcms_info *wl = container_of(led_dev,
+ struct brcms_info, led_dev);
+ brcms_radio_led_ctrl(wl, brightness);
+}
+
+void brcms_led_unregister(struct brcms_info *wl)
+{
+ if (wl->led_dev.dev)
+ led_classdev_unregister(&wl->led_dev);
+ if (wl->radio_led.gpio != -1)
+ gpio_free(wl->radio_led.gpio);
+}
+
+int brcms_led_register(struct brcms_info *wl)
+{
+ int i, err;
+ struct brcms_led *radio_led = &wl->radio_led;
+ /* get CC core */
+ struct bcma_drv_cc *cc_drv = &wl->wlc->hw->d11core->bus->drv_cc;
+ struct gpio_chip *bcma_gpio = &cc_drv->gpio;
+ struct ssb_sprom *sprom = &wl->wlc->hw->d11core->bus->sprom;
+ u8 *leds[] = { &sprom->gpio0,
+ &sprom->gpio1,
+ &sprom->gpio2,
+ &sprom->gpio3 };
+ unsigned gpio = -1;
+ bool active_low = false;
+
+ /* none by default */
+ radio_led->gpio = -1;
+ radio_led->active_low = false;
+
+ if (!bcma_gpio || !gpio_is_valid(bcma_gpio->base))
+ return -ENODEV;
+
+ /* find radio enabled LED */
+ for (i = 0; i < BRCMS_LED_NO; i++) {
+ u8 led = *leds[i];
+ if ((led & BRCMS_LED_BEH_MASK) == BRCMS_LED_RADIO) {
+ gpio = bcma_gpio->base + i;
+ if (led & BRCMS_LED_AL_MASK)
+ active_low = true;
+ break;
+ }
+ }
+
+ if (gpio == -1 || !gpio_is_valid(gpio))
+ return -ENODEV;
+
+ /* request and configure LED gpio */
+ err = gpio_request_one(gpio,
+ active_low ? GPIOF_OUT_INIT_HIGH
+ : GPIOF_OUT_INIT_LOW,
+ "radio on");
+ if (err) {
+ wiphy_err(wl->wiphy, "requesting led gpio %d failed (err: %d)\n",
+ gpio, err);
+ return err;
+ }
+ err = gpio_direction_output(gpio, 1);
+ if (err) {
+ wiphy_err(wl->wiphy, "cannot set led gpio %d to output (err: %d)\n",
+ gpio, err);
+ return err;
+ }
+
+ snprintf(wl->radio_led.name, sizeof(wl->radio_led.name),
+ "brcmsmac-%s:radio", wiphy_name(wl->wiphy));
+
+ wl->led_dev.name = wl->radio_led.name;
+ wl->led_dev.default_trigger =
+ ieee80211_get_radio_led_name(wl->pub->ieee_hw);
+ wl->led_dev.brightness_set = brcms_led_brightness_set;
+ err = led_classdev_register(wiphy_dev(wl->wiphy), &wl->led_dev);
+
+ if (err) {
+ wiphy_err(wl->wiphy, "cannot register led device: %s (err: %d)\n",
+ wl->radio_led.name, err);
+ return err;
+ }
+
+ wiphy_info(wl->wiphy, "registered radio enabled led device: %s gpio: %d\n",
+ wl->radio_led.name,
+ gpio);
+ radio_led->gpio = gpio;
+ radio_led->active_low = active_low;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.h b/drivers/net/wireless/brcm80211/brcmsmac/led.h
new file mode 100644
index 00000000000..17a0b1f5dbc
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/led.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCM_LED_H_
+#define _BRCM_LED_H_
+struct brcms_led {
+ char name[32];
+ unsigned gpio;
+ bool active_low;
+};
+
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+void brcms_led_unregister(struct brcms_info *wl);
+int brcms_led_register(struct brcms_info *wl);
+#else
+static inline void brcms_led_unregister(struct brcms_info *wl) {};
+static inline int brcms_led_register(struct brcms_info *wl)
+{
+ return -ENOTSUPP;
+};
+#endif
+
+#endif /* _BRCM_LED_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index c6451c61407..c70cf7b654c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -34,6 +34,7 @@
#include "mac80211_if.h"
#include "main.h"
#include "debug.h"
+#include "led.h"
#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
@@ -904,6 +905,7 @@ static void brcms_remove(struct bcma_device *pdev)
struct brcms_info *wl = hw->priv;
if (wl->wlc) {
+ brcms_led_unregister(wl);
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
ieee80211_unregister_hw(hw);
@@ -1151,6 +1153,8 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
pr_err("%s: brcms_attach failed!\n", __func__);
return -ENODEV;
}
+ brcms_led_register(wl);
+
return 0;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 947ccacf43e..4090032e81a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -20,8 +20,10 @@
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
+#include <linux/leds.h>
#include "ucode_loader.h"
+#include "led.h"
/*
* Starting index for 5G rates in the
* legacy rate table.
@@ -81,6 +83,8 @@ struct brcms_info {
struct wiphy *wiphy;
struct brcms_ucode ucode;
bool mute_tx;
+ struct brcms_led radio_led;
+ struct led_classdev led_dev;
};
/* misc callbacks */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 8ef02dca8f8..0c8e998bfb1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7810,9 +7810,14 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
/* read the ucode version if we have not yet done so */
if (wlc->ucode_rev == 0) {
- wlc->ucode_rev =
- brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR) << NBITS(u16);
- wlc->ucode_rev |= brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR);
+ u16 rev;
+ u16 patch;
+
+ rev = brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR);
+ patch = brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR);
+ wlc->ucode_rev = (rev << NBITS(u16)) | patch;
+ snprintf(wlc->wiphy->fw_version,
+ sizeof(wlc->wiphy->fw_version), "%u.%u", rev, patch);
}
/* ..now really unleash hell (allow the MAC out of suspend) */
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 3e6405e06ac..bf5e50fc21b 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -116,6 +116,31 @@ struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
}
EXPORT_SYMBOL(brcmu_pktq_pdeq);
+/*
+ * precedence based dequeue with match function. Passing a NULL pointer
+ * for the match function parameter is considered to be a wildcard so
+ * any packet on the queue is returned. In that case it is no different
+ * from brcmu_pktq_pdeq() above.
+ */
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+ bool (*match_fn)(struct sk_buff *skb,
+ void *arg), void *arg)
+{
+ struct sk_buff_head *q;
+ struct sk_buff *p, *next;
+
+ q = &pq->q[prec].skblist;
+ skb_queue_walk_safe(q, p, next) {
+ if (match_fn == NULL || match_fn(p, arg)) {
+ skb_unlink(p, q);
+ pq->len--;
+ return p;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(brcmu_pktq_pdeq_match);
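A usage sketch for the match-based dequeue; the callback, its priority argument and the queue pq are hypothetical:

	static bool match_by_prio(struct sk_buff *skb, void *arg)
	{
		return skb->priority == *(u32 *)arg;
	}

	/* dequeue the first precedence-0 packet with priority 2;
	 * passing NULL instead of match_by_prio accepts any packet,
	 * behaving exactly like brcmu_pktq_pdeq()
	 */
	u32 prio = 2;
	struct sk_buff *skb = brcmu_pktq_pdeq_match(pq, 0, match_by_prio, &prio);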
+
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
struct sk_buff_head *q;
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 477b92ad3d6..898cacb8d01 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -120,6 +120,10 @@ extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
struct sk_buff *p);
extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+ bool (*match_fn)(struct sk_buff *p,
+ void *arg),
+ void *arg);
/* packet primitives */
extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
@@ -173,6 +177,29 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
/* ip address */
struct ipv4_addr;
+/*
+ * bitfield macros using masking and shift
+ *
+ * remark: the mask parameter should be a shifted mask.
+ */
+static inline void brcmu_maskset32(u32 *var, u32 mask, u8 shift, u32 value)
+{
+ value = (value << shift) & mask;
+ *var = (*var & ~mask) | value;
+}
+static inline u32 brcmu_maskget32(u32 var, u32 mask, u8 shift)
+{
+ return (var & mask) >> shift;
+}
+static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift, u16 value)
+{
+ value = (value << shift) & mask;
+ *var = (*var & ~mask) | value;
+}
+static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
+{
+ return (var & mask) >> shift;
+}
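A short round-trip with the 16-bit helpers, using a hypothetical shifted mask covering bits [7:4]:

	u16 reg = 0x1234;

	brcmu_maskset16(&reg, 0x00f0, 4, 0x9);  /* reg is now 0x1294 */
	WARN_ON(brcmu_maskget16(reg, 0x00f0, 4) != 0x9);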
/* externs */
/* format/print */
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index c353b5f19c8..b37a582ccbe 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3477,7 +3477,7 @@ static struct attribute_group il3945_attribute_group = {
.attrs = il3945_sysfs_entries,
};
-struct ieee80211_ops il3945_mac_ops = {
+static struct ieee80211_ops il3945_mac_ops __read_mostly = {
.tx = il3945_mac_tx,
.start = il3945_mac_start,
.stop = il3945_mac_stop,
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index e0b9d7fa5de..dc1e6da9976 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -2379,10 +2379,8 @@ il3945_hw_set_hw_params(struct il_priv *il)
il->_3945.shared_virt =
dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
&il->_3945.shared_phys, GFP_KERNEL);
- if (!il->_3945.shared_virt) {
- IL_ERR("failed to allocate pci memory\n");
+ if (!il->_3945.shared_virt)
return -ENOMEM;
- }
il->hw_params.bcast_id = IL3945_BROADCAST_ID;
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 1d45075e0d5..9a8703def0b 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -150,10 +150,6 @@ struct il3945_frame {
struct list_head list;
};
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7941eb3a016..6affa7e8f01 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -612,7 +612,7 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
/* Called for N_RX (legacy ABG frames), or
* N_RX_MPDU (HT high-throughput N frames). */
-void
+static void
il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
{
struct ieee80211_hdr *header;
@@ -744,7 +744,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
* This will be used later in il_hdl_rx() for N_RX_MPDU. */
-void
+static void
il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1250,7 +1250,7 @@ il4965_dump_fh(struct il_priv *il, char **buf, bool display)
return 0;
}
-void
+static void
il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1357,7 +1357,7 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
}
#endif
-void
+static void
il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
const int recalib_seconds = 60;
@@ -1399,7 +1399,7 @@ il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
il4965_temperature_calib(il);
}
-void
+static void
il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1921,8 +1921,8 @@ drop_unlock:
static inline int
il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
{
- ptr->addr =
- dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+ ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
@@ -2050,7 +2050,7 @@ il4965_txq_ctx_reset(struct il_priv *il)
il_tx_queue_reset(il, txq_id);
}
-void
+static void
il4965_txq_ctx_unmap(struct il_priv *il)
{
int txq_id;
@@ -2258,7 +2258,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
spin_lock_irqsave(&il->sta_lock, flags);
tid_data = &il->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
+ *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->agg.txq_id = txq_id;
il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -2408,7 +2408,7 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
/* aggregated HW queue */
if (txq_id == tid_data->agg.txq_id &&
q->read_ptr == q->write_ptr) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
int tx_fifo = il4965_get_fifo_from_tid(tid);
D_HT("HW queue empty: continue DELBA flow\n");
il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
@@ -2627,7 +2627,8 @@ il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
static inline u32
il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
{
- return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+ return le32_to_cpup(&tx_resp->u.status +
+ tx_resp->frame_count) & IEEE80211_MAX_SN;
}
static inline u32
@@ -2717,15 +2718,15 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
hdr = (struct ieee80211_hdr *) skb->data;
sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
IL_ERR("BUG_ON idx doesn't match seq control"
" idx=%d, seq_idx=%d, seq=%d\n", idx,
- SEQ_TO_SN(sc), hdr->seq_ctrl);
+ IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
return -1;
}
D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
- SEQ_TO_SN(sc));
+ IEEE80211_SEQ_TO_SN(sc));
sh = idx - start;
if (sh > 64) {
@@ -2895,7 +2896,7 @@ il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
* Handles block-acknowledge notification from device, which reports success
* of frames sent via aggregation.
*/
-void
+static void
il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
{
struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -6316,7 +6317,7 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
-const struct ieee80211_ops il4965_mac_ops = {
+static const struct ieee80211_ops il4965_mac_ops = {
.tx = il4965_mac_tx,
.start = il4965_mac_start,
.stop = il4965_mac_stop,
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e006ea83132..5b79819d7be 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1122,7 +1122,7 @@ il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
sizeof(struct il_powertable_cmd), cmd);
}
-int
+static int
il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
{
int ret;
@@ -2566,15 +2566,13 @@ il_rx_queue_alloc(struct il_priv *il)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd =
- dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- rxq->rb_stts =
- dma_alloc_coherent(dev, sizeof(struct il_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
@@ -2941,10 +2939,9 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
* shared with device */
txq->tfds =
dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IL_ERR("Fail to alloc TFDs\n");
+ if (!txq->tfds)
goto error;
- }
+
txq->q.id = id;
return 0;
@@ -4891,7 +4888,7 @@ il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
}
EXPORT_SYMBOL(il_add_beacon_time);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int
il_pci_suspend(struct device *device)
@@ -4942,7 +4939,7 @@ il_pci_resume(struct device *device)
SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
EXPORT_SYMBOL(il_pm_ops);
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static void
il_update_qos(struct il_priv *il)
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 96f2025d936..10986aaf908 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -541,10 +541,6 @@ struct il_frame {
struct list_head list;
};
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
enum {
CMD_SYNC = 0,
CMD_SIZE_NORMAL = 0,
@@ -2235,9 +2231,8 @@ il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
return -EINVAL;
}
- desc->v_addr =
- dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
- GFP_KERNEL);
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ba319cba3f1..56c2040a955 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,7 +6,6 @@ config IWLWIFI
select LEDS_CLASS
select LEDS_TRIGGERS
select MAC80211_LEDS
- select IWLDVM
---help---
Select to build the driver supporting the:
@@ -45,6 +44,7 @@ config IWLWIFI
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
depends on IWLWIFI
+ default IWLWIFI
help
This is the driver supporting the DVM firmware which is
currently the only firmware available for existing devices.
@@ -58,6 +58,15 @@ config IWLMVM
Say yes if you have such a device.
+# don't call it _MODULE -- will confuse Kconfig/fixdep/...
+config IWLWIFI_OPMODE_MODULAR
+ bool
+ default y if IWLDVM=m
+ default y if IWLMVM=m
+
+comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
+ depends on IWLWIFI && IWLDVM=n && IWLMVM=n
+
menu "Debugging Options"
depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 6c7800044a0..3b5613ea458 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -7,8 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
-iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
-iwlwifi-objs += pcie/7000.o
+iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 41ec27cb6ef..019d433900e 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 6468de8634b..d6c4cf2ad7c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index 65e920cab2b..cfddde19494 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 84e2c0fcfef..95ca026ecc9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -1526,6 +1526,7 @@ struct iwl_compressed_ba_resp {
__le16 scd_ssn;
u8 txed; /* number of frames sent */
u8 txed_2_done; /* number of frames acked */
+ __le16 reserved1;
} __packed;
/*
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 20806cae11b..7b8178be119 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -19,7 +19,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -2324,6 +2324,28 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ bool restart_fw = iwlwifi_mod_params.restart_fw;
+ int ret;
+
+ iwlwifi_mod_params.restart_fw = true;
+
+ mutex_lock(&priv->mutex);
+
+ /* take the return value to make compiler happy - it will fail anyway */
+ ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL);
+
+ mutex_unlock(&priv->mutex);
+
+ iwlwifi_mod_params.restart_fw = restart_fw;
+
+ return count;
+}
+
DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2343,6 +2365,7 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
+DEBUGFS_WRITE_FILE_OPS(fw_restart);
#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
#endif
@@ -2400,6 +2423,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR);
#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
#endif
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 44ca0e57f9f..87c006c9c57 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -19,7 +19,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 323e4a33fca..c7cd2dffa5c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1137,7 +1137,8 @@ done:
static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *channel,
- int duration)
+ int duration,
+ enum ieee80211_roc_type type)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 3a4aa5239c4..d69b5586671 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -19,7 +19,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index dc6f965a123..b89b9d9b996 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 6aec2df3bb2..cc1e0c1a6f4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -19,7 +19,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -418,7 +418,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
" Tx flags = 0x%08x, agg.state = %d",
info->flags, tid_data->agg.state);
IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
- sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
+ sta_id, tid,
+ IEEE80211_SEQ_TO_SN(tid_data->seq_number));
goto drop_unlock_sta;
}
@@ -569,7 +570,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
return 0;
}
- tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
/* There are still packets for this RA / TID in the HW */
if (!test_bit(txq_id, priv->agg_q_alloc)) {
@@ -651,7 +652,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
spin_lock_bh(&priv->sta_lock);
tid_data = &priv->tid_data[sta_id][tid];
- tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->agg.txq_id = txq_id;
*ssn = tid_data->agg.ssn;
@@ -911,7 +912,7 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
return le32_to_cpup((__le32 *)&tx_resp->status +
- tx_resp->frame_count) & MAX_SN;
+ tx_resp->frame_count) & IEEE80211_MAX_SN;
}
static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
@@ -1148,7 +1149,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
- next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
+ next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
if (is_agg) {
/* If this is an aggregation queue, we can rely on the
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 1a4ac9236a4..0a1cdc5e856 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -19,7 +19,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ff338975728..c080ae3070b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -29,7 +29,6 @@
#include "iwl-config.h"
#include "iwl-csr.h"
#include "iwl-agn-hw.h"
-#include "cfg.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 5
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index e7de33128b1..a6ddd2f9fba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -28,7 +28,6 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
-#include "cfg.h"
#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 5096f7c96ab..403f3f224bf 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -29,7 +29,6 @@
#include "iwl-config.h"
#include "iwl-agn-hw.h"
#include "iwl-csr.h"
-#include "cfg.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 801ff49796d..b5ab8d1bcac 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -28,7 +28,6 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
-#include "cfg.h"
#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 6e35b2b7233..50263e87fe1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -1,34 +1,70 @@
/******************************************************************************
*
- * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
*
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*****************************************************************************/
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-agn-hw.h"
-#include "cfg.h"
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 6
@@ -70,7 +106,6 @@ static const struct iwl_base_params iwl7000_base_params = {
};
static const struct iwl_ht_params iwl7000_ht_params = {
- .ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index e9975c54c27..6d73f943cef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 743b4834335..c38aa8f7755 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -275,4 +275,51 @@ struct iwl_cfg {
const bool temp_offset_v2;
};
+/*
+ * This list declares the config structures for all devices.
+ */
+extern const struct iwl_cfg iwl5300_agn_cfg;
+extern const struct iwl_cfg iwl5100_agn_cfg;
+extern const struct iwl_cfg iwl5350_agn_cfg;
+extern const struct iwl_cfg iwl5100_bgn_cfg;
+extern const struct iwl_cfg iwl5100_abg_cfg;
+extern const struct iwl_cfg iwl5150_agn_cfg;
+extern const struct iwl_cfg iwl5150_abg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_cfg;
+extern const struct iwl_cfg iwl6005_2abg_cfg;
+extern const struct iwl_cfg iwl6005_2bg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern const struct iwl_cfg iwl6005_2agn_d_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
+extern const struct iwl_cfg iwl1030_bgn_cfg;
+extern const struct iwl_cfg iwl1030_bg_cfg;
+extern const struct iwl_cfg iwl6030_2agn_cfg;
+extern const struct iwl_cfg iwl6030_2abg_cfg;
+extern const struct iwl_cfg iwl6030_2bgn_cfg;
+extern const struct iwl_cfg iwl6030_2bg_cfg;
+extern const struct iwl_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_cfg iwl6000i_2abg_cfg;
+extern const struct iwl_cfg iwl6000i_2bg_cfg;
+extern const struct iwl_cfg iwl6000_3agn_cfg;
+extern const struct iwl_cfg iwl6050_2agn_cfg;
+extern const struct iwl_cfg iwl6050_2abg_cfg;
+extern const struct iwl_cfg iwl6150_bgn_cfg;
+extern const struct iwl_cfg iwl6150_bg_cfg;
+extern const struct iwl_cfg iwl1000_bgn_cfg;
+extern const struct iwl_cfg iwl1000_bg_cfg;
+extern const struct iwl_cfg iwl100_bgn_cfg;
+extern const struct iwl_cfg iwl100_bg_cfg;
+extern const struct iwl_cfg iwl130_bgn_cfg;
+extern const struct iwl_cfg iwl130_bg_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern const struct iwl_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl105_bgn_cfg;
+extern const struct iwl_cfg iwl105_bgn_d_cfg;
+extern const struct iwl_cfg iwl135_bgn_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl3160_ac_cfg;
+
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index df3463a3870..20e845d4da0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 87535a67de7..8a44f594528 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -66,6 +66,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-debug.h"
#include "iwl-devtrace.h"
@@ -85,11 +86,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
}
__iwl_fn(warn)
-EXPORT_SYMBOL_GPL(__iwl_warn);
+IWL_EXPORT_SYMBOL(__iwl_warn);
__iwl_fn(info)
-EXPORT_SYMBOL_GPL(__iwl_info);
+IWL_EXPORT_SYMBOL(__iwl_info);
__iwl_fn(crit)
-EXPORT_SYMBOL_GPL(__iwl_crit);
+IWL_EXPORT_SYMBOL(__iwl_crit);
void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
const char *fmt, ...)
@@ -110,7 +111,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
trace_iwlwifi_err(&vaf);
va_end(args);
}
-EXPORT_SYMBOL_GPL(__iwl_err);
+IWL_EXPORT_SYMBOL(__iwl_err);
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
@@ -133,5 +134,5 @@ void __iwl_dbg(struct device *dev,
trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
va_end(args);
}
-EXPORT_SYMBOL_GPL(__iwl_dbg);
+IWL_EXPORT_SYMBOL(__iwl_dbg);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 81aa91fab5a..4491c1c72cc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -298,7 +298,7 @@ TRACE_EVENT(iwlwifi_dbg,
MAX_MSG_LEN, vaf->fmt,
*vaf->va) >= MAX_MSG_LEN);
),
- TP_printk("%s", (char *)__get_dynamic_array(msg))
+ TP_printk("%s", __get_str(msg))
);
#undef TRACE_SYSTEM
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index fbfd2d13711..3ce4e9d5082 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -1102,7 +1102,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
/* shared module parameters */
struct iwl_mod_params iwlwifi_mod_params = {
- .restart_fw = 1,
+ .restart_fw = true,
.plcp_check = true,
.bt_coex_active = true,
.power_level = IWL_POWER_INDEX_1,
@@ -1111,7 +1111,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.wd_disable = true,
/* the rest are 0 by default */
};
-EXPORT_SYMBOL_GPL(iwlwifi_mod_params);
+IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
{
@@ -1135,7 +1135,7 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
mutex_unlock(&iwlwifi_opmode_table_mtx);
return -EIO;
}
-EXPORT_SYMBOL_GPL(iwl_opmode_register);
+IWL_EXPORT_SYMBOL(iwl_opmode_register);
void iwl_opmode_deregister(const char *name)
{
@@ -1157,7 +1157,7 @@ void iwl_opmode_deregister(const char *name)
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
}
-EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
+IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
static int __init iwl_drv_init(void)
{
@@ -1207,8 +1207,8 @@ MODULE_PARM_DESC(11n_disable,
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
-module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
int, S_IRUGO);
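
Illustration only, not part of the patch: the fw_restart hunk above works because a module parameter declared with the "bool" type must be backed by a bool variable, which is why restart_fw switches from int to bool. A minimal, hypothetical module showing the same pattern:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* hypothetical parameter, mirroring the fw_restart change above */
static bool demo_restart_fw = true;
module_param_named(fw_restart, demo_restart_fw, bool, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");

MODULE_LICENSE("GPL");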
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 594a5c71b27..7d145091630 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,8 @@
#ifndef __iwl_drv_h__
#define __iwl_drv_h__
+#include <linux/module.h>
+
/* for all modules */
#define DRV_NAME "iwlwifi"
#define IWLWIFI_VERSION "in-tree:"
@@ -123,4 +125,17 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
*/
void iwl_drv_stop(struct iwl_drv *drv);
+/*
+ * exported symbol management
+ *
+ * The driver can be split into multiple modules, in which case some symbols
+ * must be exported for the sub-modules. However, if it's not split and
+ * everything is built-in, then we can avoid that.
+ */
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
+#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
+#else
+#define IWL_EXPORT_SYMBOL(sym)
+#endif
+
#endif /* __iwl_drv_h__ */
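
Sketch of how the new macro is meant to be used (the exported helper below is hypothetical): a symbol defined in the shared iwlwifi core is tagged with IWL_EXPORT_SYMBOL(), which becomes a real EXPORT_SYMBOL_GPL() only when the op-modes are built as separate modules (CONFIG_IWLWIFI_OPMODE_MODULAR) and compiles away otherwise.

#include "iwl-drv.h"

/* hypothetical core helper, for illustration only */
int iwl_demo_core_helper(int val)
{
	return val + 1;
}
IWL_EXPORT_SYMBOL(iwl_demo_core_helper);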
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 034f2ff4f43..600c9fdd7f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,6 +62,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-eeprom-parse.h"
@@ -749,7 +750,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
}
ht_info->ht_supported = true;
- ht_info->cap = 0;
+ ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
if (iwlwifi_mod_params.amsdu_size_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -909,7 +910,7 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
kfree(data);
return NULL;
}
-EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
+IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
/* helper functions */
int iwl_nvm_check_version(struct iwl_nvm_data *data,
@@ -928,4 +929,4 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
data->calib_version, trans->cfg->nvm_calib_ver);
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(iwl_nvm_check_version);
+IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
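
Standalone illustration (plain C, not driver code) of the HT capability seeding changed above: the cap word now starts from the DSSS/CCK-in-40MHz bit instead of 0, and the 8K A-MSDU bit is ORed in only when the amsdu_size_8K module parameter is set. The bit values are taken from include/linux/ieee80211.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HT_CAP_MAX_AMSDU	0x0800	/* IEEE80211_HT_CAP_MAX_AMSDU */
#define HT_CAP_DSSSCCK40	0x1000	/* IEEE80211_HT_CAP_DSSSCCK40 */

static uint16_t build_ht_cap(bool amsdu_size_8K)
{
	uint16_t cap = HT_CAP_DSSSCCK40;	/* was 0 before this patch */

	if (amsdu_size_8K)
		cap |= HT_CAP_MAX_AMSDU;
	return cap;
}

int main(void)
{
	printf("ht cap: 0x%04x\n", build_ht_cap(true));	/* 0x1800 */
	return 0;
}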
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 683fe6a8c58..37f115390b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index ef4806f27cf..e5f2e362ab0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-debug.h"
#include "iwl-eeprom-read.h"
#include "iwl-io.h"
@@ -460,4 +461,4 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
return ret;
}
-EXPORT_SYMBOL_GPL(iwl_read_eeprom);
+IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index b2588c5cbf9..8e941f8bd7d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index f5592fb3b1e..484d318245f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 90873eca35f..8b6c6fd95ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index b545178e46e..43561857424 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -73,12 +73,14 @@
* treats good CRC threshold as a boolean
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
+ IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
};
/* The default calibrate table size if not specified by firmware file */
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 276410d82de..305c81f2c2b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -29,6 +29,7 @@
#include <linux/device.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
@@ -49,7 +50,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(iwl_poll_bit);
+IWL_EXPORT_SYMBOL(iwl_poll_bit);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
@@ -62,7 +63,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
return value;
}
-EXPORT_SYMBOL_GPL(iwl_read_direct32);
+IWL_EXPORT_SYMBOL(iwl_read_direct32);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
@@ -73,7 +74,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
iwl_trans_release_nic_access(trans, &flags);
}
}
-EXPORT_SYMBOL_GPL(iwl_write_direct32);
+IWL_EXPORT_SYMBOL(iwl_write_direct32);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
@@ -89,7 +90,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
+IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
@@ -115,7 +116,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
}
return val;
}
-EXPORT_SYMBOL_GPL(iwl_read_prph);
+IWL_EXPORT_SYMBOL(iwl_read_prph);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
@@ -126,7 +127,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
iwl_trans_release_nic_access(trans, &flags);
}
}
-EXPORT_SYMBOL_GPL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
@@ -138,7 +139,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
iwl_trans_release_nic_access(trans, &flags);
}
}
-EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
+IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask)
@@ -151,7 +152,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
iwl_trans_release_nic_access(trans, &flags);
}
}
-EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
+IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
@@ -164,4 +165,4 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
iwl_trans_release_nic_access(trans, &flags);
}
}
-EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
+IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 2c2a729092f..3cc39ffe8ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -109,7 +109,7 @@ struct iwl_mod_params {
int sw_crypto;
unsigned int disable_11n;
int amsdu_size_8K;
- int restart_fw;
+ bool restart_fw;
bool plcp_check;
int wd_disable;
bool bt_coex_active;
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index c3affbc62cd..940b8a9d528 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,7 @@
#include <linux/sched.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-notif-wait.h"
@@ -72,7 +73,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
INIT_LIST_HEAD(&notif_wait->notif_waits);
init_waitqueue_head(&notif_wait->notif_waitq);
}
-EXPORT_SYMBOL_GPL(iwl_notification_wait_init);
+IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt)
@@ -117,7 +118,7 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
if (triggered)
wake_up_all(&notif_wait->notif_waitq);
}
-EXPORT_SYMBOL_GPL(iwl_notification_wait_notify);
+IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
@@ -130,7 +131,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
wake_up_all(&notif_wait->notif_waitq);
}
-EXPORT_SYMBOL_GPL(iwl_abort_notification_waits);
+IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -154,7 +155,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
list_add(&wait_entry->list, &notif_wait->notif_waits);
spin_unlock_bh(&notif_wait->notif_wait_lock);
}
-EXPORT_SYMBOL_GPL(iwl_init_notification_wait);
+IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry,
@@ -178,7 +179,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
return -ETIMEDOUT;
return 0;
}
-EXPORT_SYMBOL_GPL(iwl_wait_notification);
+IWL_EXPORT_SYMBOL(iwl_wait_notification);
void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry)
@@ -187,4 +188,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
list_del(&wait_entry->list);
spin_unlock_bh(&notif_wait->notif_wait_lock);
}
-EXPORT_SYMBOL_GPL(iwl_remove_notification);
+IWL_EXPORT_SYMBOL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index c2ce764463a..2e2f1c8c99f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index a70213bdb83..6199a0a597a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,6 +62,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
@@ -149,6 +150,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
* @NVM_CHANNEL_DFS: dynamic freq selection candidate
* @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
* @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
+ * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
*/
enum iwl_nvm_channel_flags {
NVM_CHANNEL_VALID = BIT(0),
@@ -158,6 +161,8 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DFS = BIT(7),
NVM_CHANNEL_WIDE = BIT(8),
NVM_CHANNEL_40MHZ = BIT(9),
+ NVM_CHANNEL_80MHZ = BIT(10),
+ NVM_CHANNEL_160MHZ = BIT(11),
};
#define CHECK_AND_PRINT_I(x) \
@@ -210,6 +215,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
else
channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
}
+ if (!(ch_flags & NVM_CHANNEL_80MHZ))
+ channel->flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (!(ch_flags & NVM_CHANNEL_160MHZ))
+ channel->flags |= IEEE80211_CHAN_NO_160MHZ;
if (!(ch_flags & NVM_CHANNEL_IBSS))
channel->flags |= IEEE80211_CHAN_NO_IBSS;
@@ -245,6 +254,43 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
+static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_vht_cap *vht_cap)
+{
+ /* For now, assume new devices with NVM are VHT capable */
+
+ vht_cap->vht_supported = true;
+
+ vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ if (iwlwifi_mod_params.amsdu_size_8K)
+ vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+
+ vht_cap->vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
+
+ if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
+ vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+ /* this works because NOT_SUPPORTED == 3 */
+ vht_cap->vht_mcs.rx_mcs_map |=
+ cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
+ }
+
+ vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+}
+
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data, const __le16 *nvm_sw)
{
@@ -268,6 +314,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
n_used += iwl_init_sband_channels(data, sband, n_channels,
IEEE80211_BAND_5GHZ);
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+ iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
@@ -343,4 +390,4 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
return data;
}
-EXPORT_SYMBOL_GPL(iwl_parse_nvm_data);
+IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
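
Standalone illustration (plain C, not driver code) of how iwl_init_vht_hw_capab() above packs the 16-bit MCS map: eight 2-bit fields, one per spatial stream, where 2 means MCS 0-9 and 3 means not supported; in the single-antenna case ORing 3 into the second field disables that stream regardless of its previous value.

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9	2	/* IEEE80211_VHT_MCS_SUPPORT_0_9 */
#define VHT_MCS_NOT_SUPPORTED	3	/* IEEE80211_VHT_MCS_NOT_SUPPORTED */

int main(void)
{
	uint16_t map = 0;
	int nss;

	/* two streams support MCS 0-9, the remaining six are disabled */
	for (nss = 0; nss < 8; nss++)
		map |= (nss < 2 ? VHT_MCS_SUPPORT_0_9 : VHT_MCS_NOT_SUPPORTED)
			<< (2 * nss);
	printf("two streams:   0x%04x\n", map);		/* 0xfffa */

	/* single antenna: OR NOT_SUPPORTED into stream 2's field; this
	 * works because 2 | 3 == 3 (the "NOT_SUPPORTED == 3" comment) */
	map |= VHT_MCS_NOT_SUPPORTED << 2;
	printf("single stream: 0x%04x\n", map);		/* 0xfffe */
	return 0;
}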
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index b2692bd287f..e57fb989661 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4a680019e11..98c7aa7346d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 3392011a876..25745daa0d5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -65,6 +65,7 @@
#include <linux/string.h>
#include <linux/export.h>
+#include "iwl-drv.h"
#include "iwl-phy-db.h"
#include "iwl-debug.h"
#include "iwl-op-mode.h"
@@ -149,7 +150,7 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
/* TODO: add default values of the phy db. */
return phy_db;
}
-EXPORT_SYMBOL(iwl_phy_db_init);
+IWL_EXPORT_SYMBOL(iwl_phy_db_init);
/*
* get phy db section: returns a pointer to a phy db section specified by
@@ -215,7 +216,7 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
kfree(phy_db);
}
-EXPORT_SYMBOL(iwl_phy_db_free);
+IWL_EXPORT_SYMBOL(iwl_phy_db_free);
int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
gfp_t alloc_ctx)
@@ -260,7 +261,7 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
return 0;
}
-EXPORT_SYMBOL(iwl_phy_db_set_section);
+IWL_EXPORT_SYMBOL(iwl_phy_db_set_section);
static int is_valid_channel(u16 ch_id)
{
@@ -495,4 +496,4 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
"Finished sending phy db non channel data\n");
return 0;
}
-EXPORT_SYMBOL(iwl_send_phy_db_data);
+IWL_EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
index d0e43d96ab3..ce983af7964 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index f76e9cad775..386f2a7c87c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
index ce0c67b425e..efff2986b5b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -64,6 +64,7 @@
#include <linux/export.h>
#include <net/netlink.h>
+#include "iwl-drv.h"
#include "iwl-io.h"
#include "iwl-fh.h"
#include "iwl-prph.h"
@@ -653,7 +654,7 @@ int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
}
return 0;
}
-EXPORT_SYMBOL_GPL(iwl_test_parse);
+IWL_EXPORT_SYMBOL(iwl_test_parse);
/*
* Handle test commands.
@@ -715,7 +716,7 @@ int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
}
return result;
}
-EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
+IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
struct netlink_callback *cb)
@@ -803,7 +804,7 @@ int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
}
return result;
}
-EXPORT_SYMBOL_GPL(iwl_test_dump);
+IWL_EXPORT_SYMBOL(iwl_test_dump);
/*
 * Multicast spontaneous messages from the device to user space.
@@ -849,4 +850,4 @@ void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
if (tst->notify)
iwl_test_send_rx(tst, rxb);
}
-EXPORT_SYMBOL_GPL(iwl_test_rx);
+IWL_EXPORT_SYMBOL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
index 7fbf4d717ca..8fbd2170484 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index a963f45c684..98f48a9afc9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0cac2b7af78..7f9c254292a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -114,9 +114,6 @@
* completely agnostic to these differences.
 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
*/
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
#define SEQ_TO_INDEX(s) ((s) & 0xff)
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 807b250ec39..2acc44b4098 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o
+iwlmvm-y += power.o bt-coex.o
iwlmvm-y += led.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
index 73d24aacb90..93fd1457954 100644
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
new file mode 100644
index 00000000000..47954deb649
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -0,0 +1,347 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "fw-api-bt-coex.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-debug.h"
+
+#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
+ [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
+ ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
+
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
+ BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
+ BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
+ BT_COEX_PRIO_TBL_PRIO_LOW, 0),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
+ BT_COEX_PRIO_TBL_PRIO_LOW, 1),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
+ BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
+ BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
+ BT_COEX_PRIO_TBL_DISABLED, 0),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
+ BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
+ BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
+ EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
+ BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
+ 0, 0, 0, 0, 0, 0,
+};
+
+#undef EVENT_PRIO_ANT
+
+int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
+{
+ return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
+ sizeof(struct iwl_bt_coex_prio_tbl_cmd),
+ &iwl_bt_prio_tbl);
+}
+
+static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
+{
+ struct iwl_bt_coex_prot_env_cmd env_cmd;
+ int ret;
+
+ env_cmd.action = action;
+ env_cmd.type = type;
+ ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
+ sizeof(env_cmd), &env_cmd);
+ if (ret)
+ IWL_ERR(mvm, "failed to send BT env command\n");
+ return ret;
+}
+
+enum iwl_bt_kill_msk {
+ BT_KILL_MSK_DEFAULT,
+ BT_KILL_MSK_SCO_HID_A2DP,
+ BT_KILL_MSK_REDUCED_TXPOW,
+ BT_KILL_MSK_MAX,
+};
+
+static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+ 0xffffffff,
+ 0xfffffc00,
+ 0,
+};
+
+static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+ 0xffffffff,
+ 0xfffffc00,
+ 0,
+};
+
+#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
+
+/* Tight Coex */
+static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaeaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xc0004000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+};
+
+/* Loose Coex */
+static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaeaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xcc00ff28),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0xcc00aaaa),
+ cpu_to_le32(0x0000aaaa),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0xf0005000),
+ cpu_to_le32(0xf0005000),
+};
+
+/* Full concurrency */
+static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0xaaaaaaaa),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+ cpu_to_le32(0x00000000),
+};
+
+/* BT Antenna Coupling Threshold (dB) */
+#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
+
+int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
+{
+ struct iwl_bt_coex_cmd cmd = {
+ .max_kill = 5,
+ .bt3_time_t7_value = 1,
+ .bt3_prio_sample_time = 2,
+ .bt3_timer_t2_value = 0xc,
+ };
+ int ret;
+
+ cmd.flags = iwlwifi_mod_params.bt_coex_active ?
+ BT_COEX_NW : BT_COEX_DISABLE;
+ cmd.flags |= iwlwifi_mod_params.bt_ch_announce ?
+ BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN : 0;
+ cmd.flags |= BT_SYNC_2_BT_DISABLE;
+
+ cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+ BT_VALID_BT_PRIO_BOOST |
+ BT_VALID_MAX_KILL |
+ BT_VALID_3W_TMRS |
+ BT_VALID_KILL_ACK |
+ BT_VALID_KILL_CTS |
+ BT_VALID_REDUCED_TX_POWER |
+ BT_VALID_LUT);
+
+ if (iwlwifi_mod_params.ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD)
+ memcpy(&cmd.decision_lut, iwl_loose_lookup,
+ sizeof(iwl_tight_lookup));
+ else
+ memcpy(&cmd.decision_lut, iwl_tight_lookup,
+ sizeof(iwl_tight_lookup));
+
+ cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+ cmd.kill_ack_msk =
+ cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
+ cmd.kill_cts_msk =
+ cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
+
+ /* go to CALIB state in internal BT-Coex state machine */
+ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+
+ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+
+struct iwl_bt_notif_iterator_data {
+ struct iwl_mvm *mvm;
+ struct iwl_bt_coex_profile_notif *notif;
+};
+
+static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_bt_notif_iterator_data *data = _data;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ enum ieee80211_smps_mode smps_mode;
+ enum ieee80211_band band;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf && chanctx_conf->def.chan)
+ band = chanctx_conf->def.chan->band;
+ else
+ band = -1;
+ rcu_read_unlock();
+
+ if (band != IEEE80211_BAND_2GHZ)
+ return;
+
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+ if (data->notif->bt_status)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+
+ if (data->notif->bt_traffic_load)
+ smps_mode = IEEE80211_SMPS_STATIC;
+
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d: bt_status %d traffic_load %d smps_req %d\n",
+ mvmvif->id, data->notif->bt_status,
+ data->notif->bt_traffic_load, smps_mode);
+
+ ieee80211_request_smps(vif, smps_mode);
+}
+
+int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *dev_cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+ struct iwl_bt_notif_iterator_data data = {
+ .mvm = mvm,
+ .notif = notif,
+ };
+ struct iwl_bt_coex_cmd cmd = {};
+ enum iwl_bt_kill_msk bt_kill_msk;
+
+ IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+ IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
+ IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
+ IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
+ IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
+ notif->bt_agg_traffic_load);
+ IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+
+ /* remember this notification for future use: rssi fluctuations */
+ memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
+
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bt_notif_iterator, &data);
+
+ /* Low latency BT profile is active: give higher prio to BT */
+ if (BT_MBOX_MSG(notif, 3, SCO_STATE) ||
+ BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
+ BT_MBOX_MSG(notif, 3, SNIFF_STATE))
+ bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
+ else
+ bt_kill_msk = BT_KILL_MSK_DEFAULT;
+
+ /* Don't send HCMD if there is no update */
+ if (bt_kill_msk == mvm->bt_kill_msk)
+ return 0;
+
+ IWL_DEBUG_COEX(mvm,
+		       "Update kill_msk: %d\n\t SCO %sactive A2DP %sactive SNIFF %sactive\n",
+ bt_kill_msk,
+ BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
+ BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
+ BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
+
+ mvm->bt_kill_msk = bt_kill_msk;
+ cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+ cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+
+ cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+
+ if (iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, sizeof(cmd), &cmd))
+		IWL_ERR(mvm, "Failed to send BT Coex CMD\n");
+
+ /* This handler is ASYNC */
+ return 0;
+}
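
Standalone sketch (plain C, not driver code) of the kill-mask handling in iwl_mvm_rx_bt_coex_notif() above: a low-latency BT profile (SCO, A2DP or sniff active) selects the reduced ACK-kill mask, and a new BT_CONFIG command is only sent when the selected mask actually changes. The mask values are copied from the tables above; the boolean inputs stand in for the BT_MBOX_MSG() decoding.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum bt_kill_msk { KILL_MSK_DEFAULT, KILL_MSK_SCO_HID_A2DP, KILL_MSK_MAX };

static const uint32_t ack_kill_msk[KILL_MSK_MAX] = { 0xffffffff, 0xfffffc00 };
static enum bt_kill_msk cur_msk = KILL_MSK_DEFAULT;

/* returns true when a BT_CONFIG update would be sent */
static bool update_kill_msk(bool sco, bool a2dp, bool sniff)
{
	enum bt_kill_msk new_msk = (sco || a2dp || sniff) ?
				   KILL_MSK_SCO_HID_A2DP : KILL_MSK_DEFAULT;

	if (new_msk == cur_msk)
		return false;		/* no change, skip the host command */

	cur_msk = new_msk;
	printf("send BT_CONFIG, kill_ack_msk=0x%08x\n",
	       (unsigned int)ack_kill_msk[new_msk]);
	return true;
}

int main(void)
{
	update_kill_msk(true, false, false);	/* SCO starts: send update */
	update_kill_msk(false, true, false);	/* still low latency: skipped */
	update_kill_msk(false, false, false);	/* back to default: send update */
	return 0;
}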
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 994c8c263dc..d4578cefe44 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,8 +62,10 @@
*****************************************************************************/
#include <linux/etherdevice.h>
+#include <linux/ip.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
+#include <net/tcp.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
@@ -402,6 +404,233 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
sizeof(cmd), &cmd);
}
+enum iwl_mvm_tcp_packet_type {
+ MVM_TCP_TX_SYN,
+ MVM_TCP_RX_SYNACK,
+ MVM_TCP_TX_DATA,
+ MVM_TCP_RX_ACK,
+ MVM_TCP_RX_WAKE,
+ MVM_TCP_TX_FIN,
+};
+
+static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
+{
+ __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
+ return cpu_to_le16(be16_to_cpu((__force __be16)check));
+}
+
+static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_wowlan_tcp *tcp,
+ void *_pkt, u8 *mask,
+ __le16 *pseudo_hdr_csum,
+ enum iwl_mvm_tcp_packet_type ptype)
+{
+ struct {
+ struct ethhdr eth;
+ struct iphdr ip;
+ struct tcphdr tcp;
+ u8 data[];
+ } __packed *pkt = _pkt;
+ u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
+ int i;
+
+	pkt->eth.h_proto = cpu_to_be16(ETH_P_IP);
+ pkt->ip.version = 4;
+ pkt->ip.ihl = 5;
+ pkt->ip.protocol = IPPROTO_TCP;
+
+ switch (ptype) {
+ case MVM_TCP_TX_SYN:
+ case MVM_TCP_TX_DATA:
+ case MVM_TCP_TX_FIN:
+ memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
+ memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
+ pkt->ip.ttl = 128;
+ pkt->ip.saddr = tcp->src;
+ pkt->ip.daddr = tcp->dst;
+ pkt->tcp.source = cpu_to_be16(tcp->src_port);
+ pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
+ /* overwritten for TX SYN later */
+ pkt->tcp.doff = sizeof(struct tcphdr) / 4;
+ pkt->tcp.window = cpu_to_be16(65000);
+ break;
+ case MVM_TCP_RX_SYNACK:
+ case MVM_TCP_RX_ACK:
+ case MVM_TCP_RX_WAKE:
+ memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
+ memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
+ pkt->ip.saddr = tcp->dst;
+ pkt->ip.daddr = tcp->src;
+ pkt->tcp.source = cpu_to_be16(tcp->dst_port);
+ pkt->tcp.dest = cpu_to_be16(tcp->src_port);
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ switch (ptype) {
+ case MVM_TCP_TX_SYN:
+ /* firmware assumes 8 option bytes - 8 NOPs for now */
+ memset(pkt->data, 0x01, 8);
+ ip_tot_len += 8;
+ pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
+ pkt->tcp.syn = 1;
+ break;
+ case MVM_TCP_TX_DATA:
+ ip_tot_len += tcp->payload_len;
+ memcpy(pkt->data, tcp->payload, tcp->payload_len);
+ pkt->tcp.psh = 1;
+ pkt->tcp.ack = 1;
+ break;
+ case MVM_TCP_TX_FIN:
+ pkt->tcp.fin = 1;
+ pkt->tcp.ack = 1;
+ break;
+ case MVM_TCP_RX_SYNACK:
+ pkt->tcp.syn = 1;
+ pkt->tcp.ack = 1;
+ break;
+ case MVM_TCP_RX_ACK:
+ pkt->tcp.ack = 1;
+ break;
+ case MVM_TCP_RX_WAKE:
+ ip_tot_len += tcp->wake_len;
+ pkt->tcp.psh = 1;
+ pkt->tcp.ack = 1;
+ memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
+ break;
+ }
+
+ switch (ptype) {
+ case MVM_TCP_TX_SYN:
+ case MVM_TCP_TX_DATA:
+ case MVM_TCP_TX_FIN:
+ pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
+ pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
+ break;
+ case MVM_TCP_RX_WAKE:
+ for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
+ u8 tmp = tcp->wake_mask[i];
+ mask[i + 6] |= tmp << 6;
+ if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
+ mask[i + 7] = tmp >> 2;
+ }
+ /* fall through for ethernet/IP/TCP headers mask */
+ case MVM_TCP_RX_SYNACK:
+ case MVM_TCP_RX_ACK:
+ mask[0] = 0xff; /* match ethernet */
+ /*
+ * match ethernet, ip.version, ip.ihl
+ * the ip.ihl half byte is really masked out by firmware
+ */
+ mask[1] = 0x7f;
+ mask[2] = 0x80; /* match ip.protocol */
+ mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
+ mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
+ mask[5] = 0x80; /* match tcp flags */
+ /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
+ break;
+ };
+
+ *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
+ pkt->ip.saddr, pkt->ip.daddr);
+}
+
+static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_wowlan_tcp *tcp)
+{
+ struct iwl_wowlan_remote_wake_config *cfg;
+ struct iwl_host_cmd cmd = {
+ .id = REMOTE_WAKE_CONFIG_CMD,
+ .len = { sizeof(*cfg), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
+ };
+ int ret;
+
+ if (!tcp)
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+ cmd.data[0] = cfg;
+
+ cfg->max_syn_retries = 10;
+ cfg->max_data_retries = 10;
+ cfg->tcp_syn_ack_timeout = 1; /* seconds */
+ cfg->tcp_ack_timeout = 1; /* seconds */
+
+ /* SYN (TX) */
+ iwl_mvm_build_tcp_packet(
+ mvm, vif, tcp, cfg->syn_tx.data, NULL,
+ &cfg->syn_tx.info.tcp_pseudo_header_checksum,
+ MVM_TCP_TX_SYN);
+ cfg->syn_tx.info.tcp_payload_length = 0;
+
+ /* SYN/ACK (RX) */
+ iwl_mvm_build_tcp_packet(
+ mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
+ &cfg->synack_rx.info.tcp_pseudo_header_checksum,
+ MVM_TCP_RX_SYNACK);
+ cfg->synack_rx.info.tcp_payload_length = 0;
+
+ /* KEEPALIVE/ACK (TX) */
+ iwl_mvm_build_tcp_packet(
+ mvm, vif, tcp, cfg->keepalive_tx.data, NULL,
+ &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
+ MVM_TCP_TX_DATA);
+ cfg->keepalive_tx.info.tcp_payload_length =
+ cpu_to_le16(tcp->payload_len);
+ cfg->sequence_number_offset = tcp->payload_seq.offset;
+ /* length must be 0..4, the field is little endian */
+ cfg->sequence_number_length = tcp->payload_seq.len;
+ cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
+ cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
+ if (tcp->payload_tok.len) {
+ cfg->token_offset = tcp->payload_tok.offset;
+ cfg->token_length = tcp->payload_tok.len;
+ cfg->num_tokens =
+			cpu_to_le16(tcp->tokens_size / tcp->payload_tok.len);
+ memcpy(cfg->tokens, tcp->payload_tok.token_stream,
+ tcp->tokens_size);
+ } else {
+ /* set tokens to max value to almost never run out */
+ cfg->num_tokens = cpu_to_le16(65535);
+ }
+
+ /* ACK (RX) */
+ iwl_mvm_build_tcp_packet(
+ mvm, vif, tcp, cfg->keepalive_ack_rx.data,
+ cfg->keepalive_ack_rx.rx_mask,
+ &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
+ MVM_TCP_RX_ACK);
+ cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
+
+ /* WAKEUP (RX) */
+ iwl_mvm_build_tcp_packet(
+ mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
+ &cfg->wake_rx.info.tcp_pseudo_header_checksum,
+ MVM_TCP_RX_WAKE);
+ cfg->wake_rx.info.tcp_payload_length =
+ cpu_to_le16(tcp->wake_len);
+
+ /* FIN */
+ iwl_mvm_build_tcp_packet(
+ mvm, vif, tcp, cfg->fin_tx.data, NULL,
+ &cfg->fin_tx.info.tcp_pseudo_header_checksum,
+ MVM_TCP_TX_FIN);
+ cfg->fin_tx.info.tcp_payload_length = 0;
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(cfg);
+
+ return ret;
+}
+
struct iwl_d3_iter_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
@@ -640,6 +869,22 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
d3_cfg_cmd.wakeup_flags |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+ if (wowlan->tcp) {
+ /*
+ * The firmware currently doesn't really look at these, only
+ * the IWL_WOWLAN_WAKEUP_LINK_CHANGE bit. We have to set that
+ * reason bit since losing the connection to the AP implies
+ * losing the TCP connection.
+ * Set the flags anyway as long as they exist, in case this
+ * will be changed in the firmware.
+ */
+ wowlan_config_cmd.wakeup_filter |=
+ cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
+ IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
+ IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+ }
+
iwl_mvm_cancel_scan(mvm);
iwl_trans_stop_device(mvm->trans);
@@ -755,6 +1000,10 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (ret)
goto out;
+ ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
+ if (ret)
+ goto out;
+
/* must be last -- this switches firmware state */
ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
@@ -874,6 +1123,15 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
wakeup.four_way_handshake = true;
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+ wakeup.tcp_connlost = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+ wakeup.tcp_nomoretokens = true;
+
+ if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+ wakeup.tcp_match = true;
+
if (status->wake_packet_bufsize) {
int pktsize = le32_to_cpu(status->wake_packet_bufsize);
int pktlen = le32_to_cpu(status->wake_packet_length);
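
Standalone illustration (plain C, not driver code) of the RX pattern mask layout used by the d3.c code above, as an interpretation of the mask-building logic: the firmware mask carries one bit per packet byte (bit b of mask[i] covers packet byte 8*i + b), which is how the fixed header-mask bytes and the 6-bit shift of the user's wake mask fit together.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* bit b of mask[i] covers packet byte 8*i + b */
static bool byte_is_matched(const uint8_t *mask, unsigned int pkt_byte)
{
	return mask[pkt_byte / 8] & (1 << (pkt_byte % 8));
}

int main(void)
{
	/* header part of the mask as built for the RX_SYNACK/RX_ACK cases */
	const uint8_t mask[8] = { 0xff, 0x7f, 0x80, 0xfc, 0x3f, 0x80, 0, 0 };

	/* ethernet(14) + IPv4(20) + TCP(20) = 54 header bytes; this is why
	 * the RX_WAKE case shifts the wake_mask by 6 bits into mask[6] */
	printf("ip.protocol (byte 23): %d\n", byte_is_matched(mask, 23)); /* 1 */
	printf("ip.check    (byte 24): %d\n", byte_is_matched(mask, 24)); /* 0 */
	printf("tcp flags   (byte 47): %d\n", byte_is_matched(mask, 47)); /* 1 */
	return 0;
}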
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index c1bdb558212..b080b4ba545 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -69,12 +69,6 @@ struct iwl_dbgfs_mvm_ctx {
struct ieee80211_vif *vif;
};
-static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -306,10 +300,130 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
return count;
}
+#define BT_MBOX_MSG(_notif, _num, _field) \
+ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
+ >> BT_MBOX##_num##_##_field##_POS)
+
+
+#define BT_MBOX_PRINT(_num, _field, _end) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t%s: %d%s", \
+ #_field, \
+ BT_MBOX_MSG(notif, _num, _field), \
+			 (_end) ? "\n" : ", ");
+
+static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+ char *buf;
+ int ret, pos = 0, bufsz = sizeof(char) * 1024;
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
+
+ BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
+ BT_MBOX_PRINT(0, LE_PROF1, false);
+ BT_MBOX_PRINT(0, LE_PROF2, false);
+ BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
+ BT_MBOX_PRINT(0, CHL_SEQ_N, false);
+ BT_MBOX_PRINT(0, INBAND_S, false);
+ BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
+ BT_MBOX_PRINT(0, LE_SCAN, false);
+ BT_MBOX_PRINT(0, LE_ADV, false);
+ BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
+ BT_MBOX_PRINT(0, OPEN_CON_1, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
+
+ BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
+ BT_MBOX_PRINT(1, IP_SR, false);
+ BT_MBOX_PRINT(1, LE_MSTR, false);
+ BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
+ BT_MBOX_PRINT(1, MSG_TYPE, false);
+ BT_MBOX_PRINT(1, SSN, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
+
+ BT_MBOX_PRINT(2, SNIFF_ACT, false);
+ BT_MBOX_PRINT(2, PAG, false);
+ BT_MBOX_PRINT(2, INQUIRY, false);
+ BT_MBOX_PRINT(2, CONN, false);
+ BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
+ BT_MBOX_PRINT(2, DISC, false);
+ BT_MBOX_PRINT(2, SCO_TX_ACT, false);
+ BT_MBOX_PRINT(2, SCO_RX_ACT, false);
+ BT_MBOX_PRINT(2, ESCO_RE_TX, false);
+ BT_MBOX_PRINT(2, SCO_DURATION, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
+
+ BT_MBOX_PRINT(3, SCO_STATE, false);
+ BT_MBOX_PRINT(3, SNIFF_STATE, false);
+ BT_MBOX_PRINT(3, A2DP_STATE, false);
+ BT_MBOX_PRINT(3, ACL_STATE, false);
+ BT_MBOX_PRINT(3, MSTR_STATE, false);
+ BT_MBOX_PRINT(3, OBX_STATE, false);
+ BT_MBOX_PRINT(3, OPEN_CON_2, false);
+ BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
+ BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
+ BT_MBOX_PRINT(3, INBAND_P, false);
+ BT_MBOX_PRINT(3, MSG_TYPE_2, false);
+ BT_MBOX_PRINT(3, SSN_2, false);
+ BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
+ notif->bt_status);
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
+ notif->bt_open_conn);
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
+ notif->bt_traffic_load);
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
+ notif->bt_agg_traffic_load);
+ pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
+ notif->bt_ci_compliance);
+
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+#undef BT_MBOX_PRINT
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ bool restart_fw = iwlwifi_mod_params.restart_fw;
+ int ret;
+
+ iwlwifi_mod_params.restart_fw = true;
+
+ mutex_lock(&mvm->mutex);
+
+ /* take the return value to make compiler happy - it will fail anyway */
+ ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
+
+ mutex_unlock(&mvm->mutex);
+
+ iwlwifi_mod_params.restart_fw = restart_fw;
+
+ return count;
+}
+
#define MVM_DEBUGFS_READ_FILE_OPS(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
- .open = iwl_dbgfs_open_file_generic, \
+ .open = simple_open, \
.llseek = generic_file_llseek, \
}
@@ -317,14 +431,14 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
- .open = iwl_dbgfs_open_file_generic, \
+ .open = simple_open, \
.llseek = generic_file_llseek, \
};
#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
- .open = iwl_dbgfs_open_file_generic, \
+ .open = simple_open, \
.llseek = generic_file_llseek, \
};
@@ -345,8 +459,10 @@ MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
@@ -358,8 +474,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
/*
* Create a symlink with mac80211. It will be removed when mac80211
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
new file mode 100644
index 00000000000..05c61d6f384
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -0,0 +1,319 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_bt_coex_h__
+#define __fw_api_bt_coex_h__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define BITS(nb) (BIT(nb) - 1)
+
+/**
+ * enum iwl_bt_coex_flags - flags for BT_COEX command
+ * @BT_CH_PRIMARY_EN:
+ * @BT_CH_SECONDARY_EN:
+ * @BT_NOTIF_COEX_OFF:
+ * @BT_COEX_MODE_POS:
+ * @BT_COEX_MODE_MSK:
+ * @BT_COEX_DISABLE:
+ * @BT_COEX_2W:
+ * @BT_COEX_3W:
+ * @BT_COEX_NW:
+ * @BT_USE_DEFAULTS:
+ * @BT_SYNC_2_BT_DISABLE:
+ * @BT_COEX_CORUNNING_TBL_EN:
+ */
+enum iwl_bt_coex_flags {
+ BT_CH_PRIMARY_EN = BIT(0),
+ BT_CH_SECONDARY_EN = BIT(1),
+ BT_NOTIF_COEX_OFF = BIT(2),
+ BT_COEX_MODE_POS = 3,
+ BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
+ BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
+ BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
+ BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
+ BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
+ BT_USE_DEFAULTS = BIT(6),
+ BT_SYNC_2_BT_DISABLE = BIT(7),
+ /*
+ * For future use - when the flags will be enlarged
+ * BT_COEX_CORUNNING_TBL_EN = BIT(8),
+ */
+};
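/*
 * Illustrative sketch, not part of the patch: BITS(nb) builds an nb-bit
 * mask, so BITS(3) == 0x7, BT_COEX_MODE_MSK == 0x7 << 3 == 0x38 and
 * BT_COEX_3W == 0x2 << 3 == 0x10. A hypothetical helper that selects the
 * coex mode within the flags byte could look like this:
 */
static inline u8 iwl_bt_coex_set_mode(u8 flags, enum iwl_bt_coex_flags mode)
{
	return (flags & ~BT_COEX_MODE_MSK) | mode;
}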
+
+/*
+ * indicates what has changed in the BT_COEX command.
+ */
+enum iwl_bt_coex_valid_bit_msk {
+ BT_VALID_ENABLE = BIT(0),
+ BT_VALID_BT_PRIO_BOOST = BIT(1),
+ BT_VALID_MAX_KILL = BIT(2),
+ BT_VALID_3W_TMRS = BIT(3),
+ BT_VALID_KILL_ACK = BIT(4),
+ BT_VALID_KILL_CTS = BIT(5),
+ BT_VALID_REDUCED_TX_POWER = BIT(6),
+ BT_VALID_LUT = BIT(7),
+ BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8),
+ BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9),
+ BT_VALID_MULTI_PRIO_LUT = BIT(10),
+ BT_VALID_TRM_KICK_FILTER = BIT(11),
+ BT_VALID_CORUN_LUT_20 = BIT(12),
+ BT_VALID_CORUN_LUT_40 = BIT(13),
+ BT_VALID_ANT_ISOLATION = BIT(14),
+ BT_VALID_ANT_ISOLATION_THRS = BIT(15),
+ /*
+ * For future use - when the valid flags will be enlarged
+ * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
+ * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
+ */
+};
+
+/**
+ * enum iwl_bt_reduced_tx_power - allows reducing Tx power for WiFi frames
+ * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
+ * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
+ *
+ * This mechanism allows BT and WiFi to run concurrently. Since WiFi
+ * reduces its Tx power, it can operate alongside BT, which reduces the
+ * number of WiFi frames killed by BT.
+ */
+enum iwl_bt_reduced_tx_power {
+ BT_REDUCED_TX_POWER_CTL = BIT(0),
+ BT_REDUCED_TX_POWER_DATA = BIT(1),
+};
+
+#define BT_COEX_LUT_SIZE (12)
+
+/**
+ * struct iwl_bt_coex_cmd - bt coex configuration command
+ * @flags: &enum iwl_bt_coex_flags
+ * @lead_time:
+ * @max_kill:
+ * @bt3_time_t7_value:
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @bt3_prio_sample_time:
+ * @bt3_timer_t2_value:
+ * @bt4_reaction_time:
+ * @decision_lut[12]:
+ * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
+ * @bt_prio_boost: values for PTA boost register
+ * @wifi_tx_prio_boost: SW boost of wifi tx priority
+ * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwl_bt_coex_cmd {
+ u8 flags;
+ u8 lead_time;
+ u8 max_kill;
+ u8 bt3_time_t7_value;
+ __le32 kill_ack_msk;
+ __le32 kill_cts_msk;
+ u8 bt3_prio_sample_time;
+ u8 bt3_timer_t2_value;
+ __le16 bt4_reaction_time;
+ __le32 decision_lut[BT_COEX_LUT_SIZE];
+ u8 bt_reduced_tx_power;
+ u8 reserved;
+ __le16 valid_bit_msk;
+ __le32 bt_prio_boost;
+ u8 reserved2;
+ u8 wifi_tx_prio_boost;
+ __le16 wifi_rx_prio_boost;
+} __packed; /* BT_COEX_CMD_API_S_VER_3 */
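/*
 * Illustrative sketch, not part of the patch: iwl_send_bt_init_conf()
 * (declared later in this patch in mvm.h) is expected to fill this
 * structure and send it as the BT_CONFIG (0x9b) host command. The helper
 * name and the field values below are made up for illustration only.
 */
static int iwl_bt_coex_send_example(struct iwl_mvm *mvm)
{
	struct iwl_bt_coex_cmd cmd = {
		.flags = BT_CH_PRIMARY_EN | BT_COEX_NW,
		.bt_reduced_tx_power = BT_REDUCED_TX_POWER_CTL,
		.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
					     BT_VALID_REDUCED_TX_POWER),
	};

	return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
				    sizeof(cmd), &cmd);
}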
+
+#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
+ BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
+ BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
+
+enum iwl_bt_mxbox_dw0 {
+ BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
+ BT_MBOX(0, LE_PROF1, 3, 1),
+ BT_MBOX(0, LE_PROF2, 4, 1),
+ BT_MBOX(0, LE_PROF_OTHER, 5, 1),
+ BT_MBOX(0, CHL_SEQ_N, 8, 4),
+ BT_MBOX(0, INBAND_S, 13, 1),
+ BT_MBOX(0, LE_MIN_RSSI, 16, 4),
+ BT_MBOX(0, LE_SCAN, 20, 1),
+ BT_MBOX(0, LE_ADV, 21, 1),
+ BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
+ BT_MBOX(0, OPEN_CON_1, 28, 2),
+};
+
+enum iwl_bt_mxbox_dw1 {
+ BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
+ BT_MBOX(1, IP_SR, 4, 1),
+ BT_MBOX(1, LE_MSTR, 5, 1),
+ BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
+ BT_MBOX(1, MSG_TYPE, 16, 3),
+ BT_MBOX(1, SSN, 19, 2),
+};
+
+enum iwl_bt_mxbox_dw2 {
+ BT_MBOX(2, SNIFF_ACT, 0, 3),
+ BT_MBOX(2, PAG, 3, 1),
+ BT_MBOX(2, INQUIRY, 4, 1),
+ BT_MBOX(2, CONN, 5, 1),
+ BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
+ BT_MBOX(2, DISC, 13, 1),
+ BT_MBOX(2, SCO_TX_ACT, 16, 2),
+ BT_MBOX(2, SCO_RX_ACT, 18, 2),
+ BT_MBOX(2, ESCO_RE_TX, 20, 2),
+ BT_MBOX(2, SCO_DURATION, 24, 6),
+};
+
+enum iwl_bt_mxbox_dw3 {
+ BT_MBOX(3, SCO_STATE, 0, 1),
+ BT_MBOX(3, SNIFF_STATE, 1, 1),
+ BT_MBOX(3, A2DP_STATE, 2, 1),
+ BT_MBOX(3, ACL_STATE, 3, 1),
+ BT_MBOX(3, MSTR_STATE, 4, 1),
+ BT_MBOX(3, OBX_STATE, 5, 1),
+ BT_MBOX(3, OPEN_CON_2, 8, 2),
+ BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
+ BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
+ BT_MBOX(3, INBAND_P, 13, 1),
+ BT_MBOX(3, MSG_TYPE_2, 16, 3),
+ BT_MBOX(3, SSN_2, 19, 2),
+ BT_MBOX(3, UPDATE_REQUEST, 21, 1),
+};
+
+#define BT_MBOX_MSG(_notif, _num, _field) \
+ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
+ >> BT_MBOX##_num##_##_field##_POS)
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @mbox_msg: message from BT to WiFi
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ */
+struct iwl_bt_coex_profile_notif {
+ __le32 mbox_msg[4];
+ u8 bt_status;
+ u8 bt_open_conn;
+ u8 bt_traffic_load;
+ u8 bt_agg_traffic_load;
+ u8 bt_ci_compliance;
+ u8 reserved[3];
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
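/*
 * Illustrative sketch, not part of the patch: BT_MBOX_MSG() extracts one
 * field from the mailbox words carried by this notification, e.g. the BT
 * traffic load reported in dword 3 (the helper name is hypothetical):
 */
static inline u8 iwl_bt_coex_traffic_load(
			const struct iwl_bt_coex_profile_notif *notif)
{
	return BT_MBOX_MSG(notif, 3, TRAFFIC_LOAD);
}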
+
+enum iwl_bt_coex_prio_table_event {
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
+ BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
+ BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3,
+ BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
+ BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
+ BT_COEX_PRIO_TBL_EVT_DTIM = 6,
+ BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
+ BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
+ BT_COEX_PRIO_TBL_EVT_IDLE = 9,
+ BT_COEX_PRIO_TBL_EVT_MAX = 16,
+}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
+
+enum iwl_bt_coex_prio_table_prio {
+ BT_COEX_PRIO_TBL_DISABLED = 0,
+ BT_COEX_PRIO_TBL_PRIO_LOW = 1,
+ BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
+ BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
+ BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
+ BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
+ BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6,
+ BT_COEX_PRIO_TBL_MAX = 8,
+}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
+
+#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0)
+#define BT_COEX_PRIO_TBL_PRIO_POS (1)
+#define BT_COEX_PRIO_TBL_RESERVED_POS (4)
+
+/**
+ * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
+ * @prio_tbl:
+ */
+struct iwl_bt_coex_prio_tbl_cmd {
+ u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
+} __packed;
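/*
 * Illustrative sketch, not part of the patch: each prio_tbl[] entry packs
 * a priority from enum iwl_bt_coex_prio_table_prio at
 * BT_COEX_PRIO_TBL_PRIO_POS, optionally OR'ed with the shared-antenna bit.
 * The events and priorities chosen below are examples only:
 */
static void
iwl_bt_coex_fill_prio_tbl_example(struct iwl_bt_coex_prio_tbl_cmd *cmd)
{
	cmd->prio_tbl[BT_COEX_PRIO_TBL_EVT_INIT_CALIB1] =
		(BT_COEX_PRIO_TBL_PRIO_HIGH << BT_COEX_PRIO_TBL_PRIO_POS) |
		BIT(BT_COEX_PRIO_TBL_SHRD_ANT_POS);
	cmd->prio_tbl[BT_COEX_PRIO_TBL_EVT_DTIM] =
		BT_COEX_PRIO_TBL_PRIO_LOW << BT_COEX_PRIO_TBL_PRIO_POS;
}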
+
+enum iwl_bt_coex_env_action {
+ BT_COEX_ENV_CLOSE = 0,
+ BT_COEX_ENV_OPEN = 1,
+}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
+ * @action: enum %iwl_bt_coex_env_action
+ * @type: enum %iwl_bt_coex_prio_table_event
+ */
+struct iwl_bt_coex_prot_env_cmd {
+ u8 action; /* 0 = closed, 1 = open */
+ u8 type; /* 0 .. 15 */
+ u8 reserved[2];
+} __packed;
+
+#endif /* __fw_api_bt_coex_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index cf6f9a02fb7..51e015d1dfb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -258,7 +258,7 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
- IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
+ /* BIT(11) reserved */
IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
@@ -277,6 +277,55 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
+#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64
+#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128
+#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048
+
+struct iwl_tcp_packet_info {
+ __le16 tcp_pseudo_header_checksum;
+ __le16 tcp_payload_length;
+} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
+
+struct iwl_tcp_packet {
+ struct iwl_tcp_packet_info info;
+ u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+ u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
+} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
+
+struct iwl_remote_wake_packet {
+ struct iwl_tcp_packet_info info;
+ u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+ u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
+} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
+
+struct iwl_wowlan_remote_wake_config {
+ __le32 connection_max_time; /* unused */
+ /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
+ u8 max_syn_retries;
+ u8 max_data_retries;
+ u8 tcp_syn_ack_timeout;
+ u8 tcp_ack_timeout;
+
+ struct iwl_tcp_packet syn_tx;
+ struct iwl_tcp_packet synack_rx;
+ struct iwl_tcp_packet keepalive_ack_rx;
+ struct iwl_tcp_packet fin_tx;
+
+ struct iwl_remote_wake_packet keepalive_tx;
+ struct iwl_remote_wake_packet wake_rx;
+
+ /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
+ u8 sequence_number_offset;
+ u8 sequence_number_length;
+ u8 token_offset;
+ u8 token_length;
+ /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
+ __le32 initial_sequence_number;
+ __le16 keepalive_interval;
+ __le16 num_tokens;
+ u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
+} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
+
/* TODO: NetDetect API */
#endif /* __fw_api_d3_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index ae39b7dfda7..d68640ea41d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index be36b7604b7..127051891e9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index aa3474d0823..fdd33bc0a59 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 670ac8f95e2..b60d1415172 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 0acb53dda22..a30691a8a85 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 2677914bf0a..6d53850c544 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 2adb61f103f..f8d7e88234e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -70,6 +70,7 @@
#include "fw-api-mac.h"
#include "fw-api-power.h"
#include "fw-api-d3.h"
+#include "fw-api-bt-coex.h"
/* queue and FIFO numbers by usage */
enum {
@@ -152,6 +153,7 @@ enum {
BEACON_TEMPLATE_CMD = 0x91,
TX_ANT_CONFIGURATION_CMD = 0x98,
+ BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
/* RF-KILL commands and notifications */
@@ -162,6 +164,11 @@ enum {
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
+ /* BT Coex */
+ BT_COEX_PRIO_TABLE = 0xcc,
+ BT_COEX_PROT_ENV = 0xcd,
+ BT_PROFILE_NOTIFICATION = 0xce,
+
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
@@ -794,6 +801,7 @@ struct iwl_phy_context_cmd {
* @byte_count: frame's byte-count
* @frame_time: frame's time on the air, based on byte count and frame rate
* calculation
+ * @mac_active_msk: what MACs were active when the frame was received
*
* Before each Rx, the device sends this data. It contains PHY information
* about the reception of the packet.
@@ -811,7 +819,7 @@ struct iwl_rx_phy_info {
__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
__le32 rate_n_flags;
__le32 byte_count;
- __le16 reserved2;
+ __le16 mac_active_msk;
__le16 frame_time;
} __packed;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 500f818dba0..1006b3204e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -309,6 +309,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
goto error;
}
+ ret = iwl_send_bt_prio_tbl(mvm);
+ if (ret)
+ goto error;
+
if (read_nvm) {
/* Read nvm */
ret = iwl_nvm_init(mvm);
@@ -414,6 +418,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
goto error;
+ ret = iwl_send_bt_prio_tbl(mvm);
+ if (ret)
+ goto error;
+
+ ret = iwl_send_bt_init_conf(mvm);
+ if (ret)
+ goto error;
+
/* Send phy db control command and then phy db calibration*/
ret = iwl_send_phy_db_data(mvm->phy_db);
if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 011906e73a0..2269a9e5cc6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 341dbc0237e..2779235daa3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -553,9 +553,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
if (vif->bss_conf.qos)
cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+ /* Don't use CTS-to-self as the fw doesn't support it currently. */
if (vif->bss_conf.use_cts_prot)
- cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT |
- MAC_PROT_FLG_SELF_CTS_EN);
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
/*
* I think that we should enable these 2 flags regardless the HT PROT
@@ -651,6 +651,13 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+ /* Allow beacons to pass through as long as we are not associated, or we
+ * do not have DTIM period information */
+ if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+ else
+ cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
+
/* Fill the data specific for station mode */
iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
@@ -714,7 +721,9 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
- cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC);
+
+ /* Override the filter flags to accept only probe requests */
+ cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
/*
* This flag should be set to true when the P2P Device is
@@ -846,10 +855,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
*/
static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_mac_data_ap *ctxt_ap)
+ struct iwl_mac_data_ap *ctxt_ap,
+ bool add)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 curr_dev_time;
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
@@ -861,10 +870,19 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
vif->bss_conf.dtim_period));
ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
- curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
- ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time);
- ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time);
+ /*
+ * Only read the system time when the MAC is being added, when we
+ * just modify the MAC then we should keep the time -- the firmware
+ * can otherwise have a "jumping" TBTT.
+ */
+ if (add)
+ mvmvif->ap_beacon_time =
+ iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+
+ ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
+
+ ctxt_ap->beacon_tsf = 0; /* unused */
/* TODO: Assume that the beacon id == mac context id */
ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
@@ -881,8 +899,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+ /* Also enable probe requests to pass */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
/* Fill the data specific for ap mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap);
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
+ action == FW_CTXT_ACTION_ADD);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -899,7 +921,8 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
/* Fill the data specific for GO mode */
- iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap);
+ iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
+ action == FW_CTXT_ACTION_ADD);
cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7e169b085af..14dd5ee9a01 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -65,7 +65,9 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ip.h>
#include <net/mac80211.h>
+#include <net/tcp.h>
#include "iwl-op-mode.h"
#include "iwl-io.h"
@@ -102,10 +104,33 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
+#ifdef CONFIG_PM_SLEEP
+static const struct nl80211_wowlan_tcp_data_token_feature
+iwl_mvm_wowlan_tcp_token_feature = {
+ .min_len = 0,
+ .max_len = 255,
+ .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
+};
+
+static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
+ .tok = &iwl_mvm_wowlan_tcp_token_feature,
+ .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
+ sizeof(struct ethhdr) -
+ sizeof(struct iphdr) -
+ sizeof(struct tcphdr),
+ .data_interval_max = 65535, /* __le16 in API */
+ .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
+ sizeof(struct ethhdr) -
+ sizeof(struct iphdr) -
+ sizeof(struct tcphdr),
+ .seq = true,
+};
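/*
 * Illustrative note, not part of the patch: with minimum header sizes
 * (14-byte Ethernet, 20-byte IPv4, 20-byte TCP), the limits above come out
 * to 64 - 54 = 10 bytes of payload per TCP keepalive packet and
 * 128 - 54 = 74 bytes for the remote-wake packet.
 */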
+#endif
+
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
struct ieee80211_hw *hw = mvm->hw;
- int num_mac, ret;
+ int num_mac, ret, i;
/* Tell mac80211 our characteristics */
hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -156,11 +181,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
hw->wiphy->addresses = mvm->addresses;
hw->wiphy->n_addresses = 1;
- num_mac = mvm->nvm_data->n_hw_addrs;
- if (num_mac > 1) {
- memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr,
+
+ /* Extract additional MAC addresses if available */
+ num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
+ min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
+
+ for (i = 1; i < num_mac; i++) {
+ memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
ETH_ALEN);
- mvm->addresses[1].addr[5]++;
+ mvm->addresses[i].addr[5]++;
hw->wiphy->n_addresses++;
}
@@ -206,6 +235,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+ hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
}
#endif
@@ -273,12 +303,18 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
break;
case IEEE80211_AMPDU_TX_START:
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+ ret = -EINVAL;
+ break;
+ }
ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
+ ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
@@ -1090,7 +1126,8 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
static int iwl_mvm_roc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *channel,
- int duration)
+ int duration,
+ enum ieee80211_roc_type type)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct cfg80211_chan_def chandef;
@@ -1101,8 +1138,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
return -EINVAL;
}
- IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value,
- duration);
+ IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+ duration, type);
mutex_lock(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index bdae700c769..203eb85e03d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -79,7 +79,7 @@
#include "fw-api.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
-#define IWL_MVM_MAX_ADDRESSES 2
+#define IWL_MVM_MAX_ADDRESSES 5
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50
@@ -174,6 +174,8 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_active;
+ u32 ap_beacon_time;
+
enum iwl_tsf_id tsf_id;
/*
@@ -332,6 +334,10 @@ struct iwl_mvm {
#ifdef CONFIG_PM_SLEEP
int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
#endif
+
+ /* BT-Coex */
+ u8 bt_kill_msk;
+ struct iwl_bt_coex_profile_notif last_bt_notif;
};
/* Extract MVM priv from op_mode and _hw */
@@ -502,4 +508,11 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int idx);
+/* BT Coex */
+int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
+int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
+int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 20016bcbdea..93e3d0f174c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -74,6 +74,9 @@ static const int nvm_to_read[] = {
NVM_SECTION_TYPE_PRODUCTION,
};
+/* Default NVM size to read */
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
+
 /* used to simplify the shared operations on NVM_ACCESS_CMD versions */
union iwl_nvm_access_cmd {
struct iwl_nvm_access_cmd_ver1 ver1;
@@ -193,9 +196,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
int ret;
bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000;
- length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024))
- - sizeof(union iwl_nvm_access_cmd)
- - sizeof(struct iwl_rx_packet);
+ /* Set nvm section read length */
+ length = IWL_NVM_DEFAULT_CHUNK_SIZE;
+
/*
* if length is greater than EEPROM size, truncate it because uCode
* doesn't check it by itself, and exit the loop when reached.
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d0f9c1e0475..828bdddd07e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -230,6 +230,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+ RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
+
RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -293,6 +295,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(NET_DETECT_PROFILES_CMD),
CMD(NET_DETECT_HOTSPOTS_CMD),
CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+ CMD(CARD_STATE_NOTIFICATION),
+ CMD(BT_COEX_PRIO_TABLE),
+ CMD(BT_COEX_PROT_ENV),
+ CMD(BT_PROFILE_NOTIFICATION),
+ CMD(BT_CONFIG),
};
#undef CMD
@@ -363,8 +370,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
- /* TODO: this should really be a TLV */
- if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
trans_cfg.bc_table_dword = true;
if (!iwlwifi_mod_params.wd_disable)
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index b428448f8dd..0d537e035ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 5a92a497879..efb9a6f3faa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 92562846814..df85c49dc59 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 56b636d9ab3..a01a6612677 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -680,12 +680,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
*/
static bool rs_use_green(struct ieee80211_sta *sta)
{
- struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
-
- bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green;
+ /*
+ * There's a bug somewhere in this code that causes the
+ * scaling to get stuck because GF+SGI can't be combined
+ * in SISO rates. Until we find that bug, disable GF, it
+ * has only limited benefit and we still interoperate with
+ * GF APs since we can always receive GF transmissions.
+ */
+ return false;
}
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index b0b190d0ec2..4dfc21a3e83 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9b21b92aa8d..0d3c76b2924 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 274f44e2ef6..4d872d69577 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -101,8 +101,55 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
}
add_sta_cmd.add_modify = update ? 1 : 0;
- /* STA_FLG_FAT_EN_MSK ? */
- /* STA_FLG_MIMO_EN_MSK ? */
+ add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_FAT_EN_MSK |
+ STA_FLG_MIMO_EN_MSK);
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
+ /* fall through */
+ case IEEE80211_STA_RX_BW_80:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
+ /* fall through */
+ case IEEE80211_STA_RX_BW_40:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
+ /* fall through */
+ case IEEE80211_STA_RX_BW_20:
+ if (sta->ht_cap.ht_supported)
+ add_sta_cmd.station_flags |=
+ cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
+ break;
+ }
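	/*
	 * Illustrative note, not part of the patch: the fall-throughs above
	 * are deliberate: an 80 MHz-capable station ends up with the 80, 40
	 * and 20 MHz FAT flags all set, since each width implies support
	 * for the narrower ones.
	 */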
+
+ switch (sta->rx_nss) {
+ case 1:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+ break;
+ case 2:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
+ break;
+ case 3 ... 8:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
+ break;
+ }
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ break;
+ case IEEE80211_SMPS_STATIC:
+ /* override NSS */
+ add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
+ break;
+ case IEEE80211_SMPS_OFF:
+ /* nothing */
+ break;
+ }
if (sta->ht_cap.ht_supported) {
add_sta_cmd.station_flags_msk |=
@@ -340,6 +387,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
+ /* flush its queues here since we are freeing mvm_sta */
+ ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+
/*
* Put a non-NULL since the fw station isn't removed.
* It will be removed after the MAC will be set as
@@ -348,9 +398,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
ERR_PTR(-EINVAL));
- /* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
-
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -686,7 +733,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
- tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
*ssn = tid_data->ssn;
@@ -789,7 +836,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
switch (tid_data->state) {
case IWL_AGG_ON:
- tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
IWL_DEBUG_TX_QUEUES(mvm,
"ssn = %d, next_recl = %d\n",
@@ -834,6 +881,34 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return err;
}
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ u16 txq_id;
+
+ /*
+ * First set the agg state to OFF to avoid calling
+ * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
+ */
+ spin_lock_bh(&mvmsta->lock);
+ txq_id = tid_data->txq_id;
+ IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
+ mvmsta->sta_id, tid, txq_id, tid_data->state);
+ tid_data->state = IWL_AGG_OFF;
+ spin_unlock_bh(&mvmsta->lock);
+
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+
+ iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+ mvm->queue_to_mac80211[tid_data->txq_id] =
+ IWL_INVALID_MAC80211_QUEUE;
+
+ return 0;
+}
+
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
int i;
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 896f88ac814..b0352df981e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -348,6 +348,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u8 buf_size);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index e437e02c714..c2c7f517602 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index 64fb57a5ab4..b36424eda36 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6645efe5c03..0556d5e16f4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -637,7 +637,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
next_reclaimed = ssn;
} else {
/* The next packet to be reclaimed is the one after this one */
- next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10);
+ next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
}
IWL_DEBUG_TX_REPLY(mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 000e842c2ed..e308ad93aa9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
deleted file mode 100644
index c6f8e83c355..00000000000
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_pci_h__
-#define __iwl_pci_h__
-
-
-/*
- * This file declares the config structures for all devices.
- */
-
-extern const struct iwl_cfg iwl5300_agn_cfg;
-extern const struct iwl_cfg iwl5100_agn_cfg;
-extern const struct iwl_cfg iwl5350_agn_cfg;
-extern const struct iwl_cfg iwl5100_bgn_cfg;
-extern const struct iwl_cfg iwl5100_abg_cfg;
-extern const struct iwl_cfg iwl5150_agn_cfg;
-extern const struct iwl_cfg iwl5150_abg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_cfg;
-extern const struct iwl_cfg iwl6005_2abg_cfg;
-extern const struct iwl_cfg iwl6005_2bg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern const struct iwl_cfg iwl6005_2agn_d_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
-extern const struct iwl_cfg iwl1030_bgn_cfg;
-extern const struct iwl_cfg iwl1030_bg_cfg;
-extern const struct iwl_cfg iwl6030_2agn_cfg;
-extern const struct iwl_cfg iwl6030_2abg_cfg;
-extern const struct iwl_cfg iwl6030_2bgn_cfg;
-extern const struct iwl_cfg iwl6030_2bg_cfg;
-extern const struct iwl_cfg iwl6000i_2agn_cfg;
-extern const struct iwl_cfg iwl6000i_2abg_cfg;
-extern const struct iwl_cfg iwl6000i_2bg_cfg;
-extern const struct iwl_cfg iwl6000_3agn_cfg;
-extern const struct iwl_cfg iwl6050_2agn_cfg;
-extern const struct iwl_cfg iwl6050_2abg_cfg;
-extern const struct iwl_cfg iwl6150_bgn_cfg;
-extern const struct iwl_cfg iwl6150_bg_cfg;
-extern const struct iwl_cfg iwl1000_bgn_cfg;
-extern const struct iwl_cfg iwl1000_bg_cfg;
-extern const struct iwl_cfg iwl100_bgn_cfg;
-extern const struct iwl_cfg iwl100_bg_cfg;
-extern const struct iwl_cfg iwl130_bgn_cfg;
-extern const struct iwl_cfg iwl130_bg_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern const struct iwl_cfg iwl2030_2bgn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_cfg;
-extern const struct iwl_cfg iwl105_bgn_cfg;
-extern const struct iwl_cfg iwl105_bgn_d_cfg;
-extern const struct iwl_cfg iwl135_bgn_cfg;
-extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl3160_ac_cfg;
-
-#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7bc0fb9128d..46ca91f77c9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -69,8 +69,6 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
-
-#include "cfg.h"
#include "internal.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 12c4f31ca8f..50ba0a468f9 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -22,7 +22,7 @@
* USA
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
@@ -728,7 +728,8 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
- iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
+ ((reg & 0x000FFFFF) | (3 << 24)));
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
@@ -736,7 +737,7 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
u32 val)
{
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
+ ((addr & 0x000FFFFF) | (3 << 24)));
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
@@ -1383,28 +1384,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
return ret;
}
-static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_trans *trans = file->private_data;
-
- if (!trans->op_mode)
- return -EAGAIN;
-
- local_bh_disable();
- iwl_op_mode_nic_error(trans->op_mode);
- local_bh_enable();
-
- return count;
-}
-
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
-DEBUGFS_WRITE_FILE_OPS(fw_restart);
/*
* Create the debugfs files and directories
@@ -1418,7 +1402,6 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
- DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
return 0;
err:
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index cb5c6792e3a..282a5cafa91 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -501,10 +501,8 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
* shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
&txq->q.dma_addr, GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
+ if (!txq->tfds)
goto error;
- }
BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
@@ -1609,7 +1607,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* Check here that the packets are in the right place on the ring.
*/
#ifdef CONFIG_IWLWIFI_DEBUG
- wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
((wifi_seq & 0xff) != q->write_ptr),
"Q: %d WiFi Seq %d tfdNum %d",
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cffdf4fbf16..7490c4fc717 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1535,7 +1535,8 @@ static void hw_roc_done(struct work_struct *work)
static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
- int duration)
+ int duration,
+ enum ieee80211_roc_type type)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 97b245cbafd..ecf28464367 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -39,6 +39,7 @@ mwifiex-y += sta_tx.o
mwifiex-y += sta_rx.o
mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
+mwifiex-y += ethtool.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index a44023a7bd5..dbf5b128951 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1932,66 +1932,10 @@ static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- u32 vht_cap = 0, cap = adapter->hw_dot_11ac_dev_cap;
vht_info->vht_supported = true;
- switch (GET_VHTCAP_MAXMPDULEN(cap)) {
- case 0x00:
- vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
- break;
- case 0x01:
- vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
- break;
- case 0x10:
- vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
- break;
- default:
- dev_err(adapter->dev, "unsupported MAX MPDU len\n");
- break;
- }
-
- if (ISSUPP_11ACVHTHTCVHT(cap))
- vht_cap |= IEEE80211_VHT_CAP_HTC_VHT;
-
- if (ISSUPP_11ACVHTTXOPPS(cap))
- vht_cap |= IEEE80211_VHT_CAP_VHT_TXOP_PS;
-
- if (ISSUPP_11ACMURXBEAMFORMEE(cap))
- vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
-
- if (ISSUPP_11ACMUTXBEAMFORMEE(cap))
- vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
-
- if (ISSUPP_11ACSUBEAMFORMER(cap))
- vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
-
- if (ISSUPP_11ACSUBEAMFORMEE(cap))
- vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
-
- if (ISSUPP_11ACRXSTBC(cap))
- vht_cap |= IEEE80211_VHT_CAP_RXSTBC_1;
-
- if (ISSUPP_11ACTXSTBC(cap))
- vht_cap |= IEEE80211_VHT_CAP_TXSTBC;
-
- if (ISSUPP_11ACSGI160(cap))
- vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
-
- if (ISSUPP_11ACSGI80(cap))
- vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
-
- if (ISSUPP_11ACLDPC(cap))
- vht_cap |= IEEE80211_VHT_CAP_RXLDPC;
-
- if (ISSUPP_11ACBW8080(cap))
- vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
-
- if (ISSUPP_11ACBW160(cap))
- vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
-
- vht_info->cap = vht_cap;
-
+ vht_info->cap = adapter->hw_dot_11ac_dev_cap;
/* Update MCS support for VHT */
vht_info->vht_mcs.rx_mcs_map = cpu_to_le16(
adapter->hw_dot_11ac_mcs_support & 0xFFFF);
@@ -2235,6 +2179,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
+ dev->ethtool_ops = &mwifiex_ethtool_ops;
mdev_priv = netdev_priv(dev);
*((unsigned long *) mdev_priv) = (unsigned long) priv;
@@ -2293,6 +2238,152 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
}
EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
+#ifdef CONFIG_PM
+static bool
+mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
+ s8 *byte_seq)
+{
+ int j, k, valid_byte_cnt = 0;
+ bool dont_care_byte = false;
+
+ for (j = 0; j < DIV_ROUND_UP(pat->pattern_len, 8); j++) {
+ for (k = 0; k < 8; k++) {
+ if (pat->mask[j] & 1 << k) {
+ memcpy(byte_seq + valid_byte_cnt,
+ &pat->pattern[j * 8 + k], 1);
+ valid_byte_cnt++;
+ if (dont_care_byte)
+ return false;
+ } else {
+ if (valid_byte_cnt)
+ dont_care_byte = true;
+ }
+
+ if (valid_byte_cnt > MAX_BYTESEQ)
+ return false;
+ }
+ }
+
+ byte_seq[MAX_BYTESEQ] = valid_byte_cnt;
+
+ return true;
+}
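/*
 * Illustrative note, not part of the patch: for a 2-byte pattern
 * { 0x33, 0x33 } with mask 0x03 (both bytes significant), the helper above
 * yields byte_seq = { 0x33, 0x33 } and byte_seq[MAX_BYTESEQ] = 2, which the
 * suspend handler below then classifies as an IPv4 multicast MAC match.
 */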
+
+static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_ds_mef_cfg mef_cfg;
+ struct mwifiex_mef_entry *mef_entry;
+ int i, filt_num = 0, ret;
+ bool first_pat = true;
+ u8 byte_seq[MAX_BYTESEQ + 1];
+ const u8 ipv4_mc_mac[] = {0x33, 0x33};
+ const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+ struct mwifiex_private *priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ if (!wowlan) {
+ dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
+ return 0;
+ }
+
+ if (!priv->media_connected) {
+ dev_warn(adapter->dev,
+ "Can not configure WOWLAN in disconnected state\n");
+ return 0;
+ }
+
+ mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
+ if (!mef_entry)
+ return -ENOMEM;
+
+ memset(&mef_cfg, 0, sizeof(mef_cfg));
+ mef_cfg.num_entries = 1;
+ mef_cfg.mef_entry = mef_entry;
+ mef_entry->mode = MEF_MODE_HOST_SLEEP;
+ mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ memset(byte_seq, 0, sizeof(byte_seq));
+ if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
+ byte_seq)) {
+ wiphy_err(wiphy, "Pattern not supported\n");
+ kfree(mef_entry);
+ return -EOPNOTSUPP;
+ }
+
+ if (!wowlan->patterns[i].pkt_offset) {
+ if (!(byte_seq[0] & 0x01) &&
+ (byte_seq[MAX_BYTESEQ] == 1)) {
+ mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+ continue;
+ } else if (is_broadcast_ether_addr(byte_seq)) {
+ mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
+ continue;
+ } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
+ (byte_seq[MAX_BYTESEQ] == 2)) ||
+ (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
+ (byte_seq[MAX_BYTESEQ] == 3))) {
+ mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
+ continue;
+ }
+ }
+
+ mef_entry->filter[filt_num].repeat = 1;
+ mef_entry->filter[filt_num].offset =
+ wowlan->patterns[i].pkt_offset;
+ memcpy(mef_entry->filter[filt_num].byte_seq, byte_seq,
+ sizeof(byte_seq));
+ mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+
+ if (first_pat)
+ first_pat = false;
+ else
+ mef_entry->filter[filt_num].filt_action = TYPE_AND;
+
+ filt_num++;
+ }
+
+ if (wowlan->magic_pkt) {
+ mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
+ mef_entry->filter[filt_num].repeat = 16;
+ memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
+ ETH_ALEN);
+ mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN;
+ mef_entry->filter[filt_num].offset = 14;
+ mef_entry->filter[filt_num].filt_type = TYPE_EQ;
+ if (filt_num)
+ mef_entry->filter[filt_num].filt_action = TYPE_OR;
+ }
+
+ if (!mef_cfg.criteria)
+ mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
+ MWIFIEX_CRITERIA_UNICAST |
+ MWIFIEX_CRITERIA_MULTICAST;
+
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG,
+ HostCmd_ACT_GEN_SET, 0,
+ &mef_cfg);
+
+ kfree(mef_entry);
+ return ret;
+}
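
The magic-packet filter above matches the station's own MAC address at offset 14 (right past the Ethernet header) with repeat = 16, which is exactly the shape of a standard Wake-on-LAN frame: 6 bytes of 0xff followed by the target MAC repeated 16 times. A small illustrative sketch of that payload, not part of the driver (the MAC in main() is made up):

#include <stdio.h>
#include <string.h>

/* Build a classic Wake-on-LAN payload: 6 x 0xff, then the MAC 16 times. */
static size_t build_magic_packet(unsigned char *buf, const unsigned char mac[6])
{
	int i;

	memset(buf, 0xff, 6);
	for (i = 0; i < 16; i++)
		memcpy(buf + 6 + i * 6, mac, 6);
	return 6 + 16 * 6;	/* 102 bytes */
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x50, 0x43, 0x00, 0x00, 0x01 };
	unsigned char payload[102];

	printf("payload length: %zu\n", build_magic_packet(payload, mac));
	return 0;
}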
+
+static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
+{
+ return 0;
+}
+
+static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
+ bool enabled)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+
+ device_set_wakeup_enable(adapter->dev, enabled);
+}
+#endif
+
/* station cfg80211 operations */
static struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2321,6 +2412,11 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
.set_antenna = mwifiex_cfg80211_set_antenna,
+#ifdef CONFIG_PM
+ .suspend = mwifiex_cfg80211_suspend,
+ .resume = mwifiex_cfg80211_resume,
+ .set_wakeup = mwifiex_cfg80211_set_wakeup,
+#endif
};
/*
@@ -2379,6 +2475,14 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
+#ifdef CONFIG_PM
+ wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT;
+ wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS;
+ wiphy->wowlan.pattern_min_len = 1;
+ wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN;
+ wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN;
+#endif
+
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index b5c8b962ce1..9a1302bd4c0 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1149,7 +1149,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
phs_cfg->params.hs_config.gpio,
phs_cfg->params.hs_config.gap);
}
- if (conditions != HOST_SLEEP_CFG_CANCEL) {
+ if (conditions != HS_CFG_CANCEL) {
adapter->is_hs_configured = true;
if (adapter->iface_type == MWIFIEX_USB ||
adapter->iface_type == MWIFIEX_PCIE)
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
new file mode 100644
index 00000000000..bfb39908b2c
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -0,0 +1,70 @@
+/*
+ * Marvell Wireless LAN device driver: ethtool
+ *
+ * Copyright (C) 2013, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available by writing to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
+ * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+
+static void mwifiex_ethtool_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ u32 conditions = le32_to_cpu(priv->adapter->hs_cfg.conditions);
+
+ wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
+
+ if (conditions == HS_CFG_COND_DEF)
+ return;
+
+ if (conditions & HS_CFG_COND_UNICAST_DATA)
+ wol->wolopts |= WAKE_UCAST;
+ if (conditions & HS_CFG_COND_MULTICAST_DATA)
+ wol->wolopts |= WAKE_MCAST;
+ if (conditions & HS_CFG_COND_BROADCAST_DATA)
+ wol->wolopts |= WAKE_BCAST;
+ if (conditions & HS_CFG_COND_MAC_EVENT)
+ wol->wolopts |= WAKE_PHY;
+}
+
+static int mwifiex_ethtool_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ u32 conditions = 0;
+
+ if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & WAKE_UCAST)
+ conditions |= HS_CFG_COND_UNICAST_DATA;
+ if (wol->wolopts & WAKE_MCAST)
+ conditions |= HS_CFG_COND_MULTICAST_DATA;
+ if (wol->wolopts & WAKE_BCAST)
+ conditions |= HS_CFG_COND_BROADCAST_DATA;
+ if (wol->wolopts & WAKE_PHY)
+ conditions |= HS_CFG_COND_MAC_EVENT;
+ if (wol->wolopts == 0)
+ conditions |= HS_CFG_COND_DEF;
+ priv->adapter->hs_cfg.conditions = cpu_to_le32(conditions);
+
+ return 0;
+}
+
+const struct ethtool_ops mwifiex_ethtool_ops = {
+ .get_wol = mwifiex_ethtool_get_wol,
+ .set_wol = mwifiex_ethtool_set_wol,
+};
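
The two handlers above only translate between ethtool's WAKE_* bits and the HS_CFG_COND_* word in hs_cfg; nothing is sent to the firmware until host sleep is actually configured. Below is a minimal user-space sketch, not part of the patch, that reads the resulting WoL state through the standard SIOCETHTOOL ioctl, the same path the ethtool utility takes.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <interface>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("WoL supported 0x%x, enabled 0x%x\n",
		       (unsigned int)wol.supported, (unsigned int)wol.wolopts);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}

Setting the bits works the same way with ETHTOOL_SWOL, or from the shell with ethtool -s <iface> wol u (and friends).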
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 25acb0682c5..57c5defe1f9 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -230,40 +230,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
-#define GET_VHTCAP_MAXMPDULEN(vht_cap_info) (vht_cap_info & 0x3)
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
(2 * (nss - 1)))
#define NO_NSS_SUPPORT 0x3
-/* HW_SPEC: HTC-VHT supported */
-#define ISSUPP_11ACVHTHTCVHT(Dot11acDevCap) (Dot11acDevCap & BIT(22))
-/* HW_SPEC: VHT TXOP PS support */
-#define ISSUPP_11ACVHTTXOPPS(Dot11acDevCap) (Dot11acDevCap & BIT(21))
-/* HW_SPEC: MU RX beamformee support */
-#define ISSUPP_11ACMURXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(20))
-/* HW_SPEC: MU TX beamformee support */
-#define ISSUPP_11ACMUTXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(19))
-/* HW_SPEC: SU Beamformee support */
-#define ISSUPP_11ACSUBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(10))
-/* HW_SPEC: SU Beamformer support */
-#define ISSUPP_11ACSUBEAMFORMER(Dot11acDevCap) (Dot11acDevCap & BIT(9))
-/* HW_SPEC: Rx STBC support */
-#define ISSUPP_11ACRXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(8))
-/* HW_SPEC: Tx STBC support */
-#define ISSUPP_11ACTXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(7))
-/* HW_SPEC: Short GI support for 160MHz BW */
-#define ISSUPP_11ACSGI160(Dot11acDevCap) (Dot11acDevCap & BIT(6))
-/* HW_SPEC: Short GI support for 80MHz BW */
-#define ISSUPP_11ACSGI80(Dot11acDevCap) (Dot11acDevCap & BIT(5))
-/* HW_SPEC: LDPC coding support */
-#define ISSUPP_11ACLDPC(Dot11acDevCap) (Dot11acDevCap & BIT(4))
-/* HW_SPEC: Channel BW 20/40/80/160/80+80 MHz support */
-#define ISSUPP_11ACBW8080(Dot11acDevCap) (Dot11acDevCap & BIT(3))
-/* HW_SPEC: Channel BW 20/40/80/160 MHz support */
-#define ISSUPP_11ACBW160(Dot11acDevCap) (Dot11acDevCap & BIT(2))
-
#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
@@ -300,6 +272,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
#define HostCmd_CMD_VERSION_EXT 0x0097
+#define HostCmd_CMD_MEF_CFG 0x009a
#define HostCmd_CMD_RSSI_INFO 0x00a4
#define HostCmd_CMD_FUNC_INIT 0x00a9
#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
@@ -376,10 +349,14 @@ enum P2P_MODES {
#define HostCmd_SCAN_RADIO_TYPE_BG 0
#define HostCmd_SCAN_RADIO_TYPE_A 1
-#define HOST_SLEEP_CFG_CANCEL 0xffffffff
-#define HOST_SLEEP_CFG_COND_DEF 0x00000000
-#define HOST_SLEEP_CFG_GPIO_DEF 0xff
-#define HOST_SLEEP_CFG_GAP_DEF 0
+#define HS_CFG_CANCEL 0xffffffff
+#define HS_CFG_COND_DEF 0x00000000
+#define HS_CFG_GPIO_DEF 0xff
+#define HS_CFG_GAP_DEF 0
+#define HS_CFG_COND_BROADCAST_DATA 0x00000001
+#define HS_CFG_COND_UNICAST_DATA 0x00000002
+#define HS_CFG_COND_MAC_EVENT 0x00000004
+#define HS_CFG_COND_MULTICAST_DATA 0x00000008
#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc
#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2
@@ -469,6 +446,23 @@ enum P2P_MODES {
#define EVENT_GET_BSS_TYPE(event_cause) \
(((event_cause) >> 24) & 0x00ff)
+#define MWIFIEX_MAX_PATTERN_LEN 20
+#define MWIFIEX_MAX_OFFSET_LEN 50
+#define STACK_NBYTES 100
+#define TYPE_DNUM 1
+#define TYPE_BYTESEQ 2
+#define MAX_OPERAND 0x40
+#define TYPE_EQ (MAX_OPERAND+1)
+#define TYPE_EQ_DNUM (MAX_OPERAND+2)
+#define TYPE_EQ_BIT (MAX_OPERAND+3)
+#define TYPE_AND (MAX_OPERAND+4)
+#define TYPE_OR (MAX_OPERAND+5)
+#define MEF_MODE_HOST_SLEEP 1
+#define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3
+#define MWIFIEX_CRITERIA_BROADCAST BIT(0)
+#define MWIFIEX_CRITERIA_UNICAST BIT(1)
+#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -1499,6 +1493,19 @@ struct host_cmd_ds_802_11_ibss_status {
__le16 use_g_rate_protect;
} __packed;
+struct mwifiex_fw_mef_entry {
+ u8 mode;
+ u8 action;
+ __le16 exprsize;
+ u8 expr[0];
+} __packed;
+
+struct host_cmd_ds_mef_cfg {
+ __le32 criteria;
+ __le16 num_entries;
+ struct mwifiex_fw_mef_entry mef_entry[0];
+} __packed;
+
#define CONNECTION_TYPE_INFRA 0
#define CONNECTION_TYPE_ADHOC 1
#define CONNECTION_TYPE_AP 2
@@ -1603,6 +1610,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_remain_on_chan roc_cfg;
struct host_cmd_ds_p2p_mode_cfg mode_cfg;
struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
+ struct host_cmd_ds_mef_cfg mef_cfg;
struct host_cmd_ds_mac_reg_access mac_reg;
struct host_cmd_ds_bbp_reg_access bbp_reg;
struct host_cmd_ds_rf_reg_access rf_reg;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 0ff4c37ab42..daf8801cecd 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -318,9 +318,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
adapter->is_hs_configured = false;
- adapter->hs_cfg.conditions = cpu_to_le32(HOST_SLEEP_CFG_COND_DEF);
- adapter->hs_cfg.gpio = HOST_SLEEP_CFG_GPIO_DEF;
- adapter->hs_cfg.gap = HOST_SLEEP_CFG_GAP_DEF;
+ adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
+ adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
+ adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
adapter->hs_activated = false;
memset(adapter->event_body, 0, sizeof(adapter->event_body));
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d85e6eb1f58..91d522c746e 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -354,6 +354,29 @@ struct mwifiex_ds_misc_subsc_evt {
struct subsc_evt_cfg bcn_h_rssi_cfg;
};
+#define MAX_BYTESEQ 6 /* non-adjustable */
+#define MWIFIEX_MAX_FILTERS 10
+
+struct mwifiex_mef_filter {
+ u16 repeat;
+ u16 offset;
+ s8 byte_seq[MAX_BYTESEQ + 1];
+ u8 filt_type;
+ u8 filt_action;
+};
+
+struct mwifiex_mef_entry {
+ u8 mode;
+ u8 action;
+ struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
+};
+
+struct mwifiex_ds_mef_cfg {
+ u32 criteria;
+ u16 num_entries;
+ struct mwifiex_mef_entry *mef_entry;
+};
+
#define MWIFIEX_MAX_VSIE_LEN (256)
#define MWIFIEX_MAX_VSIE_NUM (8)
#define MWIFIEX_VSIE_MASK_CLEAR 0x00
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c802ede9c3..121443a0f2a 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -588,10 +588,19 @@ mwifiex_tx_timeout(struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_type-num = %d-%d\n",
- jiffies, priv->bss_type, priv->bss_num);
- mwifiex_set_trans_start(dev);
priv->num_tx_timeout++;
+ priv->tx_timeout_cnt++;
+ dev_err(priv->adapter->dev,
+ "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
+ jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
+ mwifiex_set_trans_start(dev);
+
+ if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
+ priv->adapter->if_ops.card_reset) {
+ dev_err(priv->adapter->dev,
+ "tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
+ priv->adapter->if_ops.card_reset(priv->adapter);
+ }
}
/*
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 7035ade9af7..7255289a48a 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -130,6 +130,9 @@ enum {
#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE
#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE
+/* Threshold for tx_timeout_cnt before we trigger a card reset */
+#define TX_TIMEOUT_THRESHOLD 6
+
struct mwifiex_dbg {
u32 num_cmd_host_to_card_failure;
u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -394,6 +397,8 @@ struct mwifiex_private {
u8 curr_addr[ETH_ALEN];
u8 media_connected;
u32 num_tx_timeout;
+ /* track consecutive timeouts */
+ u8 tx_timeout_cnt;
struct net_device *netdev;
struct net_device_stats stats;
u16 curr_pkt_filter;
@@ -1098,11 +1103,15 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
+int mwifiex_add_wowlan_magic_pkt_filter(struct mwifiex_adapter *adapter);
+
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
+extern const struct ethtool_ops mwifiex_ethtool_ops;
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index feb20461339..8cd8cdc91a7 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -36,8 +36,6 @@ static u8 user_rmmod;
static struct mwifiex_if_ops pcie_ops;
static struct semaphore add_remove_card_sem;
-static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter);
-static int mwifiex_pcie_resume(struct pci_dev *pdev);
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
@@ -78,6 +76,82 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
return false;
}
+#ifdef CONFIG_PM
+/*
+ * The kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that, the kernel simply removes the whole card.
+ *
+ * If not already suspended, this function allocates and sends a host
+ * sleep activate request to the firmware and turns off the traffic.
+ */
+static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mwifiex_adapter *adapter;
+ struct pcie_service_card *card;
+ int hs_actived;
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ if (!card || !card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+ } else {
+ pr_err("PCIE device is not specified\n");
+ return 0;
+ }
+
+ adapter = card->adapter;
+
+ hs_actived = mwifiex_enable_hs(adapter);
+
+ /* Indicate device suspended */
+ adapter->is_suspended = true;
+
+ return 0;
+}
+
+/*
+ * The kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that, the kernel simply removes the whole card.
+ *
+ * If not already resumed, this function turns on the traffic and
+ * sends a host sleep cancel request to the firmware.
+ */
+static int mwifiex_pcie_resume(struct pci_dev *pdev)
+{
+ struct mwifiex_adapter *adapter;
+ struct pcie_service_card *card;
+
+ if (pdev) {
+ card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+ if (!card || !card->adapter) {
+ pr_err("Card or adapter structure is not valid\n");
+ return 0;
+ }
+ } else {
+ pr_err("PCIE device is not specified\n");
+ return 0;
+ }
+
+ adapter = card->adapter;
+
+ if (!adapter->is_suspended) {
+ dev_warn(adapter->dev, "Device already resumed\n");
+ return 0;
+ }
+
+ adapter->is_suspended = false;
+
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+ MWIFIEX_ASYNC_CMD);
+
+ return 0;
+}
+#endif
+
/*
* This function probes an mwifiex device and registers it. It allocates
* the card structure, enables PCIE function number and initiates the
@@ -159,80 +233,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
kfree(card);
}
-/*
- * Kernel needs to suspend all functions separately. Therefore all
- * registered functions must have drivers with suspend and resume
- * methods. Failing that the kernel simply removes the whole card.
- *
- * If already not suspended, this function allocates and sends a host
- * sleep activate request to the firmware and turns off the traffic.
- */
-static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
- int hs_actived;
-
- if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
- if (!card || !card->adapter) {
- pr_err("Card or adapter structure is not valid\n");
- return 0;
- }
- } else {
- pr_err("PCIE device is not specified\n");
- return 0;
- }
-
- adapter = card->adapter;
-
- hs_actived = mwifiex_enable_hs(adapter);
-
- /* Indicate device suspended */
- adapter->is_suspended = true;
-
- return 0;
-}
-
-/*
- * Kernel needs to suspend all functions separately. Therefore all
- * registered functions must have drivers with suspend and resume
- * methods. Failing that the kernel simply removes the whole card.
- *
- * If already not resumed, this function turns on the traffic and
- * sends a host sleep cancel request to the firmware.
- */
-static int mwifiex_pcie_resume(struct pci_dev *pdev)
-{
- struct mwifiex_adapter *adapter;
- struct pcie_service_card *card;
-
- if (pdev) {
- card = (struct pcie_service_card *) pci_get_drvdata(pdev);
- if (!card || !card->adapter) {
- pr_err("Card or adapter structure is not valid\n");
- return 0;
- }
- } else {
- pr_err("PCIE device is not specified\n");
- return 0;
- }
-
- adapter = card->adapter;
-
- if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "Device already resumed\n");
- return 0;
- }
-
- adapter->is_suspended = false;
-
- mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
- MWIFIEX_ASYNC_CMD);
-
- return 0;
-}
-
static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -1030,8 +1030,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
u32 wrindx, num_tx_buffs, rx_val;
int ret;
dma_addr_t buf_pa;
- struct mwifiex_pcie_buf_desc *desc;
- struct mwifiex_pfu_buf_desc *desc2;
+ struct mwifiex_pcie_buf_desc *desc = NULL;
+ struct mwifiex_pfu_buf_desc *desc2 = NULL;
__le16 *tmp;
if (!(skb->data && skb->len)) {
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c55c5bb9313..a2ae690a0a6 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -334,7 +334,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
if (!hs_activate &&
- (hscfg_param->conditions != cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) &&
+ (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
((adapter->arp_filter_size > 0) &&
(adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
dev_dbg(adapter->dev,
@@ -1059,6 +1059,80 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
+ struct mwifiex_mef_entry *mef_entry,
+ u8 **buffer)
+{
+ struct mwifiex_mef_filter *filter = mef_entry->filter;
+ int i, byte_len;
+ u8 *stack_ptr = *buffer;
+
+ for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
+ filter = &mef_entry->filter[i];
+ if (!filter->filt_type)
+ break;
+ *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->repeat);
+ stack_ptr += 4;
+ *stack_ptr = TYPE_DNUM;
+ stack_ptr += 1;
+
+ byte_len = filter->byte_seq[MAX_BYTESEQ];
+ memcpy(stack_ptr, filter->byte_seq, byte_len);
+ stack_ptr += byte_len;
+ *stack_ptr = byte_len;
+ stack_ptr += 1;
+ *stack_ptr = TYPE_BYTESEQ;
+ stack_ptr += 1;
+
+ *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->offset);
+ stack_ptr += 4;
+ *stack_ptr = TYPE_DNUM;
+ stack_ptr += 1;
+
+ *stack_ptr = filter->filt_type;
+ stack_ptr += 1;
+
+ if (filter->filt_action) {
+ *stack_ptr = filter->filt_action;
+ stack_ptr += 1;
+ }
+
+ if (stack_ptr - *buffer > STACK_NBYTES)
+ return -1;
+ }
+
+ *buffer = stack_ptr;
+ return 0;
+}
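
Each filter is emitted in postfix (RPN) order for the firmware's stack machine: the repeat count, the byte sequence and the packet offset are pushed as operands, followed by the comparison operator and, from the second filter on, an AND/OR byte that combines it with the previous result. A stand-alone sketch of the byte layout produced for one TYPE_EQ filter; the helper and main() are illustrative only, and the optional AND/OR byte is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_BYTESEQ	6
#define TYPE_DNUM	1
#define TYPE_BYTESEQ	2
#define MAX_OPERAND	0x40
#define TYPE_EQ		(MAX_OPERAND + 1)

static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

/* Serialize one "byte_seq at offset, repeated N times" comparison in the
 * little-endian stack layout used above. */
static size_t encode_eq_filter(uint8_t *out, uint32_t repeat, uint32_t offset,
			       const uint8_t *seq, uint8_t seq_len)
{
	uint8_t *p = out;

	put_le32(p, repeat);		/* operand 1: repeat count */
	p += 4;
	*p++ = TYPE_DNUM;

	memcpy(p, seq, seq_len);	/* operand 2: byte sequence + length */
	p += seq_len;
	*p++ = seq_len;
	*p++ = TYPE_BYTESEQ;

	put_le32(p, offset);		/* operand 3: packet offset */
	p += 4;
	*p++ = TYPE_DNUM;

	*p++ = TYPE_EQ;			/* operator */

	return p - out;
}

int main(void)
{
	uint8_t buf[32];
	const uint8_t bcast[MAX_BYTESEQ] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("encoded filter is %zu bytes\n",
	       encode_eq_filter(buf, 1, 0, bcast, MAX_BYTESEQ));
	return 0;
}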
+
+static int
+mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_ds_mef_cfg *mef)
+{
+ struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
+ u8 *pos = (u8 *)mef_cfg;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
+
+ mef_cfg->criteria = cpu_to_le32(mef->criteria);
+ mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
+ pos += sizeof(*mef_cfg);
+ mef_cfg->mef_entry->mode = mef->mef_entry->mode;
+ mef_cfg->mef_entry->action = mef->mef_entry->action;
+ pos += sizeof(*(mef_cfg->mef_entry));
+
+ if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos))
+ return -1;
+
+ mef_cfg->mef_entry->exprsize =
+ cpu_to_le16(pos - mef_cfg->mef_entry->expr);
+ cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
+
+ return 0;
+}
+
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1273,6 +1347,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_MEF_CFG:
+ ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4669f8d9389..80b9f223800 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -976,6 +976,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_UAP_BSS_STOP:
priv->bss_started = 0;
break;
+ case HostCmd_CMD_MEF_CFG:
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 13100f8de3d..8c943b6ebf4 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -382,7 +382,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
break;
}
if (hs_cfg->is_invoke_hostcmd) {
- if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) {
+ if (hs_cfg->conditions == HS_CFG_CANCEL) {
if (!adapter->is_hs_configured)
/* Already cancelled */
break;
@@ -397,8 +397,8 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
if (hs_cfg->gap)
adapter->hs_cfg.gap = (u8)hs_cfg->gap;
- } else if (adapter->hs_cfg.conditions
- == cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) {
+ } else if (adapter->hs_cfg.conditions ==
+ cpu_to_le32(HS_CFG_CANCEL)) {
/* Return failure if no parameters for HS
enable */
status = -1;
@@ -414,7 +414,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
HostCmd_CMD_802_11_HS_CFG_ENH,
HostCmd_ACT_GEN_SET, 0,
&adapter->hs_cfg);
- if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL)
+ if (hs_cfg->conditions == HS_CFG_CANCEL)
/* Restore previous condition */
adapter->hs_cfg.conditions =
cpu_to_le32(prev_cond);
@@ -448,7 +448,7 @@ int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type)
{
struct mwifiex_ds_hs_cfg hscfg;
- hscfg.conditions = HOST_SLEEP_CFG_CANCEL;
+ hscfg.conditions = HS_CFG_CANCEL;
hscfg.is_invoke_hostcmd = true;
return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 296faec1436..8f923d0d2ba 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -169,6 +169,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
+ if (priv->tx_timeout_cnt)
+ priv->tx_timeout_cnt = 0;
} else {
priv->stats.tx_errors++;
}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 21553976b55..54667e65ca4 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -195,7 +195,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
skb->protocol = eth_type_trans(skb, priv->netdev);
skb->ip_summed = CHECKSUM_NONE;
- /* This is required only in case of 11n and USB as we alloc
+ /* This is required only in case of 11n and USB/PCIE as we alloc
* a buffer of 4K only if its 11N (to be able to receive 4K
* AMSDU packets). In case of SD we allocate buffers based
* on the size of packet and hence this is not needed.
@@ -212,7 +212,8 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
* fragments. Currently we fail the Filesndl-ht.scr script
* for UDP, hence this fix
*/
- if ((priv->adapter->iface_type == MWIFIEX_USB) &&
+ if ((priv->adapter->iface_type == MWIFIEX_USB ||
+ priv->adapter->iface_type == MWIFIEX_PCIE) &&
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 091d9a64080..0640e7d7f0c 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -232,6 +232,7 @@ struct mwl8k_priv {
u16 num_mcaddrs;
u8 hw_rev;
u32 fw_rev;
+ u32 caps;
/*
* Running count of TX packets in flight, to avoid
@@ -284,6 +285,7 @@ struct mwl8k_priv {
unsigned fw_state;
char *fw_pref;
char *fw_alt;
+ bool is_8764;
struct completion firmware_loading_complete;
/* bitmap of running BSSes */
@@ -600,13 +602,18 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
loops = 1000;
do {
u32 int_code;
-
- int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
- if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
- iowrite32(0, regs + MWL8K_HIU_INT_CODE);
- break;
+ if (priv->is_8764) {
+ int_code = ioread32(regs +
+ MWL8K_HIU_H2A_INTERRUPT_STATUS);
+ if (int_code == 0)
+ break;
+ } else {
+ int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
+ if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
+ iowrite32(0, regs + MWL8K_HIU_INT_CODE);
+ break;
+ }
}
-
cond_resched();
udelay(1);
} while (--loops);
@@ -724,7 +731,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
int rc;
int loops;
- if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
+ if (!memcmp(fw->data, "\x01\x00\x00\x00", 4) && !priv->is_8764) {
const struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
@@ -743,7 +750,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
} else {
- rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
+ if (priv->is_8764)
+ rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
+ else
+ rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
}
if (rc) {
@@ -908,9 +918,9 @@ static void mwl8k_encapsulate_tx_frame(struct mwl8k_priv *priv,
}
/*
- * Packet reception for 88w8366 AP firmware.
+ * Packet reception for 88w8366/88w8764 AP firmware.
*/
-struct mwl8k_rxd_8366_ap {
+struct mwl8k_rxd_ap {
__le16 pkt_len;
__u8 sq2;
__u8 rate;
@@ -928,30 +938,30 @@ struct mwl8k_rxd_8366_ap {
__u8 rx_ctrl;
} __packed;
-#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
-#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
-#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
+#define MWL8K_AP_RATE_INFO_MCS_FORMAT 0x80
+#define MWL8K_AP_RATE_INFO_40MHZ 0x40
+#define MWL8K_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
-#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
+#define MWL8K_AP_RX_CTRL_OWNED_BY_HOST 0x80
-/* 8366 AP rx_status bits */
-#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
-#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
-#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
-#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
-#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
+/* 8366/8764 AP rx_status bits */
+#define MWL8K_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
+#define MWL8K_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
+#define MWL8K_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
+#define MWL8K_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
+#define MWL8K_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
-static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8366_ap *rxd = _rxd;
+ struct mwl8k_rxd_ap *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_AP_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_ap_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8366_ap *rxd = _rxd;
+ struct mwl8k_rxd_ap *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -960,12 +970,12 @@ static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
- __le16 *qos, s8 *noise)
+mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos, s8 *noise)
{
- struct mwl8k_rxd_8366_ap *rxd = _rxd;
+ struct mwl8k_rxd_ap *rxd = _rxd;
- if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_AP_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -974,11 +984,11 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
*noise = -rxd->noise_floor;
- if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
+ if (rxd->rate & MWL8K_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
- if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
+ if (rxd->rate & MWL8K_AP_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
+ status->rate_idx = MWL8K_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
@@ -1002,19 +1012,19 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
*qos = rxd->qos_control;
- if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
- (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) &&
- (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
+ if ((rxd->rx_status != MWL8K_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
+ (rxd->rx_status & MWL8K_AP_RXSTAT_DECRYPT_ERR_MASK) &&
+ (rxd->rx_status & MWL8K_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
status->flag |= RX_FLAG_MMIC_ERROR;
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8366_ap_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
- .rxd_init = mwl8k_rxd_8366_ap_init,
- .rxd_refill = mwl8k_rxd_8366_ap_refill,
- .rxd_process = mwl8k_rxd_8366_ap_process,
+static struct rxd_ops rxd_ap_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_ap),
+ .rxd_init = mwl8k_rxd_ap_init,
+ .rxd_refill = mwl8k_rxd_ap_refill,
+ .rxd_process = mwl8k_rxd_ap_process,
};
/*
@@ -2401,6 +2411,9 @@ mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
{
struct mwl8k_priv *priv = hw->priv;
+ if (priv->caps)
+ return;
+
if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
mwl8k_setup_2ghz_band(hw);
if (caps & MWL8K_CAP_MIMO)
@@ -2412,6 +2425,8 @@ mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
if (caps & MWL8K_CAP_MIMO)
mwl8k_set_ht_caps(hw, &priv->band_50, caps);
}
+
+ priv->caps = caps;
}
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
@@ -5429,12 +5444,17 @@ enum {
MWL8363 = 0,
MWL8687,
MWL8366,
+ MWL8764,
};
#define MWL8K_8366_AP_FW_API 3
#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
+#define MWL8K_8764_AP_FW_API 1
+#define _MWL8K_8764_AP_FW(api) "mwl8k/fmimage_8764_ap-" #api ".fw"
+#define MWL8K_8764_AP_FW(api) _MWL8K_8764_AP_FW(api)
+
static struct mwl8k_device_info mwl8k_info_tbl[] = {
[MWL8363] = {
.part_name = "88w8363",
@@ -5452,7 +5472,13 @@ static struct mwl8k_device_info mwl8k_info_tbl[] = {
.fw_image_sta = "mwl8k/fmimage_8366.fw",
.fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
.fw_api_ap = MWL8K_8366_AP_FW_API,
- .ap_rxd_ops = &rxd_8366_ap_ops,
+ .ap_rxd_ops = &rxd_ap_ops,
+ },
+ [MWL8764] = {
+ .part_name = "88w8764",
+ .fw_image_ap = MWL8K_8764_AP_FW(MWL8K_8764_AP_FW_API),
+ .fw_api_ap = MWL8K_8764_AP_FW_API,
+ .ap_rxd_ops = &rxd_ap_ops,
},
};
@@ -5474,6 +5500,7 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
{ PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, },
{ PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, },
{ PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2b36), .driver_data = MWL8764, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -5995,6 +6022,8 @@ static int mwl8k_probe(struct pci_dev *pdev,
priv->pdev = pdev;
priv->device_info = &mwl8k_info_tbl[id->driver_data];
+ if (id->driver_data == MWL8764)
+ priv->is_8764 = true;
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 7744f42de1e..1f9cb55c336 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1584,7 +1584,7 @@ static int ezusb_probe(struct usb_interface *interface,
struct ezusb_priv *upriv = NULL;
struct usb_interface_descriptor *iface_desc;
struct usb_endpoint_descriptor *ep;
- const struct firmware *fw_entry;
+ const struct firmware *fw_entry = NULL;
int retval = 0;
int i;
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 3109c0db66e..ebada812b3a 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -144,7 +144,7 @@ static int psm;
static char *essid;
/* Default to encapsulation unless translation requested */
-static int translate = 1;
+static bool translate = true;
static int country = USA;
@@ -178,7 +178,7 @@ module_param(hop_dwell, int, 0);
module_param(beacon_period, int, 0);
module_param(psm, int, 0);
module_param(essid, charp, 0);
-module_param(translate, int, 0);
+module_param(translate, bool, 0);
module_param(country, int, 0);
module_param(sniffer, int, 0);
module_param(bc, int, 0);
@@ -953,7 +953,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
unsigned char *data, int len)
{
__be16 proto = ((struct ethhdr *)data)->h_proto;
- if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */
+ if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */
pr_debug("ray_cs translate_frame DIX II\n");
/* Copy LLC header to card buffer */
memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
@@ -1353,7 +1353,7 @@ static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- translate = *(extra); /* Set framing mode */
+ translate = !!*(extra); /* Set framing mode */
return 0;
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 525fd7521df..8169a85c449 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2,7 +2,7 @@
* Driver for RNDIS based wireless USB devices.
*
* Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net>
- * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2839,8 +2839,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
- if (info != NULL)
- kfree(info);
+ kfree(info);
priv->connected = true;
memcpy(priv->bssid, bssid, ETH_ALEN);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2bf4efa3318..ffe61d53e3f 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -169,6 +169,13 @@ config RT2800USB_RT53XX
rt2800usb driver.
Supported chips: RT5370
+config RT2800USB_RT55XX
+ bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
+ ---help---
+ This adds support for rt55xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT5572
+
config RT2800USB_UNKNOWN
bool "rt2800usb - Include support for unknown (USB) devices"
default n
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4db1088a847..a7630d5ec89 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5592 2.4G/5G 2T2R
* RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
@@ -68,6 +69,7 @@
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3053 0x000d
+#define RF5592 0x000f
#define RF3290 0x3290
#define RF5360 0x5360
#define RF5370 0x5370
@@ -88,11 +90,8 @@
#define REV_RT3390E 0x0211
#define REV_RT5390F 0x0502
#define REV_RT5390R 0x1502
+#define REV_RT5592C 0x0221
-/*
- * Signal information.
- * Default offset is required for RSSI <-> dBm conversion.
- */
#define DEFAULT_RSSI_OFFSET 120
/*
@@ -690,6 +689,12 @@
#define GPIO_SWITCH_7 FIELD32(0x00000080)
/*
+ * FIXME: where does the DEBUG_INDEX name come from?
+ */
+#define MAC_DEBUG_INDEX 0x05e8
+#define MAC_DEBUG_INDEX_XTAL FIELD32(0x80000000)
+
+/*
* MAC Control/Status Registers(CSR).
* Some values are set in TU, whereas 1 TU == 1024 us.
*/
@@ -1934,6 +1939,9 @@ struct mac_iveiv_entry {
#define BBP4_BANDWIDTH FIELD8(0x18)
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
+/* BBP27 */
+#define BBP27_RX_CHAIN_SEL FIELD8(0x60)
+
/*
* BBP 47: Bandwidth
*/
@@ -1948,6 +1956,20 @@ struct mac_iveiv_entry {
#define BBP49_UPDATE_FLAG FIELD8(0x01)
/*
+ * BBP 105:
+ * - bit0: detect SIG on primary channel only (on 40MHz bandwidth)
+ * - bit1: FEQ (Feed Forward Compensation) for independent streams
+ * - bit2: MLD (Maximum Likelihood Detection) for 2 streams (reserved on single
+ * stream)
+ * - bit4: channel estimation updates based on remodulation of
+ * L-SIG and HT-SIG symbols
+ */
+#define BBP105_DETECT_SIG_ON_PRIMARY FIELD8(0x01)
+#define BBP105_FEQ FIELD8(0x02)
+#define BBP105_MLD FIELD8(0x04)
+#define BBP105_SIG_REMODULATION FIELD8(0x08)
+
+/*
* BBP 109
*/
#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1967,6 +1989,11 @@ struct mac_iveiv_entry {
#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
/*
+ * BBP 254: unknown
+ */
+#define BBP254_BIT7 FIELD8(0x80)
+
+/*
* RFCSR registers
* The wordsize of the RFCSR is 8 bits.
*/
@@ -2022,9 +2049,18 @@ struct mac_iveiv_entry {
#define RFCSR7_BITS67 FIELD8(0xc0)
/*
+ * RFCSR 9:
+ */
+#define RFCSR9_K FIELD8(0x0f)
+#define RFCSR9_N FIELD8(0x10)
+#define RFCSR9_UNKNOWN FIELD8(0x60)
+#define RFCSR9_MOD FIELD8(0x80)
+
+/*
* RFCSR 11:
*/
#define RFCSR11_R FIELD8(0x03)
+#define RFCSR11_MOD FIELD8(0xc0)
/*
* RFCSR 12:
@@ -2130,11 +2166,13 @@ struct mac_iveiv_entry {
* RFCSR 49:
*/
#define RFCSR49_TX FIELD8(0x3f)
+#define RFCSR49_EP FIELD8(0xc0)
/*
* RFCSR 50:
*/
#define RFCSR50_TX FIELD8(0x3f)
+#define RFCSR50_EP FIELD8(0xc0)
/*
* RF registers
@@ -2497,6 +2535,61 @@ struct mac_iveiv_entry {
#define EEPROM_BBP_REG_ID FIELD16(0xff00)
/*
+ * EEPROM IQ Calibration. Unlike other entries, these are byte addresses.
+ */
+
+#define EEPROM_IQ_GAIN_CAL_TX0_2G 0x130
+#define EEPROM_IQ_PHASE_CAL_TX0_2G 0x131
+#define EEPROM_IQ_GROUPDELAY_CAL_TX0_2G 0x132
+#define EEPROM_IQ_GAIN_CAL_TX1_2G 0x133
+#define EEPROM_IQ_PHASE_CAL_TX1_2G 0x134
+#define EEPROM_IQ_GROUPDELAY_CAL_TX1_2G 0x135
+#define EEPROM_IQ_GAIN_CAL_RX0_2G 0x136
+#define EEPROM_IQ_PHASE_CAL_RX0_2G 0x137
+#define EEPROM_IQ_GROUPDELAY_CAL_RX0_2G 0x138
+#define EEPROM_IQ_GAIN_CAL_RX1_2G 0x139
+#define EEPROM_IQ_PHASE_CAL_RX1_2G 0x13A
+#define EEPROM_IQ_GROUPDELAY_CAL_RX1_2G 0x13B
+#define EEPROM_RF_IQ_COMPENSATION_CONTROL 0x13C
+#define EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL 0x13D
+#define EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G 0x144
+#define EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G 0x145
+#define EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G 0x146
+#define EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G 0x147
+#define EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G 0x148
+#define EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G 0x149
+#define EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G 0x14A
+#define EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G 0x14B
+#define EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G 0x14C
+#define EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G 0x14D
+#define EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G 0x14E
+#define EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G 0x14F
+#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH36_TO_CH64_5G 0x150
+#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH36_TO_CH64_5G 0x151
+#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH100_TO_CH138_5G 0x152
+#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH100_TO_CH138_5G 0x153
+#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH140_TO_CH165_5G 0x154
+#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH140_TO_CH165_5G 0x155
+#define EEPROM_IQ_GAIN_CAL_RX0_CH36_TO_CH64_5G 0x156
+#define EEPROM_IQ_PHASE_CAL_RX0_CH36_TO_CH64_5G 0x157
+#define EEPROM_IQ_GAIN_CAL_RX0_CH100_TO_CH138_5G 0x158
+#define EEPROM_IQ_PHASE_CAL_RX0_CH100_TO_CH138_5G 0x159
+#define EEPROM_IQ_GAIN_CAL_RX0_CH140_TO_CH165_5G 0x15A
+#define EEPROM_IQ_PHASE_CAL_RX0_CH140_TO_CH165_5G 0x15B
+#define EEPROM_IQ_GAIN_CAL_RX1_CH36_TO_CH64_5G 0x15C
+#define EEPROM_IQ_PHASE_CAL_RX1_CH36_TO_CH64_5G 0x15D
+#define EEPROM_IQ_GAIN_CAL_RX1_CH100_TO_CH138_5G 0x15E
+#define EEPROM_IQ_PHASE_CAL_RX1_CH100_TO_CH138_5G 0x15F
+#define EEPROM_IQ_GAIN_CAL_RX1_CH140_TO_CH165_5G 0x160
+#define EEPROM_IQ_PHASE_CAL_RX1_CH140_TO_CH165_5G 0x161
+#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH36_TO_CH64_5G 0x162
+#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH36_TO_CH64_5G 0x163
+#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH100_TO_CH138_5G 0x164
+#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH100_TO_CH138_5G 0x165
+#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH140_TO_CH165_5G 0x166
+#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH140_TO_CH165_5G 0x167
+
+/*
* MCU mailbox commands.
* MCU_SLEEP - go to power-save mode.
* arg1: 1: save as much power as possible, 0: save less power.
@@ -2535,6 +2628,8 @@ struct mac_iveiv_entry {
#define TXWI_DESC_SIZE (4 * sizeof(__le32))
#define RXWI_DESC_SIZE (4 * sizeof(__le32))
+#define TXWI_DESC_SIZE_5592 (5 * sizeof(__le32))
+#define RXWI_DESC_SIZE_5592 (6 * sizeof(__le32))
/*
* TX WI structure
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a658b4bc7da..f08a0424fe4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -527,8 +527,10 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
*/
rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
- if (rt2x00_is_usb(rt2x00dev))
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
+ }
msleep(1);
return 0;
@@ -674,11 +676,6 @@ void rt2800_process_rxwi(struct queue_entry *entry,
* Convert descriptor AGC value to RSSI value.
*/
rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word);
-
- /*
- * Remove RXWI descriptor from start of buffer.
- */
- skb_pull(entry->skb, RXWI_DESC_SIZE);
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
@@ -1988,8 +1985,21 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
}
#define POWER_BOUND 0x27
+#define POWER_BOUND_5G 0x2b
#define FREQ_OFFSET_BOUND 0x5f
+static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfcsr;
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+}
+
static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2010,12 +2020,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
- else
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ rt2800_adjust_freq_offset(rt2x00dev);
if (rf->channel <= 14) {
if (rf->channel == 6)
@@ -2056,13 +2061,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
else
rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
- else
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
-
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ rt2800_adjust_freq_offset(rt2x00dev);
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -2127,12 +2126,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
- else
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ rt2800_adjust_freq_offset(rt2x00dev);
if (rf->channel <= 14) {
int idx = rf->channel-1;
@@ -2184,6 +2178,382 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
}
}
+static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr, ep_reg;
+ u32 reg;
+ int power_bound;
+
+ /* TODO */
+ const bool is_11b = false;
+ const bool is_type_ep = false;
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL,
+ (rf->channel > 14 || conf_is_ht40(conf)) ? 5 : 0);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+ /* Order of values on rf_channel entry: N, K, mod, R */
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1 & 0xff);
+
+ rt2800_rfcsr_read(rt2x00dev, 9, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR9_K, rf->rf2 & 0xf);
+ rt2x00_set_field8(&rfcsr, RFCSR9_N, (rf->rf1 & 0x100) >> 8);
+ rt2x00_set_field8(&rfcsr, RFCSR9_MOD, ((rf->rf3 - 8) & 0x4) >> 2);
+ rt2800_rfcsr_write(rt2x00dev, 9, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf4 - 1);
+ rt2x00_set_field8(&rfcsr, RFCSR11_MOD, (rf->rf3 - 8) & 0x3);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ if (rf->channel <= 14) {
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x90);
+ /* FIXME: RF11 overwrite? */
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4A);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x52);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x4A);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1B);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0D);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0x9B);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xD5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x72);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0E);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xA2);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x6B);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x3E);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xA1);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
+
+ /* TODO RF27 <- tssi */
+
+ rfcsr = rf->channel <= 10 ? 0x07 : 0x06;
+ rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
+ rt2800_rfcsr_write(rt2x00dev, 59, rfcsr);
+
+ if (is_11b) {
+ /* CCK */
+ rt2800_rfcsr_write(rt2x00dev, 31, 0xF8);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0xC0);
+ if (is_type_ep)
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x47);
+ } else {
+ /* OFDM */
+ if (is_type_ep)
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x03);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ }
+
+ power_bound = POWER_BOUND;
+ ep_reg = 0x2;
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x97);
+ /* FIXME: RF11 overwrite */
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0xBF);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x04);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xBB);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xD7);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0x41);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x77);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x01);
+
+ /* TODO RF27 <- tssi */
+
+ if (rf->channel >= 36 && rf->channel <= 64) {
+
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x2E);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x22);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x7F);
+ if (rf->channel <= 50)
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x09);
+ else if (rf->channel >= 52)
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1C);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x5B);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0xFE);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x0C);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0xF8);
+ if (rf->channel <= 50) {
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xD3);
+ } else if (rf->channel >= 52) {
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x04);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
+ }
+
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x7F);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
+
+ } else if (rf->channel >= 100 && rf->channel <= 165) {
+
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x0E);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
+ if (rf->channel <= 153) {
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x3C);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x06);
+ } else if (rf->channel >= 155) {
+ rt2800_rfcsr_write(rt2x00dev, 23, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x05);
+ }
+ if (rf->channel <= 138) {
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1A);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x3B);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x18);
+ } else if (rf->channel >= 140) {
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x1B);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x08);
+ }
+ if (rf->channel <= 124)
+ rt2800_rfcsr_write(rt2x00dev, 51, 0xFC);
+ else if (rf->channel >= 126)
+ rt2800_rfcsr_write(rt2x00dev, 51, 0xEC);
+ if (rf->channel <= 138)
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
+ else if (rf->channel >= 140)
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0xEB);
+ if (rf->channel <= 138)
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x01);
+ else if (rf->channel >= 140)
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x00);
+ if (rf->channel <= 128)
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
+ else if (rf->channel >= 130)
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xAB);
+ if (rf->channel <= 116)
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x1D);
+ else if (rf->channel >= 118)
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
+ if (rf->channel <= 138)
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x3F);
+ else if (rf->channel >= 140)
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x7C);
+ if (rf->channel <= 116)
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x1D);
+ else if (rf->channel >= 118)
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
+ }
+
+ power_bound = POWER_BOUND_5G;
+ ep_reg = 0x3;
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (info->default_power1 > power_bound)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, power_bound);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+ if (is_type_ep)
+ rt2x00_set_field8(&rfcsr, RFCSR49_EP, ep_reg);
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ if (info->default_power2 > power_bound)
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
+ if (is_type_ep)
+ rt2x00_set_field8(&rfcsr, RFCSR50_EP, ep_reg);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD,
+ rt2x00dev->default_ant.tx_chain_num >= 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
+ rt2x00dev->default_ant.tx_chain_num == 2);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD,
+ rt2x00dev->default_ant.rx_chain_num >= 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
+ rt2x00dev->default_ant.rx_chain_num == 2);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xe4);
+
+ if (conf_is_ht40(conf))
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x16);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+
+ if (!is_11b) {
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ }
+
+ /* TODO proper frequency adjustment */
+ rt2800_adjust_freq_offset(rt2x00dev);
+
+ /* TODO merge with others */
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+
+ /* BBP settings */
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+
+ rt2800_bbp_write(rt2x00dev, 79, (rf->channel <= 14) ? 0x1C : 0x18);
+ rt2800_bbp_write(rt2x00dev, 80, (rf->channel <= 14) ? 0x0E : 0x08);
+ rt2800_bbp_write(rt2x00dev, 81, (rf->channel <= 14) ? 0x3A : 0x38);
+ rt2800_bbp_write(rt2x00dev, 82, (rf->channel <= 14) ? 0x62 : 0x92);
+
+ /* GLRT band configuration */
+ rt2800_bbp_write(rt2x00dev, 195, 128);
+ rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0xE0 : 0xF0);
+ rt2800_bbp_write(rt2x00dev, 195, 129);
+ rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x1F : 0x1E);
+ rt2800_bbp_write(rt2x00dev, 195, 130);
+ rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x38 : 0x28);
+ rt2800_bbp_write(rt2x00dev, 195, 131);
+ rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x32 : 0x20);
+ rt2800_bbp_write(rt2x00dev, 195, 133);
+ rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x28 : 0x7F);
+ rt2800_bbp_write(rt2x00dev, 195, 124);
+ rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
+}
+
+static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev,
+ const unsigned int word,
+ const u8 value)
+{
+ u8 chain, reg;
+
+ for (chain = 0; chain < rt2x00dev->default_ant.rx_chain_num; chain++) {
+ rt2800_bbp_read(rt2x00dev, 27, &reg);
+ rt2x00_set_field8(&reg, BBP27_RX_CHAIN_SEL, chain);
+ rt2800_bbp_write(rt2x00dev, 27, reg);
+
+ rt2800_bbp_write(rt2x00dev, word, value);
+ }
+}
+
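+/*
+ * In the calibration sequence below, BBP registers 158/159 are used as
+ * an index/data pair: the calibration register offset is written to
+ * BBP 158 and the EEPROM calibration value to BBP 159.
+ */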
+static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
+{
+ u8 cal;
+
+ /* TX0 IQ Gain */
+ rt2800_bbp_write(rt2x00dev, 158, 0x2c);
+ if (channel <= 14)
+ cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_2G);
+ else if (channel >= 36 && channel <= 64)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G);
+ else if (channel >= 100 && channel <= 138)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G);
+ else if (channel >= 140 && channel <= 165)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G);
+ else
+ cal = 0;
+ rt2800_bbp_write(rt2x00dev, 159, cal);
+
+ /* TX0 IQ Phase */
+ rt2800_bbp_write(rt2x00dev, 158, 0x2d);
+ if (channel <= 14)
+ cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_2G);
+ else if (channel >= 36 && channel <= 64)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G);
+ else if (channel >= 100 && channel <= 138)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G);
+ else if (channel >= 140 && channel <= 165)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G);
+ else
+ cal = 0;
+ rt2800_bbp_write(rt2x00dev, 159, cal);
+
+ /* TX1 IQ Gain */
+ rt2800_bbp_write(rt2x00dev, 158, 0x4a);
+ if (channel <= 14)
+ cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_2G);
+ else if (channel >= 36 && channel <= 64)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G);
+ else if (channel >= 100 && channel <= 138)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G);
+ else if (channel >= 140 && channel <= 165)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G);
+ else
+ cal = 0;
+ rt2800_bbp_write(rt2x00dev, 159, cal);
+
+ /* TX1 IQ Phase */
+ rt2800_bbp_write(rt2x00dev, 158, 0x4b);
+ if (channel <= 14)
+ cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_2G);
+ else if (channel >= 36 && channel <= 64)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G);
+ else if (channel >= 100 && channel <= 138)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G);
+ else if (channel >= 140 && channel <= 165)
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G);
+ else
+ cal = 0;
+ rt2800_bbp_write(rt2x00dev, 159, cal);
+
+ /* FIXME: possible RX0, RX1 calibration? */
+
+ /* RF IQ compensation control */
+ rt2800_bbp_write(rt2x00dev, 158, 0x04);
+ cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_RF_IQ_COMPENSATION_CONTROL);
+ rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
+
+ /* RF IQ imbalance compensation control */
+ rt2800_bbp_write(rt2x00dev, 158, 0x03);
+ cal = rt2x00_eeprom_byte(rt2x00dev,
+ EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL);
+ rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
+}
+
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2225,6 +2595,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF5392:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
break;
+ case RF5592:
+ rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info);
+ break;
default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
@@ -2326,6 +2699,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ if (rt2x00_rt(rt2x00dev, RT5592)) {
+ rt2800_bbp_write(rt2x00dev, 195, 141);
+ rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
+
+ /* AGC init */
+ reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain;
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+
+ rt2800_iq_calibrate(rt2x00dev, rf->channel);
+ }
+
rt2800_bbp_read(rt2x00dev, 4, &bbp);
rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
rt2800_bbp_write(rt2x00dev, 4, bbp);
@@ -2938,13 +3322,16 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x1c + (2 * rt2x00dev->lna_gain);
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
if (rt2x00_rt(rt2x00dev, RT3572))
vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+ else if (rt2x00_rt(rt2x00dev, RT5592))
+ vgc = 0x24 + (2 * rt2x00dev->lna_gain);
else {
if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
@@ -2960,7 +3347,11 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
struct link_qual *qual, u8 vgc_level)
{
if (qual->vgc_level != vgc_level) {
- rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+ if (rt2x00_rt(rt2x00dev, RT5592)) {
+ rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+ }
qual->vgc_level = vgc_level;
qual->vgc_level_reg = vgc_level;
}
@@ -2975,15 +3366,23 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
+ u8 vgc;
+
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
return;
-
/*
- * When RSSI is better then -80 increase VGC level with 0x10
+ * When RSSI is better than -80, increase the VGC level by 0x10, except
+ * for the rt5592 chip.
*/
- rt2800_set_vgc(rt2x00dev, qual,
- rt2800_get_default_vgc(rt2x00dev) +
- ((qual->rssi > -80) * 0x10));
+
+ vgc = rt2800_get_default_vgc(rt2x00dev);
+
+ if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
+ vgc += 0x20;
+ else if (qual->rssi > -80)
+ vgc += 0x10;
+
+ rt2800_set_vgc(rt2x00dev, qual, vgc);
}
EXPORT_SYMBOL_GPL(rt2800_link_tuner);
@@ -3122,7 +3521,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3302,7 +3702,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
- rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
+ reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
+ rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
@@ -3487,6 +3888,136 @@ static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
return -EACCES;
}
+static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
+{
+ u8 value;
+
+ rt2800_bbp_read(rt2x00dev, 4, &value);
+ rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
+ rt2800_bbp_write(rt2x00dev, 4, value);
+}
+
+static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 142, 1);
+ rt2800_bbp_write(rt2x00dev, 143, 57);
+}
+
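+/*
+ * BBP registers 195/196 act as an index/data pair for the GLRT table:
+ * the entry index (starting at 128) is written to BBP 195 and the
+ * corresponding value to BBP 196, both here and in the per-channel
+ * GLRT band configuration above.
+ */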
+static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
+{
+ const u8 glrt_table[] = {
+ 0xE0, 0x1F, 0x38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
+ 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
+ 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
+ 0x3C, 0x34, 0x2C, 0x2F, 0x3C, 0x35, 0x2E, 0x2A, 0x49, 0x41, /* 158 ~ 167 */
+ 0x36, 0x31, 0x30, 0x30, 0x0E, 0x0D, 0x28, 0x21, 0x1C, 0x16, /* 168 ~ 177 */
+ 0x50, 0x4A, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, /* 178 ~ 187 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 ~ 197 */
+ 0x00, 0x00, 0x7D, 0x14, 0x32, 0x2C, 0x36, 0x4C, 0x43, 0x2C, /* 198 ~ 207 */
+ 0x2E, 0x36, 0x30, 0x6E, /* 208 ~ 211 */
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(glrt_table); i++) {
+ rt2800_bbp_write(rt2x00dev, 195, 128 + i);
+ rt2800_bbp_write(rt2x00dev, 196, glrt_table[i]);
+ }
+}
+
+static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_bbp_write(rt2x00dev, 65, 0x2C);
+ rt2800_bbp_write(rt2x00dev, 66, 0x38);
+ rt2800_bbp_write(rt2x00dev, 68, 0x0B);
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+ rt2800_bbp_write(rt2x00dev, 81, 0x37);
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800_bbp_write(rt2x00dev, 83, 0x6A);
+ rt2800_bbp_write(rt2x00dev, 84, 0x99);
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+ rt2800_bbp_write(rt2x00dev, 92, 0x00);
+ rt2800_bbp_write(rt2x00dev, 103, 0x00);
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+}
+
+static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
+{
+ int ant, div_mode;
+ u16 eeprom;
+ u8 value;
+
+ rt2800_init_bbp_early(rt2x00dev);
+
+ rt2800_bbp_read(rt2x00dev, 105, &value);
+ rt2x00_set_field8(&value, BBP105_MLD,
+ rt2x00dev->default_ant.rx_chain_num == 2);
+ rt2800_bbp_write(rt2x00dev, 105, value);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 20, 0x06);
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+ rt2800_bbp_write(rt2x00dev, 65, 0x2C);
+ rt2800_bbp_write(rt2x00dev, 68, 0xDD);
+ rt2800_bbp_write(rt2x00dev, 69, 0x1A);
+ rt2800_bbp_write(rt2x00dev, 70, 0x05);
+ rt2800_bbp_write(rt2x00dev, 73, 0x13);
+ rt2800_bbp_write(rt2x00dev, 74, 0x0F);
+ rt2800_bbp_write(rt2x00dev, 75, 0x4F);
+ rt2800_bbp_write(rt2x00dev, 76, 0x28);
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
+ rt2800_bbp_write(rt2x00dev, 84, 0x9A);
+ rt2800_bbp_write(rt2x00dev, 86, 0x38);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+ rt2800_bbp_write(rt2x00dev, 91, 0x04);
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ rt2800_bbp_write(rt2x00dev, 95, 0x9a);
+ rt2800_bbp_write(rt2x00dev, 98, 0x12);
+ rt2800_bbp_write(rt2x00dev, 103, 0xC0);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ /* FIXME: BBP105 overwrite */
+ rt2800_bbp_write(rt2x00dev, 105, 0x3C);
+ rt2800_bbp_write(rt2x00dev, 106, 0x35);
+ rt2800_bbp_write(rt2x00dev, 128, 0x12);
+ rt2800_bbp_write(rt2x00dev, 134, 0xD0);
+ rt2800_bbp_write(rt2x00dev, 135, 0xF6);
+ rt2800_bbp_write(rt2x00dev, 137, 0x0F);
+
+ /* Initialize GLRT (Generalized Likelihood Ratio Test) */
+ rt2800_init_bbp_5592_glrt(rt2x00dev);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
+ ant = (div_mode == 3) ? 1 : 0;
+ rt2800_bbp_read(rt2x00dev, 152, &value);
+ if (ant == 0) {
+ /* Main antenna */
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
+ } else {
+ /* Auxiliary antenna */
+ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
+ }
+ rt2800_bbp_write(rt2x00dev, 152, value);
+
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
+ rt2800_bbp_read(rt2x00dev, 254, &value);
+ rt2x00_set_field8(&value, BBP254_BIT7, 1);
+ rt2800_bbp_write(rt2x00dev, 254, value);
+ }
+
+ rt2800_init_freq_calibration(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+}
+
static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
@@ -3498,6 +4029,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
+ if (rt2x00_rt(rt2x00dev, RT5592)) {
+ rt2800_init_bbp_5592(rt2x00dev);
+ return 0;
+ }
+
if (rt2x00_rt(rt2x00dev, RT3352)) {
rt2800_bbp_write(rt2x00dev, 3, 0x00);
rt2800_bbp_write(rt2x00dev, 4, 0x50);
@@ -3505,11 +4041,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
- rt2800_bbp_read(rt2x00dev, 4, &value);
- rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
- rt2800_bbp_write(rt2x00dev, 4, value);
- }
+ rt2x00_rt(rt2x00dev, RT5392))
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
if (rt2800_is_305x_soc(rt2x00dev) ||
rt2x00_rt(rt2x00dev, RT3290) ||
@@ -3783,9 +4316,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
rt2800_bbp_write(rt2x00dev, 152, value);
- /* Init frequency calibration */
- rt2800_bbp_write(rt2x00dev, 142, 1);
- rt2800_bbp_write(rt2x00dev, 143, 57);
+ rt2800_init_freq_calibration(rt2x00dev);
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -4259,6 +4790,69 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
}
+static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
+{
+ u8 reg;
+ u16 eeprom;
+
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x4D);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x8D);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x0C);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x22);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
+
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ msleep(1);
+
+ rt2800_adjust_freq_offset(rt2x00dev);
+
+ rt2800_bbp_read(rt2x00dev, 138, &reg);
+
+ /* Turn off unused DAC1 and ADC1 to reduce power consumption */
+ rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
+ rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
+ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
+ rt2x00_set_field8(&reg, BBP138_TX_DAC1, 1);
+
+ rt2800_bbp_write(rt2x00dev, 138, reg);
+
+ /* Enable DC filter */
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+
+ rt2800_rfcsr_read(rt2x00dev, 38, &reg);
+ rt2x00_set_field8(&reg, RFCSR38_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 38, reg);
+
+ rt2800_rfcsr_read(rt2x00dev, 39, &reg);
+ rt2x00_set_field8(&reg, RFCSR39_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 39, reg);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &reg);
+ rt2x00_set_field8(&reg, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, reg);
+}
+
static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -4276,6 +4870,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
!rt2x00_rt(rt2x00dev, RT5392) &&
+ !rt2x00_rt(rt2x00dev, RT5592) &&
!rt2800_is_305x_soc(rt2x00dev))
return 0;
@@ -4330,6 +4926,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT5392:
rt2800_init_rfcsr_5392(rt2x00dev);
break;
+ case RT5592:
+ rt2800_init_rfcsr_5592(rt2x00dev);
+ return 0;
}
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -4427,7 +5026,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
- rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
+ rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2x00_rt_rev_lt(rt2x00dev, RT5592, REV_RT5592C))
rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
@@ -4451,7 +5051,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
}
- if (rt2x00_rt(rt2x00dev, RT3090)) {
+ if (rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_read(rt2x00dev, 138, &bbp);
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
@@ -4507,7 +5108,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
}
if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
@@ -4533,15 +5135,23 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
* Initialize all registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
- rt2800_init_registers(rt2x00dev) ||
- rt2800_init_bbp(rt2x00dev) ||
- rt2800_init_rfcsr(rt2x00dev)))
+ rt2800_init_registers(rt2x00dev)))
return -EIO;
/*
* Send signal to firmware during boot time.
*/
- rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
+ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+ rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+ if (rt2x00_is_usb(rt2x00dev)) {
+ rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
+ rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
+ }
+ msleep(1);
+
+ if (unlikely(rt2800_init_bbp(rt2x00dev) ||
+ rt2800_init_rfcsr(rt2x00dev)))
+ return -EIO;
if (rt2x00_is_usb(rt2x00dev) &&
(rt2x00_rt(rt2x00dev, RT3070) ||
@@ -4863,6 +5473,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RT3572:
case RT5390:
case RT5392:
+ case RT5592:
break;
default:
ERROR(rt2x00dev, "Invalid RT chipset 0x%04x detected.\n", rt2x00dev->chip.rt);
@@ -4887,6 +5498,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF5372:
case RF5390:
case RF5392:
+ case RF5592:
break;
default:
ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
@@ -5122,6 +5734,138 @@ static const struct rf_channel rf_vals_3x[] = {
{173, 0x61, 0, 9},
};
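+/*
+ * The N, K, R and mod values below appear to program the RF PLL as
+ * f = xtal * (N + K / mod) / (R + 1), with an additional factor of 3
+ * applied in the 5GHz band (this relation is inferred from the table
+ * contents, not taken from a datasheet). For example, channel 1 with
+ * a 20MHz crystal: 20 * (482 + 4 / 10) / (3 + 1) = 2412 MHz.
+ */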
+static const struct rf_channel rf_vals_5592_xtal20[] = {
+ /* Channel, N, K, mod, R */
+ {1, 482, 4, 10, 3},
+ {2, 483, 4, 10, 3},
+ {3, 484, 4, 10, 3},
+ {4, 485, 4, 10, 3},
+ {5, 486, 4, 10, 3},
+ {6, 487, 4, 10, 3},
+ {7, 488, 4, 10, 3},
+ {8, 489, 4, 10, 3},
+ {9, 490, 4, 10, 3},
+ {10, 491, 4, 10, 3},
+ {11, 492, 4, 10, 3},
+ {12, 493, 4, 10, 3},
+ {13, 494, 4, 10, 3},
+ {14, 496, 8, 10, 3},
+ {36, 172, 8, 12, 1},
+ {38, 173, 0, 12, 1},
+ {40, 173, 4, 12, 1},
+ {42, 173, 8, 12, 1},
+ {44, 174, 0, 12, 1},
+ {46, 174, 4, 12, 1},
+ {48, 174, 8, 12, 1},
+ {50, 175, 0, 12, 1},
+ {52, 175, 4, 12, 1},
+ {54, 175, 8, 12, 1},
+ {56, 176, 0, 12, 1},
+ {58, 176, 4, 12, 1},
+ {60, 176, 8, 12, 1},
+ {62, 177, 0, 12, 1},
+ {64, 177, 4, 12, 1},
+ {100, 183, 4, 12, 1},
+ {102, 183, 8, 12, 1},
+ {104, 184, 0, 12, 1},
+ {106, 184, 4, 12, 1},
+ {108, 184, 8, 12, 1},
+ {110, 185, 0, 12, 1},
+ {112, 185, 4, 12, 1},
+ {114, 185, 8, 12, 1},
+ {116, 186, 0, 12, 1},
+ {118, 186, 4, 12, 1},
+ {120, 186, 8, 12, 1},
+ {122, 187, 0, 12, 1},
+ {124, 187, 4, 12, 1},
+ {126, 187, 8, 12, 1},
+ {128, 188, 0, 12, 1},
+ {130, 188, 4, 12, 1},
+ {132, 188, 8, 12, 1},
+ {134, 189, 0, 12, 1},
+ {136, 189, 4, 12, 1},
+ {138, 189, 8, 12, 1},
+ {140, 190, 0, 12, 1},
+ {149, 191, 6, 12, 1},
+ {151, 191, 10, 12, 1},
+ {153, 192, 2, 12, 1},
+ {155, 192, 6, 12, 1},
+ {157, 192, 10, 12, 1},
+ {159, 193, 2, 12, 1},
+ {161, 193, 6, 12, 1},
+ {165, 194, 2, 12, 1},
+ {184, 164, 0, 12, 1},
+ {188, 164, 4, 12, 1},
+ {192, 165, 8, 12, 1},
+ {196, 166, 0, 12, 1},
+};
+
+static const struct rf_channel rf_vals_5592_xtal40[] = {
+ /* Channel, N, K, mod, R */
+ {1, 241, 2, 10, 3},
+ {2, 241, 7, 10, 3},
+ {3, 242, 2, 10, 3},
+ {4, 242, 7, 10, 3},
+ {5, 243, 2, 10, 3},
+ {6, 243, 7, 10, 3},
+ {7, 244, 2, 10, 3},
+ {8, 244, 7, 10, 3},
+ {9, 245, 2, 10, 3},
+ {10, 245, 7, 10, 3},
+ {11, 246, 2, 10, 3},
+ {12, 246, 7, 10, 3},
+ {13, 247, 2, 10, 3},
+ {14, 248, 4, 10, 3},
+ {36, 86, 4, 12, 1},
+ {38, 86, 6, 12, 1},
+ {40, 86, 8, 12, 1},
+ {42, 86, 10, 12, 1},
+ {44, 87, 0, 12, 1},
+ {46, 87, 2, 12, 1},
+ {48, 87, 4, 12, 1},
+ {50, 87, 6, 12, 1},
+ {52, 87, 8, 12, 1},
+ {54, 87, 10, 12, 1},
+ {56, 88, 0, 12, 1},
+ {58, 88, 2, 12, 1},
+ {60, 88, 4, 12, 1},
+ {62, 88, 6, 12, 1},
+ {64, 88, 8, 12, 1},
+ {100, 91, 8, 12, 1},
+ {102, 91, 10, 12, 1},
+ {104, 92, 0, 12, 1},
+ {106, 92, 2, 12, 1},
+ {108, 92, 4, 12, 1},
+ {110, 92, 6, 12, 1},
+ {112, 92, 8, 12, 1},
+ {114, 92, 10, 12, 1},
+ {116, 93, 0, 12, 1},
+ {118, 93, 2, 12, 1},
+ {120, 93, 4, 12, 1},
+ {122, 93, 6, 12, 1},
+ {124, 93, 8, 12, 1},
+ {126, 93, 10, 12, 1},
+ {128, 94, 0, 12, 1},
+ {130, 94, 2, 12, 1},
+ {132, 94, 4, 12, 1},
+ {134, 94, 6, 12, 1},
+ {136, 94, 8, 12, 1},
+ {138, 94, 10, 12, 1},
+ {140, 95, 0, 12, 1},
+ {149, 95, 9, 12, 1},
+ {151, 95, 11, 12, 1},
+ {153, 96, 1, 12, 1},
+ {155, 96, 3, 12, 1},
+ {157, 96, 5, 12, 1},
+ {159, 96, 7, 12, 1},
+ {161, 96, 9, 12, 1},
+ {165, 97, 1, 12, 1},
+ {184, 82, 0, 12, 1},
+ {188, 82, 4, 12, 1},
+ {192, 82, 8, 12, 1},
+ {196, 83, 0, 12, 1},
+};
+
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -5130,6 +5874,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
char *default_power2;
unsigned int i;
u16 eeprom;
+ u32 reg;
/*
* Disable powersaving as default on PCI devices.
@@ -5211,8 +5956,22 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
+ } else if (rt2x00_rf(rt2x00dev, RF5592)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
+ rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
+ if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
+ spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
+ spec->channels = rf_vals_5592_xtal40;
+ } else {
+ spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
+ spec->channels = rf_vals_5592_xtal20;
+ }
}
+ if (WARN_ON_ONCE(!spec->channels))
+ return -ENODEV;
+
/*
* Initialize HT information.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index ded73da4de0..f732ded8f1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -729,6 +729,11 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
* Process the RXWI structure that is at the start of the buffer.
*/
rt2800_process_rxwi(entry, rxdesc);
+
+ /*
+ * Remove RXWI descriptor from start of buffer.
+ */
+ skb_pull(entry->skb, RXWI_DESC_SIZE);
}
/*
@@ -742,10 +747,90 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
+static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+ __le32 *txwi;
+ u32 word;
+ int wcid, tx_wcid;
+
+ wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+ txwi = rt2800_drv_get_txwi(entry);
+ rt2x00_desc_read(txwi, 1, &word);
+ tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+ return (tx_wcid == wcid);
+}
+
+static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * rt2800pci hardware might reorder frames when exchanging traffic
+ * with multiple BA enabled STAs.
+ *
+ * For example, a tx queue
+ * [ STA1 | STA2 | STA1 | STA2 ]
+ * can result in tx status reports
+ * [ STA1 | STA1 | STA2 | STA2 ]
+ * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+ *
+ * To mitigate this effect, associate the tx status with the first frame
+ * in the tx queue with a matching wcid.
+ */
+ if (rt2800pci_txdone_entry_check(entry, status) &&
+ !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * Got a matching frame, associate the tx status with
+ * the frame
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
+{
+ u32 status = *(u32 *)data;
+
+ /*
+ * Find the first frame without tx status and assign this status to it
+ * regardless of whether it matches or not.
+ */
+ if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ /*
+ * No matching frame was found, so associate the tx status
+ * with this first frame that has no status yet
+ */
+ entry->status = status;
+ set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+ return true;
+ }
+
+ /* Check the next frame */
+ return false;
+}
+
+static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
+ void *data)
+{
+ if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+ rt2800_txdone_entry(entry, entry->status,
+ rt2800pci_get_txwi(entry));
+ return false;
+ }
+
+ /* No more frames to release */
+ return true;
+}
+
static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
- struct queue_entry *entry;
u32 status;
u8 qid;
int max_tx_done = 16;
@@ -783,8 +868,33 @@ static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
}
- entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
- rt2800_txdone_entry(entry, status, rt2800pci_get_txwi(entry));
+ /*
+ * Let's associate this tx status with the first
+ * matching frame.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800pci_txdone_find_entry)) {
+ /*
+ * We cannot match the tx status to any frame, so just
+ * use the first one.
+ */
+ if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, &status,
+ rt2800pci_txdone_match_first)) {
+ WARNING(rt2x00dev, "No frame found for TX "
+ "status on queue %u, dropping\n",
+ qid);
+ break;
+ }
+ }
+
+ /*
+ * Release all frames with a valid tx status.
+ */
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+ Q_INDEX, NULL,
+ rt2800pci_txdone_release_entries);
if (--max_tx_done == 0)
break;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 098613ed93f..f3228200914 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -485,7 +485,7 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
*/
skbdesc->flags |= SKBDESC_DESC_IN_SKB;
skbdesc->desc = txi;
- skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
+ skbdesc->desc_len = entry->queue->desc_size;
}
/*
@@ -730,6 +730,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* Process the RXWI structure.
*/
rt2800_process_rxwi(entry, rxdesc);
+
+ /*
+ * Remove RXWI descriptor from start of buffer.
+ */
+ skb_pull(entry->skb, entry->queue->desc_size - RXINFO_DESC_SIZE);
}
/*
@@ -890,6 +895,47 @@ static const struct rt2x00_ops rt2800usb_ops = {
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
+static const struct data_queue_desc rt2800usb_queue_rx_5592 = {
+ .entry_num = 128,
+ .data_size = AGGREGATION_SIZE,
+ .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE_5592,
+ .priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct data_queue_desc rt2800usb_queue_tx_5592 = {
+ .entry_num = 16,
+ .data_size = AGGREGATION_SIZE,
+ .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
+ .priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct data_queue_desc rt2800usb_queue_bcn_5592 = {
+ .entry_num = 8,
+ .data_size = MGMT_FRAME_SIZE,
+ .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
+ .priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct rt2x00_ops rt2800usb_ops_5592 = {
+ .name = KBUILD_MODNAME,
+ .drv_data_size = sizeof(struct rt2800_drv_data),
+ .max_ap_intf = 8,
+ .eeprom_size = EEPROM_SIZE,
+ .rf_size = RF_SIZE,
+ .tx_queues = NUM_TX_QUEUES,
+ .extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
+ .rx = &rt2800usb_queue_rx_5592,
+ .tx = &rt2800usb_queue_tx_5592,
+ .bcn = &rt2800usb_queue_bcn_5592,
+ .lib = &rt2800usb_rt2x00_ops,
+ .drv = &rt2800usb_rt2800_ops,
+ .hw = &rt2800usb_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+ .debugfs = &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
/*
* rt2800usb module information.
*/
@@ -1200,6 +1246,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x148f, 0x5370) },
{ USB_DEVICE(0x148f, 0x5372) },
#endif
+#ifdef CONFIG_RT2800USB_RT55XX
+ /* Arcadyan */
+ { USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 },
+ /* AVM GmbH */
+ { USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 },
+ /* D-Link DWA-160-B2 */
+ { USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 },
+ /* Proware */
+ { USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 },
+#endif
#ifdef CONFIG_RT2800USB_UNKNOWN
/*
* Unclear what kind of devices these are (they aren't supported by the
@@ -1303,6 +1361,9 @@ MODULE_LICENSE("GPL");
static int rt2800usb_probe(struct usb_interface *usb_intf,
const struct usb_device_id *id)
{
+ if (id->driver_info == 5592)
+ return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592);
+
return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 086abb403a4..51922cc179d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -193,6 +193,7 @@ struct rt2x00_chip {
#define RT3883 0x3883 /* WSOC */
#define RT5390 0x5390 /* 2.4GHz */
#define RT5392 0x5392 /* 2.4GHz */
+#define RT5592 0x5592
u16 rf;
u16 rev;
@@ -1064,8 +1065,7 @@ static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
}
/*
- * Generic EEPROM access.
- * The EEPROM is being accessed by word index.
+ * Generic EEPROM access. The EEPROM is being accessed by word or byte index.
*/
static inline void *rt2x00_eeprom_addr(struct rt2x00_dev *rt2x00dev,
const unsigned int word)
@@ -1085,6 +1085,12 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
rt2x00dev->eeprom[word] = cpu_to_le16(data);
}
+static inline u8 rt2x00_eeprom_byte(struct rt2x00_dev *rt2x00dev,
+ const unsigned int byte)
+{
+ return *(((u8 *)rt2x00dev->eeprom) + byte);
+}
+
/*
* Chipset handlers
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index a0c8caef3b0..c4009eaeb69 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
udelay(REGISTER_BUSY_DELAY);
}
- ERROR(rt2x00dev, "Indirect register access failed: "
- "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
+ printk_once(KERN_ERR "%s() Indirect register access failed: "
+ "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
*reg = ~0;
return 0;
@@ -124,12 +124,10 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
*/
addr = dma_alloc_coherent(rt2x00dev->dev,
queue->limit * queue->desc_size,
- &dma, GFP_KERNEL);
+ &dma, GFP_KERNEL | __GFP_ZERO);
if (!addr)
return -ENOMEM;
- memset(addr, 0, queue->limit * queue->desc_size);
-
/*
* Initialize all queue entries to contain valid addresses.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4d91795dc6a..952a0490eb1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -832,7 +832,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- bool (*fn)(struct queue_entry *entry))
+ void *data,
+ bool (*fn)(struct queue_entry *entry,
+ void *data))
{
unsigned long irqflags;
unsigned int index_start;
@@ -863,17 +865,17 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
*/
if (index_start < index_end) {
for (i = index_start; i < index_end; i++) {
- if (fn(&queue->entries[i]))
+ if (fn(&queue->entries[i], data))
return true;
}
} else {
for (i = index_start; i < queue->limit; i++) {
- if (fn(&queue->entries[i]))
+ if (fn(&queue->entries[i], data))
return true;
}
for (i = 0; i < index_end; i++) {
- if (fn(&queue->entries[i]))
+ if (fn(&queue->entries[i], data))
return true;
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 9b8c10a86de..3d0137193da 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -359,6 +359,7 @@ enum queue_entry_flags {
ENTRY_DATA_PENDING,
ENTRY_DATA_IO_FAILED,
ENTRY_DATA_STATUS_PENDING,
+ ENTRY_DATA_STATUS_SET,
};
/**
@@ -372,6 +373,7 @@ enum queue_entry_flags {
* @entry_idx: The entry index number.
* @priv_data: Private data belonging to this queue entry. The pointer
* points to data specific to a particular driver and queue type.
+ * @status: Device specific status
*/
struct queue_entry {
unsigned long flags;
@@ -383,6 +385,8 @@ struct queue_entry {
unsigned int entry_idx;
+ u32 status;
+
void *priv_data;
};
@@ -584,6 +588,7 @@ struct data_queue_desc {
* @queue: Pointer to @data_queue
* @start: &enum queue_index Pointer to start index
* @end: &enum queue_index Pointer to end index
+ * @data: Data to pass to the callback function
* @fn: The function to call for each &struct queue_entry
*
* This will walk through all entries in the queue, in chronological
@@ -596,7 +601,9 @@ struct data_queue_desc {
bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- bool (*fn)(struct queue_entry *entry));
+ void *data,
+ bool (*fn)(struct queue_entry *entry,
+ void *data));
/**
* rt2x00queue_empty - Check if the queue is empty.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 40ea80725a9..5e50d4ff9d2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -285,7 +285,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
-static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry)
+static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -390,7 +390,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
-static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry)
+static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -427,12 +427,18 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
case QID_AC_BE:
case QID_AC_BK:
if (!rt2x00queue_empty(queue))
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+ rt2x00queue_for_each_entry(queue,
+ Q_INDEX_DONE,
+ Q_INDEX,
+ NULL,
rt2x00usb_kick_tx_entry);
break;
case QID_RX:
if (!rt2x00queue_full(queue))
- rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE,
+ rt2x00queue_for_each_entry(queue,
+ Q_INDEX,
+ Q_INDEX_DONE,
+ NULL,
rt2x00usb_kick_rx_entry);
break;
default:
@@ -441,7 +447,7 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
-static bool rt2x00usb_flush_entry(struct queue_entry *entry)
+static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
@@ -468,7 +474,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
unsigned int i;
if (drop)
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
rt2x00usb_flush_entry);
/*
@@ -559,7 +565,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
entry->flags = 0;
if (entry->queue->qid == QID_RX)
- rt2x00usb_kick_rx_entry(entry);
+ rt2x00usb_kick_rx_entry(entry, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 5847d6d0881..41dce83ff41 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -224,10 +224,9 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
u8 *buffer;
wvalue = (u16)(addr & 0x0000ffff);
- buffer = kmalloc(len, GFP_ATOMIC);
+ buffer = kmemdup(data, len, GFP_ATOMIC);
if (!buffer)
return;
- memcpy(buffer, data, len);
usb_control_msg(udev, pipe, request, reqtype, wvalue,
index, buffer, len, 50);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index f13258a8d99..c3eff32acf6 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2127,9 +2127,6 @@ value to host byte ordering.*/
#define WLAN_FC_GET_TYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
#define WLAN_FC_GET_STYPE(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
#define WLAN_FC_MORE_DATA(fc) (le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2c2ff3e1f84..d7e306333f6 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -4956,7 +4956,8 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
- int duration)
+ int duration,
+ enum ieee80211_roc_type type)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cd49ba94963..83905a97c56 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -942,7 +942,6 @@ static int netbk_count_requests(struct xenvif *vif,
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
- struct sk_buff *skb,
u16 pending_idx)
{
struct page *page;
@@ -976,7 +975,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
index = pending_index(netbk->pending_cons++);
pending_idx = netbk->pending_ring[index];
- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ page = xen_netbk_alloc_page(netbk, pending_idx);
if (!page)
goto err;
@@ -1185,6 +1184,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
if (th >= skb_tail_pointer(skb))
goto out;
+ skb_set_transport_header(skb, 4 * iph->ihl);
skb->csum_start = th - skb->head;
switch (iph->protocol) {
case IPPROTO_TCP:
@@ -1381,7 +1381,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
}
/* XXX could copy straight to head */
- page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+ page = xen_netbk_alloc_page(netbk, pending_idx);
if (!page) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
@@ -1496,6 +1496,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
skb->dev = vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
+ skb_reset_network_header(skb);
if (checksum_setup(vif, skb)) {
netdev_dbg(vif->dev,
@@ -1504,6 +1505,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
continue;
}
+ skb_probe_transport_header(skb, 0);
+
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7ffa43bd7cf..d9097a78696 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -537,7 +537,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct netfront_info *np = netdev_priv(dev);
struct netfront_stats *stats = this_cpu_ptr(np->stats);
struct xen_netif_tx_request *tx;
- struct xen_netif_extra_info *extra;
char *data = skb->data;
RING_IDX i;
grant_ref_t ref;
@@ -581,7 +580,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
- extra = NULL;
tx->flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -597,10 +595,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso = (struct xen_netif_extra_info *)
RING_GET_REQUEST(&np->tx, ++i);
- if (extra)
- extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
- else
- tx->flags |= XEN_NETTXF_extra_info;
+ tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -609,7 +604,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
gso->flags = 0;
- extra = gso;
}
np->tx.req_prod_pvt = i + 1;
@@ -718,7 +712,7 @@ static int xennet_get_responses(struct netfront_info *np,
struct sk_buff *skb = xennet_get_rx_skb(np, cons);
grant_ref_t ref = xennet_get_rx_ref(np, cons);
int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
- int frags = 1;
+ int slots = 1;
int err = 0;
unsigned long ret;
@@ -762,27 +756,27 @@ next:
if (!(rx->flags & XEN_NETRXF_more_data))
break;
- if (cons + frags == rp) {
+ if (cons + slots == rp) {
if (net_ratelimit())
- dev_warn(dev, "Need more frags\n");
+ dev_warn(dev, "Need more slots\n");
err = -ENOENT;
break;
}
- rx = RING_GET_RESPONSE(&np->rx, cons + frags);
- skb = xennet_get_rx_skb(np, cons + frags);
- ref = xennet_get_rx_ref(np, cons + frags);
- frags++;
+ rx = RING_GET_RESPONSE(&np->rx, cons + slots);
+ skb = xennet_get_rx_skb(np, cons + slots);
+ ref = xennet_get_rx_ref(np, cons + slots);
+ slots++;
}
- if (unlikely(frags > max)) {
+ if (unlikely(slots > max)) {
if (net_ratelimit())
dev_warn(dev, "Too many frags\n");
err = -E2BIG;
}
if (unlikely(err))
- np->rx.rsp_cons = cons + frags;
+ np->rx.rsp_cons = cons + slots;
return err;
}
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index eef38cfd812..13bde92b1e2 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -48,7 +48,7 @@ struct mei_nfc_hdr {
#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
struct microread_mei_phy {
- struct mei_device *mei_device;
+ struct mei_device *device;
struct nfc_hci_dev *hdev;
int powered;
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 1367655eee3..bea94510ad2 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -118,7 +118,7 @@ struct pch_ts_regs {
* struct pch_dev - Driver private data
*/
struct pch_dev {
- struct pch_ts_regs *regs;
+ struct pch_ts_regs __iomem *regs;
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
int exts0_enabled;
@@ -154,7 +154,7 @@ static inline void pch_eth_enable_set(struct pch_dev *chip)
iowrite32(val, (&chip->regs->ts_sel));
}
-static u64 pch_systime_read(struct pch_ts_regs *regs)
+static u64 pch_systime_read(struct pch_ts_regs __iomem *regs)
{
u64 ns;
u32 lo, hi;
@@ -169,7 +169,7 @@ static u64 pch_systime_read(struct pch_ts_regs *regs)
return ns;
}
-static void pch_systime_write(struct pch_ts_regs *regs, u64 ns)
+static void pch_systime_write(struct pch_ts_regs __iomem *regs, u64 ns)
{
u32 hi, lo;
@@ -315,7 +315,7 @@ int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
struct pch_dev *chip = pci_get_drvdata(pdev);
/* Verify the parameter */
- if ((chip->regs == 0) || addr == (u8 *)NULL) {
+ if ((chip->regs == NULL) || addr == (u8 *)NULL) {
dev_err(&pdev->dev,
"invalid params returning PCH_INVALIDPARAM\n");
return PCH_INVALIDPARAM;
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(pch_set_station_address);
static irqreturn_t isr(int irq, void *priv)
{
struct pch_dev *pch_dev = priv;
- struct pch_ts_regs *regs = pch_dev->regs;
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
struct ptp_clock_event event;
u32 ack = 0, lo, hi, val;
@@ -415,7 +415,7 @@ static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
u32 diff, addend;
int neg_adj = 0;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
- struct pch_ts_regs *regs = pch_dev->regs;
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
if (ppb < 0) {
neg_adj = 1;
@@ -438,7 +438,7 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
s64 now;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
- struct pch_ts_regs *regs = pch_dev->regs;
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
spin_lock_irqsave(&pch_dev->register_lock, flags);
now = pch_systime_read(regs);
@@ -455,7 +455,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
u32 remainder;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
- struct pch_ts_regs *regs = pch_dev->regs;
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
spin_lock_irqsave(&pch_dev->register_lock, flags);
ns = pch_systime_read(regs);
@@ -472,7 +472,7 @@ static int ptp_pch_settime(struct ptp_clock_info *ptp,
u64 ns;
unsigned long flags;
struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
- struct pch_ts_regs *regs = pch_dev->regs;
+ struct pch_ts_regs __iomem *regs = pch_dev->regs;
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
@@ -567,9 +567,9 @@ static void pch_remove(struct pci_dev *pdev)
free_irq(pdev->irq, chip);
/* unmap the virtual IO memory space */
- if (chip->regs != 0) {
+ if (chip->regs != NULL) {
iounmap(chip->regs);
- chip->regs = 0;
+ chip->regs = NULL;
}
/* release the reserved IO memory space */
if (chip->mem_base != 0) {
@@ -670,7 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_req_irq:
ptp_clock_unregister(chip->ptp_clock);
iounmap(chip->regs);
- chip->regs = 0;
+ chip->regs = NULL;
err_ioremap:
release_mem_region(chip->mem_base, chip->mem_size);
@@ -723,9 +723,10 @@ static s32 __init ptp_pch_init(void)
module_init(ptp_pch_init);
module_exit(ptp_pch_exit);
-module_param_string(station, pch_param.station, sizeof pch_param.station, 0444);
+module_param_string(station,
+ pch_param.station, sizeof(pch_param.station), 0444);
MODULE_PARM_DESC(station,
- "IEEE 1588 station address to use - column separated hex values");
+ "IEEE 1588 station address to use - colon separated hex values");
MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
MODULE_DESCRIPTION("PTP clock using the EG20T timer");
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 2029b6caa59..fb877b59ec5 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -166,7 +166,7 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq)
vcdev = to_vc_device(info->vq->vdev);
ccw_device_get_schid(vcdev->cdev, &schid);
- do_kvm_notify(schid, virtqueue_get_queue_index(vq));
+ do_kvm_notify(schid, vq->index);
}
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
@@ -188,7 +188,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
unsigned long flags;
unsigned long size;
int ret;
- unsigned int index = virtqueue_get_queue_index(vq);
+ unsigned int index = vq->index;
/* Remove from our list. */
spin_lock_irqsave(&vcdev->lock, flags);
@@ -610,7 +610,7 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
vq = NULL;
spin_lock_irqsave(&vcdev->lock, flags);
list_for_each_entry(info, &vcdev->virtqueues, node) {
- if (virtqueue_get_queue_index(info->vq) == index) {
+ if (info->vq->index == index) {
vq = info->vq;
break;
}
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
index b581966c88f..913b9a92fb0 100644
--- a/drivers/scsi/csiostor/Makefile
+++ b/drivers/scsi/csiostor/Makefile
@@ -8,4 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
- csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
+ csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \
+ csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index bdd78fb4fc7..a0b4c8991de 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -61,7 +61,7 @@ int csio_msi = 2;
static int dev_num;
/* FCoE Adapter types & its description */
-static const struct csio_adap_desc csio_fcoe_adapters[] = {
+static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
@@ -77,7 +77,38 @@ static const struct csio_adap_desc csio_fcoe_adapters[] = {
{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
- {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+ {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ {"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
+ {"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
+ {"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
+ {"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
+ {"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
+ {"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
+ {"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
+ {"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
+};
+
+static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
+ {"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
+ {"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
+ {"T522-CR 10G/1G", "Chelsio T452-CR 10G/1G [FCoE]"},
+ {"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
+ {"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
+ {"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
+ {"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
+ {"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
+ {"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
+ {"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
+ {"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
+ {"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
+ {"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
+ {"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
+ {"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
+ {"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+ {"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
+ {"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
+ {"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
+ {"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
};
static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@ -124,7 +155,7 @@ int csio_is_hw_removing(struct csio_hw *hw)
* at the time it indicated completion is stored there. Returns 0 if the
* operation completes and -EAGAIN otherwise.
*/
-static int
+int
csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
int polarity, int attempts, int delay, uint32_t *valp)
{
@@ -145,6 +176,24 @@ csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
}
}
+/*
+ * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @hw: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void
+csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ csio_wr_reg32(hw, addr, TP_PIO_ADDR);
+ val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
+ csio_wr_reg32(hw, val, TP_PIO_DATA);
+}
+
void
csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
uint32_t value)
@@ -157,242 +206,22 @@ csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
}
-/*
- * csio_hw_mc_read - read from MC through backdoor accesses
- * @hw: the hw module
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from MC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int
-csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
- uint64_t *ecc)
-{
- int i;
-
- if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
- return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
- csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
- csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
- csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
- MC_BIST_CMD);
- i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
- 0, 10, 1, NULL);
- if (i)
- return i;
-
-#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
- if (ecc)
- *ecc = csio_rd_reg64(hw, MC_DATA(16));
-#undef MC_DATA
- return 0;
-}
-
-/*
- * csio_hw_edc_read - read from EDC through backdoor accesses
- * @hw: the hw module
- * @idx: which EDC to access
- * @addr: address of first byte requested
- * @data: 64 bytes of data containing the requested address
- * @ecc: where to store the corresponding 64-bit ECC word
- *
- * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
- * that covers the requested address @addr. If @parity is not %NULL it
- * is assigned the 64-bit ECC word for the read data.
- */
-int
-csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
- uint64_t *ecc)
-{
- int i;
-
- idx *= EDC_STRIDE;
- if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
- return -EBUSY;
- csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
- csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
- csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
- csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
- EDC_BIST_CMD + idx);
- i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
- 0, 10, 1, NULL);
- if (i)
- return i;
-
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
-
- for (i = 15; i >= 0; i--)
- *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
- if (ecc)
- *ecc = csio_rd_reg64(hw, EDC_DATA(16));
-#undef EDC_DATA
- return 0;
-}
-
-/*
- * csio_mem_win_rw - read/write memory through PCIE memory window
- * @hw: the adapter
- * @addr: address of first byte requested
- * @data: MEMWIN0_APERTURE bytes of data containing the requested address
- * @dir: direction of transfer 1 => read, 0 => write
- *
- * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
- * MEMWIN0_APERTURE-byte-aligned address that covers the requested
- * address @addr.
- */
-static int
-csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
-{
- int i;
-
- /*
- * Setup offset into PCIE memory window. Address must be a
- * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
- * ensure that changes propagate before we attempt to use the new
- * values.)
- */
- csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
- PCIE_MEM_ACCESS_OFFSET);
- csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
-
- /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
- for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
- if (dir)
- *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
- else
- csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
- }
-
- return 0;
-}
-
-/*
- * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
- * @hw: the csio_hw
- * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
- * @addr: address within indicated memory type
- * @len: amount of memory to transfer
- * @buf: host memory buffer
- * @dir: direction of transfer 1 => read, 0 => write
- *
- * Reads/writes an [almost] arbitrary memory region in the firmware: the
- * firmware memory address, length and host buffer must be aligned on
- * 32-bit boudaries. The memory is transferred as a raw byte sequence
- * from/to the firmware's memory. If this memory contains data
- * structures which contain multi-byte integers, it's the callers
- * responsibility to perform appropriate byte order conversions.
- */
-static int
-csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
- uint32_t *buf, int dir)
-{
- uint32_t pos, start, end, offset, memoffset;
- int ret;
- uint32_t *data;
-
- /*
- * Argument sanity checks ...
- */
- if ((addr & 0x3) || (len & 0x3))
- return -EINVAL;
-
- data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- /* Offset into the region of memory which is being accessed
- * MEM_EDC0 = 0
- * MEM_EDC1 = 1
- * MEM_MC = 2
- */
- memoffset = (mtype * (5 * 1024 * 1024));
-
- /* Determine the PCIE_MEM_ACCESS_OFFSET */
- addr = addr + memoffset;
-
- /*
- * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
- * at a time so we need to round down the start and round up the end.
- * We'll start copying out of the first line at (addr - start) a word
- * at a time.
- */
- start = addr & ~(MEMWIN0_APERTURE-1);
- end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
- offset = (addr - start)/sizeof(__be32);
-
- for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
- /*
- * If we're writing, copy the data from the caller's memory
- * buffer
- */
- if (!dir) {
- /*
- * If we're doing a partial write, then we need to do
- * a read-modify-write ...
- */
- if (offset || len < MEMWIN0_APERTURE) {
- ret = csio_mem_win_rw(hw, pos, data, 1);
- if (ret) {
- kfree(data);
- return ret;
- }
- }
- while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
- len > 0) {
- data[offset++] = *buf++;
- len -= sizeof(__be32);
- }
- }
-
- /*
- * Transfer a block of memory and bail if there's an error.
- */
- ret = csio_mem_win_rw(hw, pos, data, dir);
- if (ret) {
- kfree(data);
- return ret;
- }
-
- /*
- * If we're reading, copy the data into the caller's memory
- * buffer.
- */
- if (dir)
- while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
- len > 0) {
- *buf++ = data[offset++];
- len -= sizeof(__be32);
- }
- }
-
- kfree(data);
-
- return 0;
-}
-
static int
csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
{
- return csio_memory_rw(hw, mtype, addr, len, buf, 0);
+ return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
+ addr, len, buf, 0);
}
/*
* EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
*/
-#define EEPROM_MAX_RD_POLL 40
-#define EEPROM_MAX_WR_POLL 6
-#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_BASE 0x400
-#define VPD_BASE_OLD 0
-#define VPD_LEN 512
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 1024
#define VPD_INFO_FLD_HDR_SIZE 3
/*
@@ -817,23 +646,6 @@ out:
return 0;
}
-/*
- * csio_hw_flash_cfg_addr - return the address of the flash
- * configuration file
- * @hw: the HW module
- *
- * Return the address within the flash where the Firmware Configuration
- * File is stored.
- */
-static unsigned int
-csio_hw_flash_cfg_addr(struct csio_hw *hw)
-{
- if (hw->params.sf_size == 0x100000)
- return FPGA_FLASH_CFG_OFFSET;
- else
- return FLASH_CFG_OFFSET;
-}
-
static void
csio_hw_print_fw_version(struct csio_hw *hw, char *str)
{
@@ -898,13 +710,13 @@ csio_hw_check_fw_version(struct csio_hw *hw)
minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
- if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
csio_err(hw, "card FW has major version %u, driver wants %u\n",
- major, FW_VERSION_MAJOR);
+ major, FW_VERSION_MAJOR(hw));
return -EINVAL;
}
- if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
return 0; /* perfect match */
/* Minor/micro version mismatch */
@@ -1044,7 +856,7 @@ static void
csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
{
uint16_t val;
- uint32_t pcie_cap;
+ int pcie_cap;
if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
pci_read_config_word(hw->pdev,
@@ -1056,84 +868,6 @@ csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
}
}
-
-/*
- * Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we go for it
- * directly ourselves.
- */
-static uint32_t
-csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
-{
- u32 val = 0;
- struct csio_mb *mbp;
- int rv;
- struct fw_ldst_cmd *ldst_cmd;
-
- mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
- if (!mbp) {
- CSIO_INC_STATS(hw, n_err_nomem);
- pci_read_config_dword(hw->pdev, reg, &val);
- return val;
- }
-
- csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
-
- rv = csio_mb_issue(hw, mbp);
-
- /*
- * If the LDST Command suucceeded, exctract the returned register
- * value. Otherwise read it directly ourself.
- */
- if (rv == 0) {
- ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
- val = ntohl(ldst_cmd->u.pcie.data[0]);
- } else
- pci_read_config_dword(hw->pdev, reg, &val);
-
- mempool_free(mbp, hw->mb_mempool);
-
- return val;
-} /* csio_read_pcie_cfg4 */
-
-static int
-csio_hw_set_mem_win(struct csio_hw *hw)
-{
- u32 bar0;
-
- /*
- * Truncation intentional: we only read the bottom 32-bits of the
- * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
- * read BAR0 instead of using pci_resource_start() because we could be
- * operating from within a Virtual Machine which is trapping our
- * accesses to our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will be
- * coming across the PCI-E link.
- */
- bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
-
- /*
- * Set up memory window for accessing adapter memory ranges. (Read
- * back MA register to ensure that changes propagate before we attempt
- * to use the new values.)
- */
- csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
- WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
- csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
- WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
- csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
- WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
- csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
- return 0;
-} /* csio_hw_set_mem_win */
-
-
-
/*****************************************************************************/
/* HW State machine assists */
/*****************************************************************************/
@@ -1234,7 +968,9 @@ retry:
for (;;) {
uint32_t pcie_fw;
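+		/*
+		 * hw->lock is a spinlock taken with IRQs off; it cannot be
+		 * held across the sleep below.
+		 */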
+ spin_unlock_irq(&hw->lock);
msleep(50);
+ spin_lock_irq(&hw->lock);
waiting -= 50;
/*
@@ -2121,9 +1857,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
uint32_t *cfg_data;
int value_to_add = 0;
- if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) {
- csio_err(hw, "could not find config file " CSIO_CF_FNAME
- ",err: %d\n", ret);
+ if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
+ csio_err(hw, "could not find config file %s, err: %d\n",
+ CSIO_CF_FNAME(hw), ret);
return -ENOENT;
}
@@ -2147,9 +1883,24 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
ret = csio_memory_write(hw, mtype, maddr,
cf->size + value_to_add, cfg_data);
+
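+	/*
+	 * If the config file size was not a multiple of 4, rewrite the final
+	 * word with the padding bytes cleared.
+	 */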
+ if ((ret == 0) && (value_to_add != 0)) {
+ union {
+ u32 word;
+ char buf[4];
+ } last;
+ size_t size = cf->size & ~0x3;
+ int i;
+
+ last.word = cfg_data[size >> 2];
+ for (i = value_to_add; i < 4; i++)
+ last.buf[i] = 0;
+ ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
+ }
if (ret == 0) {
- csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");
- strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
+ csio_info(hw, "config file upgraded to %s\n",
+ CSIO_CF_FNAME(hw));
+ snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
}
leave:
@@ -2179,7 +1930,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
{
unsigned int mtype, maddr;
int rv;
- uint32_t finiver, finicsum, cfcsum;
+ uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
int using_flash;
char path[64];
@@ -2207,7 +1958,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
* config file from flash.
*/
mtype = FW_MEMTYPE_CF_FLASH;
- maddr = csio_hw_flash_cfg_addr(hw);
+ maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
using_flash = 1;
} else {
/*
@@ -2346,30 +2097,32 @@ csio_hw_flash_fw(struct csio_hw *hw)
struct pci_dev *pci_dev = hw->pdev;
struct device *dev = &pci_dev->dev ;
- if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {
- csio_err(hw, "could not find firmware image " CSIO_FW_FNAME
- ",err: %d\n", ret);
+ if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
+ csio_err(hw, "could not find firmware image %s, err: %d\n",
+ CSIO_FW_FNAME(hw), ret);
return -EINVAL;
}
hdr = (const struct fw_hdr *)fw->data;
fw_ver = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR)
+ if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
return -EINVAL; /* wrong major version, won't do */
/*
* If the flash FW is unusable or we found something newer, load it.
*/
- if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||
+ if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
fw_ver > hw->fwrev) {
ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
/*force=*/false);
if (!ret)
- csio_info(hw, "firmware upgraded to version %pI4 from "
- CSIO_FW_FNAME "\n", &hdr->fw_ver);
+ csio_info(hw,
+ "firmware upgraded to version %pI4 from %s\n",
+ &hdr->fw_ver, CSIO_FW_FNAME(hw));
else
csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
- }
+ } else
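+		/* Adapter firmware is already current for this chip; no upgrade performed. */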
+ ret = -EINVAL;
release_firmware(fw);
@@ -2410,7 +2163,7 @@ csio_hw_configure(struct csio_hw *hw)
/* Set pci completion timeout value to 4 seconds. */
csio_set_pcie_completion_timeout(hw, 0xd);
- csio_hw_set_mem_win(hw);
+ hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
rv = csio_hw_get_fw_version(hw, &hw->fwrev);
if (rv != 0)
@@ -2478,6 +2231,8 @@ csio_hw_configure(struct csio_hw *hw)
} else {
if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
/* device parameters */
rv = csio_get_device_params(hw);
if (rv != 0)
@@ -2651,7 +2406,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
}
-static void
+void
csio_hw_fatal_err(struct csio_hw *hw)
{
csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
@@ -2990,14 +2745,6 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
/* END: HW SM */
/*****************************************************************************/
-/* Slow path handlers */
-struct intr_info {
- unsigned int mask; /* bits to check in interrupt status */
- const char *msg; /* message to print or NULL */
- short stat_idx; /* stat counter to increment or -1 */
- unsigned short fatal; /* whether the condition reported is fatal */
-};
-
/*
* csio_handle_intr_status - table driven interrupt handler
* @hw: HW instance
@@ -3011,7 +2758,7 @@ struct intr_info {
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
*/
-static int
+int
csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
const struct intr_info *acts)
{
@@ -3038,80 +2785,6 @@ csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
}
/*
- * Interrupt handler for the PCIE module.
- */
-static void
-csio_pcie_intr_handler(struct csio_hw *hw)
-{
- static struct intr_info sysbus_intr_info[] = {
- { RNPP, "RXNP array parity error", -1, 1 },
- { RPCP, "RXPC array parity error", -1, 1 },
- { RCIP, "RXCIF array parity error", -1, 1 },
- { RCCP, "Rx completions control array parity error", -1, 1 },
- { RFTP, "RXFT array parity error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_port_intr_info[] = {
- { TPCP, "TXPC array parity error", -1, 1 },
- { TNPP, "TXNP array parity error", -1, 1 },
- { TFTP, "TXFT array parity error", -1, 1 },
- { TCAP, "TXCA array parity error", -1, 1 },
- { TCIP, "TXCIF array parity error", -1, 1 },
- { RCAP, "RXCA array parity error", -1, 1 },
- { OTDD, "outbound request TLP discarded", -1, 1 },
- { RDPE, "Rx data parity error", -1, 1 },
- { TDUE, "Tx uncorrectable data error", -1, 1 },
- { 0, NULL, 0, 0 }
- };
- static struct intr_info pcie_intr_info[] = {
- { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
- { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
- { MSIDATAPERR, "MSI data parity error", -1, 1 },
- { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
- { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
- { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
- { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
- { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
- { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
- { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
- { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
- { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
- { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
- { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
- { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
- { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
- { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
- { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
- { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
- { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
- { FIDPERR, "PCI FID parity error", -1, 1 },
- { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
- { MATAGPERR, "PCI MA tag parity error", -1, 1 },
- { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
- { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
- { RXWRPERR, "PCI Rx write parity error", -1, 1 },
- { RPLPERR, "PCI replay buffer parity error", -1, 1 },
- { PCIESINT, "PCI core secondary fault", -1, 1 },
- { PCIEPINT, "PCI core primary fault", -1, 1 },
- { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
- 0 },
- { 0, NULL, 0, 0 }
- };
-
- int fat;
-
- fat = csio_handle_intr_status(hw,
- PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
- sysbus_intr_info) +
- csio_handle_intr_status(hw,
- PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
- pcie_port_intr_info) +
- csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
- if (fat)
- csio_hw_fatal_err(hw);
-}
-
-/*
* TP interrupt handler.
*/
static void csio_tp_intr_handler(struct csio_hw *hw)
@@ -3517,7 +3190,7 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw)
*/
static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
{
- uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
if (!v)
@@ -3527,7 +3200,7 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
if (v & RXFIFO_PRTY_ERR)
csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
- csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
csio_hw_fatal_err(hw);
}
@@ -3596,7 +3269,7 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
csio_xgmac_intr_handler(hw, 3);
if (cause & PCIE)
- csio_pcie_intr_handler(hw);
+ hw->chip_ops->chip_pcie_intr_handler(hw);
if (cause & MC)
csio_mem_intr_handler(hw, MEM_MC);
@@ -4262,6 +3935,7 @@ csio_hw_get_device_id(struct csio_hw *hw)
&hw->params.pci.device_id);
csio_dev_id_cached(hw);
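+	/* The upper nibble of the PCI device ID identifies the chip type (T4/T5). */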
+ hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
} /* csio_hw_get_device_id */
@@ -4280,19 +3954,21 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
- if (prot_type == CSIO_FPGA) {
+ if (prot_type == CSIO_T4_FCOE_ASIC) {
+ memcpy(hw->hw_ver,
+ csio_t4_fcoe_adapters[adap_type].model_no, 16);
memcpy(hw->model_desc,
- csio_fcoe_adapters[13].description, 32);
- } else if (prot_type == CSIO_T4_FCOE_ASIC) {
+ csio_t4_fcoe_adapters[adap_type].description,
+ 32);
+ } else if (prot_type == CSIO_T5_FCOE_ASIC) {
memcpy(hw->hw_ver,
- csio_fcoe_adapters[adap_type].model_no, 16);
+ csio_t5_fcoe_adapters[adap_type].model_no, 16);
memcpy(hw->model_desc,
- csio_fcoe_adapters[adap_type].description, 32);
+ csio_t5_fcoe_adapters[adap_type].description,
+ 32);
} else {
char tempName[32] = "Chelsio FCoE Controller";
memcpy(hw->model_desc, tempName, 32);
-
- CSIO_DB_ASSERT(0);
}
}
} /* csio_hw_set_description */
@@ -4321,6 +3997,9 @@ csio_hw_init(struct csio_hw *hw)
strcpy(hw->name, CSIO_HW_NAME);
+ /* Initialize the HW chip ops with T4/T5 specific ops */
+ hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
+
/* Set the model & its description */
ven_id = hw->params.pci.vendor_id;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9edcca4c71a..489fc095cb0 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -48,6 +48,7 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
+#include "csio_hw_chip.h"
#include "csio_wr.h"
#include "csio_mb.h"
#include "csio_scsi.h"
@@ -60,13 +61,6 @@
*/
#define FW_HOSTERROR 255
-#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
-#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
-
-#define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 2
-#define FW_VERSION_MICRO 8
-
#define CSIO_HW_NAME "Chelsio FCoE Adapter"
#define CSIO_MAX_PFN 8
#define CSIO_MAX_PPORTS 4
@@ -123,8 +117,6 @@ extern int csio_msi;
#define CSIO_VENDOR_ID 0x1425
#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
-#define CSIO_FPGA 0xA000
-#define CSIO_T4_FCOE_ASIC 0x4600
#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
EDC1 | LE | TP | MA | PM_TX | PM_RX | \
@@ -207,17 +199,6 @@ enum {
SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
};
-enum { MEM_EDC0, MEM_EDC1, MEM_MC };
-
-enum {
- MEMWIN0_APERTURE = 2048,
- MEMWIN0_BASE = 0x1b800,
- MEMWIN1_APERTURE = 32768,
- MEMWIN1_BASE = 0x28000,
- MEMWIN2_APERTURE = 65536,
- MEMWIN2_BASE = 0x30000,
-};
-
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -239,9 +220,6 @@ enum {
FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
FLASH_CFG_OFFSET = 0x1f0000,
FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
- FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is
- * at 1MB - 64KB */
- FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
};
/*
@@ -259,6 +237,8 @@ enum {
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ /* Location of Firmware Configuration File in FLASH. */
+ FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
};
#undef FLASH_START
@@ -310,7 +290,7 @@ struct csio_adap_desc {
struct pci_params {
uint16_t vendor_id;
uint16_t device_id;
- uint32_t vpd_cap_addr;
+ int vpd_cap_addr;
uint16_t speed;
uint8_t width;
};
@@ -513,6 +493,7 @@ struct csio_hw {
uint32_t fwrev;
uint32_t tp_vers;
char chip_ver;
+	uint16_t chip_id;			/* Identifies the chip: T4 or T5 */
uint32_t cfg_finiver;
uint32_t cfg_finicsum;
uint32_t cfg_cfcsum;
@@ -556,6 +537,9 @@ struct csio_hw {
*/
struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
+ struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific
+ * Operations
+ */
/* MSIX vectors */
struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
@@ -636,9 +620,16 @@ csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
#define csio_dbg(__hw, __fmt, ...)
#endif
+int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int,
+ int, int, uint32_t *);
+void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int,
+ unsigned int, unsigned int);
int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
void csio_hw_intr_disable(struct csio_hw *);
-int csio_hw_slow_intr_handler(struct csio_hw *hw);
+int csio_hw_slow_intr_handler(struct csio_hw *);
+int csio_handle_intr_status(struct csio_hw *, unsigned int,
+ const struct intr_info *);
+
int csio_hw_start(struct csio_hw *);
int csio_hw_stop(struct csio_hw *);
int csio_hw_reset(struct csio_hw *);
@@ -647,19 +638,17 @@ int csio_is_hw_removing(struct csio_hw *);
int csio_fwevtq_handler(struct csio_hw *);
void csio_evtq_worker(struct work_struct *);
-int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
- void *evt_msg, uint16_t len);
+int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t);
void csio_evtq_flush(struct csio_hw *hw);
int csio_request_irqs(struct csio_hw *);
void csio_intr_enable(struct csio_hw *);
void csio_intr_disable(struct csio_hw *, bool);
+void csio_hw_fatal_err(struct csio_hw *);
struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
int csio_config_queues(struct csio_hw *);
-int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
-int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
int csio_hw_init(struct csio_hw *);
void csio_hw_exit(struct csio_hw *);
#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
new file mode 100644
index 00000000000..bca0de61ae8
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -0,0 +1,175 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_CHIP_H__
+#define __CSIO_HW_CHIP_H__
+
+#include "csio_defs.h"
+
+/* FCoE device IDs for T4 */
+#define CSIO_DEVID_T440DBG_FCOE 0x4600
+#define CSIO_DEVID_T420CR_FCOE 0x4601
+#define CSIO_DEVID_T422CR_FCOE 0x4602
+#define CSIO_DEVID_T440CR_FCOE 0x4603
+#define CSIO_DEVID_T420BCH_FCOE 0x4604
+#define CSIO_DEVID_T440BCH_FCOE 0x4605
+#define CSIO_DEVID_T440CH_FCOE 0x4606
+#define CSIO_DEVID_T420SO_FCOE 0x4607
+#define CSIO_DEVID_T420CX_FCOE 0x4608
+#define CSIO_DEVID_T420BT_FCOE 0x4609
+#define CSIO_DEVID_T404BT_FCOE 0x460A
+#define CSIO_DEVID_B420_FCOE 0x460B
+#define CSIO_DEVID_B404_FCOE 0x460C
+#define CSIO_DEVID_T480CR_FCOE 0x460D
+#define CSIO_DEVID_T440LPCR_FCOE 0x460E
+#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F
+#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680
+#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681
+#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682
+#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683
+#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684
+#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685
+#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686
+#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687
+
+/* FCoE device IDs for T5 */
+#define CSIO_DEVID_T580DBG_FCOE 0x5600
+#define CSIO_DEVID_T520CR_FCOE 0x5601
+#define CSIO_DEVID_T522CR_FCOE 0x5602
+#define CSIO_DEVID_T540CR_FCOE 0x5603
+#define CSIO_DEVID_T520BCH_FCOE 0x5604
+#define CSIO_DEVID_T540BCH_FCOE 0x5605
+#define CSIO_DEVID_T540CH_FCOE 0x5606
+#define CSIO_DEVID_T520SO_FCOE 0x5607
+#define CSIO_DEVID_T520CX_FCOE 0x5608
+#define CSIO_DEVID_T520BT_FCOE 0x5609
+#define CSIO_DEVID_T504BT_FCOE 0x560A
+#define CSIO_DEVID_B520_FCOE 0x560B
+#define CSIO_DEVID_B504_FCOE 0x560C
+#define CSIO_DEVID_T580CR2_FCOE 0x560D
+#define CSIO_DEVID_T540LPCR_FCOE 0x560E
+#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F
+#define CSIO_DEVID_T580LPCR_FCOE 0x5610
+#define CSIO_DEVID_T520LLCR_FCOE 0x5611
+#define CSIO_DEVID_T560CR_FCOE 0x5612
+#define CSIO_DEVID_T580CR_FCOE 0x5613
+
+/* Chip identifiers, register map sizes and firmware file names */
+#define CSIO_HW_T4 0x4000
+#define CSIO_T4_FCOE_ASIC 0x4600
+#define CSIO_HW_T5 0x5000
+#define CSIO_T5_FCOE_ASIC 0x5600
+#define CSIO_HW_CHIP_MASK 0xF000
+#define T4_REGMAP_SIZE (160 * 1024)
+#define T5_REGMAP_SIZE (332 * 1024)
+#define FW_FNAME_T4 "cxgb4/t4fw.bin"
+#define FW_FNAME_T5 "cxgb4/t5fw.bin"
+#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt"
+#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
+
+/* Chip type helpers */
+static inline int csio_is_t4(uint16_t chip)
+{
+ return (chip == CSIO_HW_T4);
+}
+
+static inline int csio_is_t5(uint16_t chip)
+{
+ return (chip == CSIO_HW_T5);
+}
+
+/* Per-chip macro wrappers */
+#define CSIO_DEVICE(devid, idx) \
+ { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
+
+#define CSIO_HW_PIDX(hw, index) \
+ (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \
+ (PIDX_T5(index) | DBTYPE(1U)))
+
+#define CSIO_HW_LP_INT_THRESH(hw, val) \
+ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \
+ (V_LP_INT_THRESH_T5(val)))
+
+#define CSIO_HW_M_LP_INT_THRESH(hw) \
+ (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
+
+#define CSIO_MAC_INT_CAUSE_REG(hw, port) \
+ (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
+ (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
+
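+/*
+ * Firmware version the driver expects for each chip; checked in
+ * csio_hw_check_fw_version().
+ */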
+#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
+#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
+#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
+
+#define CSIO_FW_FNAME(hw) \
+ (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
+
+#define CSIO_CF_FNAME(hw) \
+ (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5)
+
+/* Memory types and PCI-E memory window definitions */
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
+enum {
+ MEMWIN_APERTURE = 2048,
+ MEMWIN_BASE = 0x1b800,
+	MEMWIN_CSIOSTOR = 6,	/* PCI-E memory window used by csiostor */
+};
+
+/* Slow path handlers */
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/* T4/T5 Chip specific ops */
+struct csio_hw;
+struct csio_hw_chip_ops {
+ int (*chip_set_mem_win)(struct csio_hw *, uint32_t);
+ void (*chip_pcie_intr_handler)(struct csio_hw *);
+ uint32_t (*chip_flash_cfg_addr)(struct csio_hw *);
+ int (*chip_mc_read)(struct csio_hw *, int, uint32_t,
+ __be32 *, uint64_t *);
+ int (*chip_edc_read)(struct csio_hw *, int, uint32_t,
+ __be32 *, uint64_t *);
+ int (*chip_memory_rw)(struct csio_hw *, u32, int, u32,
+ u32, uint32_t *, int);
+ void (*chip_dfs_create_ext_mem)(struct csio_hw *);
+};
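+
+/*
+ * Callers dispatch through hw->chip_ops rather than calling the T4/T5
+ * routines directly, e.g.
+ * hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype, addr, len, buf, dir);
+ */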
+
+extern struct csio_hw_chip_ops t4_ops;
+extern struct csio_hw_chip_ops t5_ops;
+
+#endif /* #ifndef __CSIO_HW_CHIP_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
new file mode 100644
index 00000000000..89ecbac5478
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -0,0 +1,403 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "csio_hw.h"
+#include "csio_init.h"
+
+/*
+ * Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static uint32_t
+csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
+{
+ u32 val = 0;
+ struct csio_mb *mbp;
+ int rv;
+ struct fw_ldst_cmd *ldst_cmd;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ pci_read_config_dword(hw->pdev, reg, &val);
+ return val;
+ }
+
+ csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
+ rv = csio_mb_issue(hw, mbp);
+
+ /*
+ * If the LDST Command succeeded, extract the returned register
+ * value. Otherwise read it directly ourselves.
+ */
+ if (rv == 0) {
+ ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ val = ntohl(ldst_cmd->u.pcie.data[0]);
+ } else
+ pci_read_config_dword(hw->pdev, reg, &val);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return val;
+}
+
+static int
+csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
+{
+ u32 bar0;
+ u32 mem_win_base;
+
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+ bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ mem_win_base = bar0 + MEMWIN_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ csio_wr_reg32(hw, mem_win_base | BIR(0) |
+ WINDOW(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ return 0;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_t4_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ static struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_t4_flash_cfg_addr - return the address of the flash configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_t4_flash_cfg_addr(struct csio_hw *hw)
+{
+ return FLASH_CFG_OFFSET;
+}
+
+/*
+ * csio_t4_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: not used for T4 adapter
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
+ csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
+ csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ MC_BIST_CMD);
+ i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_t4_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
+ csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
+ csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
+ EDC_BIST_CMD + idx);
+ i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it is the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+ u32 len, uint32_t *buf, int dir)
+{
+ u32 pos, start, offset, memoffset, bar0;
+ u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- T4
+ */
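+	/*
+	 * The sizes read from MA_EDRAM0_BAR/MA_EXT_MEMORY_BAR are in MB; e.g.
+	 * with 5MB EDCs this yields offsets of 0, 5MB and 10MB for EDC0, EDC1
+	 * and the MC, matching the fixed 5MB stride the old csio_memory_rw()
+	 * assumed.
+	 */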
+ edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ mem_aperture = 1 << (WINDOW(mem_reg) + 10);
+ mem_base = GET_PCIEOFST(mem_reg) << 10;
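+	/*
+	 * With the window programmed by csio_t4_set_mem_win() above,
+	 * WINDOW(mem_reg) is ilog2(MEMWIN_APERTURE) - 10 = 1, so
+	 * mem_aperture = 1 << (1 + 10) = 2048 bytes (MEMWIN_APERTURE).
+	 */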
+
+ bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+ mem_base -= bar0;
+
+ start = addr & ~(mem_aperture-1);
+ offset = addr - start;
+
+ csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
+ mem_reg, mem_aperture);
+ csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
+ mem_base, memoffset);
+ csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n",
+ bar0, start, offset);
+ csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
+ mtype, addr, len);
+
+ for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
+ /*
+ * Move PCI-E Memory Window to our current transfer
+ * position. Read it back to ensure that changes propagate
+ * before we attempt to use the new value.
+ */
+ csio_wr_reg32(hw, pos,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+
+ while (offset < mem_aperture && len > 0) {
+ if (dir)
+ *buf++ = csio_rd_reg32(hw, mem_base + offset);
+ else
+ csio_wr_reg32(hw, *buf++, mem_base + offset);
+
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+ }
+ }
+ return 0;
+}
+
+/*
+ * csio_t4_dfs_create_ext_mem - set up debugfs access to external memory (MC)
+ * @hw: the csio_hw
+ *
+ * This function creates a debugfs file for reading the external memory region MC.
+ */
+static void
+csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
+{
+ u32 size;
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EXT_MEM_ENABLE) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+ csio_add_debugfs_mem(hw, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(size));
+ }
+}
+
+/* T4 adapter-specific operations */
+struct csio_hw_chip_ops t4_ops = {
+ .chip_set_mem_win = csio_t4_set_mem_win,
+ .chip_pcie_intr_handler = csio_t4_pcie_intr_handler,
+ .chip_flash_cfg_addr = csio_t4_flash_cfg_addr,
+ .chip_mc_read = csio_t4_mc_read,
+ .chip_edc_read = csio_t4_edc_read,
+ .chip_memory_rw = csio_t4_memory_rw,
+ .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem,
+};
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
new file mode 100644
index 00000000000..27745c170c2
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -0,0 +1,397 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "csio_hw.h"
+#include "csio_init.h"
+
+static int
+csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
+{
+ u32 mem_win_base;
+	/*
+	 * Unlike T4 we do not need the absolute PCI-E bus address of BAR0 here:
+	 * the T5 memory window decoder takes an offset relative to BAR0, so only
+	 * MEMWIN_BASE itself is programmed.
+	 */
+ mem_win_base = MEMWIN_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ csio_wr_reg32(hw, mem_win_base | BIR(0) |
+ WINDOW(ilog2(MEMWIN_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+
+ return 0;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_t5_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ static struct intr_info pcie_intr_info[] = {
+ { MSTGRPPERR, "Master Response Read Queue parity error",
+ -1, 1 },
+ { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+ { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+ -1, 1 },
+ { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+ -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DREQWRPERR, "PCI DMA channel write request parity error",
+ -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+ { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+ -1, 1 },
+ { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+ -1, 1 },
+ { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+ { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+ { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+ { READRSPERR, "Outbound read error", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_t5_flash_cfg_addr - return the address of the flash configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_t5_flash_cfg_addr(struct csio_hw *hw)
+{
+ return FLASH_CFG_START;
+}
+
+/*
+ * csio_t5_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which MC to access (MC0 or MC1)
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+ uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
+ uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
+
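+	/* T5 has two memory controllers; MC_REG() selects the per-MC register copy. */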
+ mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
+ mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
+ mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
+ mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
+ mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
+
+ if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ mc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_t5_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+static int
+csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+ uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
+ uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
+
+/*
+ * These macros are missing from the t4_regs.h file.
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+
+ edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
+ edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
+ edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
+ edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
+ edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+
+ if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
+ csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
+ csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ edc_bist_cmd_reg);
+ i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @win: PCI-E memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it is the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
+ u32 len, uint32_t *buf, int dir)
+{
+ u32 pos, start, offset, memoffset;
+ u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- T4
+ * MEM_MC0 = 2 -- For T5
+ * MEM_MC1 = 3 -- For T5
+ */
+ edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1)
+ memoffset = (mtype * (edc_size * 1024 * 1024));
+ else {
+ mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
+ MA_EXT_MEMORY_BAR));
+ memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
+ mem_aperture = 1 << (WINDOW(mem_reg) + 10);
+ mem_base = GET_PCIEOFST(mem_reg) << 10;
+
+ start = addr & ~(mem_aperture-1);
+ offset = addr - start;
+ win_pf = V_PFNUM(hw->pfn);
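+	/*
+	 * On T5 the target PF is carried in the memory window offset register,
+	 * so win_pf is OR'd into each window move below.
+	 */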
+
+ csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
+ mem_reg, mem_aperture);
+ csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
+ mem_base, memoffset);
+ csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n",
+ start, offset, win_pf);
+ csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
+ mtype, addr, len);
+
+ for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
+ /*
+ * Move PCI-E Memory Window to our current transfer
+ * position. Read it back to ensure that changes propagate
+ * before we attempt to use the new value.
+ */
+ csio_wr_reg32(hw, pos | win_pf,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+ csio_rd_reg32(hw,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+
+ while (offset < mem_aperture && len > 0) {
+ if (dir)
+ *buf++ = csio_rd_reg32(hw, mem_base + offset);
+ else
+ csio_wr_reg32(hw, *buf++, mem_base + offset);
+
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+ }
+ }
+ return 0;
+}
+
+/*
+ * csio_t5_dfs_create_ext_mem - set up debugfs access to external memory (MC0/MC1)
+ * @hw: the csio_hw
+ *
+ * This function creates debugfs files for reading the external memory
+ * regions MC0 and MC1.
+ */
+static void
+csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
+{
+ u32 size;
+ int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EXT_MEM_ENABLE) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
+ csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
+ EXT_MEM_SIZE_GET(size));
+ }
+ if (i & EXT_MEM1_ENABLE) {
+ size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR);
+ csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
+ EXT_MEM_SIZE_GET(size));
+ }
+}
+
+/* T5 adapter-specific operations */
+struct csio_hw_chip_ops t5_ops = {
+ .chip_set_mem_win = csio_t5_set_mem_win,
+ .chip_pcie_intr_handler = csio_t5_pcie_intr_handler,
+ .chip_flash_cfg_addr = csio_t5_flash_cfg_addr,
+ .chip_mc_read = csio_t5_mc_read,
+ .chip_edc_read = csio_t5_edc_read,
+ .chip_memory_rw = csio_t5_memory_rw,
+ .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem,
+};
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 0604b5ff363..00346fe939d 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -81,9 +81,11 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
__be32 data[16];
if (mem == MEM_MC)
- ret = csio_hw_mc_read(hw, pos, data, NULL);
+ ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
+ data, NULL);
else
- ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
+ ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
+ data, NULL);
if (ret)
return ret;
@@ -108,7 +110,7 @@ static const struct file_operations csio_mem_debugfs_fops = {
.llseek = default_llseek,
};
-static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
unsigned int idx, unsigned int size_mb)
{
struct dentry *de;
@@ -131,9 +133,8 @@ static int csio_setup_debugfs(struct csio_hw *hw)
csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
if (i & EDRAM1_ENABLE)
csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
- if (i & EXT_MEM_ENABLE)
- csio_add_debugfs_mem(hw, "mc", MEM_MC,
- EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
+
+ hw->chip_ops->chip_dfs_create_ext_mem(hw);
return 0;
}
@@ -1169,7 +1170,7 @@ static struct pci_error_handlers csio_err_handler = {
};
static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
- CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */
CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
@@ -1184,8 +1185,34 @@ static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
- CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */
- CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */
+ CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */
+ CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */
{ 0, 0, 0, 0, 0, 0, 0 }
};
@@ -1259,4 +1286,5 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC);
MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
-MODULE_FIRMWARE(CSIO_FW_FNAME);
+MODULE_FIRMWARE(FW_FNAME_T4);
+MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 0838fd7ec9c..5cc5d317a44 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -52,31 +52,6 @@
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0"
-#define CSIO_DEVICE(devid, idx) \
-{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
-
-#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
- ((_dev) == CSIO_DEVID_PE10K_PF1))
-
-/* FCoE device IDs */
-#define CSIO_DEVID_PE10K 0xA000
-#define CSIO_DEVID_PE10K_PF1 0xA001
-#define CSIO_DEVID_T440DBG_FCOE 0x4600
-#define CSIO_DEVID_T420CR_FCOE 0x4601
-#define CSIO_DEVID_T422CR_FCOE 0x4602
-#define CSIO_DEVID_T440CR_FCOE 0x4603
-#define CSIO_DEVID_T420BCH_FCOE 0x4604
-#define CSIO_DEVID_T440BCH_FCOE 0x4605
-#define CSIO_DEVID_T440CH_FCOE 0x4606
-#define CSIO_DEVID_T420SO_FCOE 0x4607
-#define CSIO_DEVID_T420CX_FCOE 0x4608
-#define CSIO_DEVID_T420BT_FCOE 0x4609
-#define CSIO_DEVID_T404BT_FCOE 0x460A
-#define CSIO_DEVID_B420_FCOE 0x460B
-#define CSIO_DEVID_B404_FCOE 0x460C
-#define CSIO_DEVID_T480CR_FCOE 0x460D
-#define CSIO_DEVID_T440LPCR_FCOE 0x460E
-
extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;
@@ -100,6 +75,10 @@ struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
void csio_shost_exit(struct csio_lnode *);
void csio_lnodes_exit(struct csio_hw *, bool);
+/* DebugFS helper routines */
+void csio_add_debugfs_mem(struct csio_hw *, const char *,
+ unsigned int, unsigned int);
+
static inline struct Scsi_Host *
csio_ln_to_shost(struct csio_lnode *ln)
{
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 8d84988ab06..0f9c04175b1 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
uint32_t n_rnode_match; /* matched rnode */
uint32_t n_dev_loss_tmo; /* Device loss timeout */
uint32_t n_fdmi_err; /* fdmi err */
- uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
uint32_t n_rnode_alloc; /* rnode allocated */
uint32_t n_rnode_free; /* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
index 51c6a388de2..e9c3b045f58 100644
--- a/drivers/scsi/csiostor/csio_rnode.c
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -302,7 +302,7 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
{
uint8_t rport_type;
struct csio_rnode *rn, *match_rn;
- uint32_t vnp_flowid;
+ uint32_t vnp_flowid = 0;
__be32 *port_id;
port_id = (__be32 *)&rdevp->r_id[0];
@@ -350,6 +350,14 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
* Else, go ahead and alloc a new rnode.
*/
if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
+ if (rn == match_rn)
+ goto found_rnode;
+ csio_ln_dbg(ln,
+ "nport_id:x%x and wwpn:%llx"
+ " match for ssni:x%x\n",
+ rn->nport_id,
+ wwn_to_u64(rdevp->wwpn),
+ rdev_flowid);
if (csio_is_rnode_ready(rn)) {
csio_ln_warn(ln,
"rnode is already"
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index a3b434c801d..65940096a80 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
uint32_t n_err_nomem; /* error nomem */
uint32_t n_evt_unexp; /* unexpected event */
uint32_t n_evt_drop; /* unexpected event */
- uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
uint32_t n_lun_rst; /* Number of resets of
* of LUNs under this
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index c32df1bdaa9..4255ce264ab 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -85,8 +85,8 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
*/
if (flq->inc_idx >= 8) {
csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
- PIDX(flq->inc_idx / 8),
- MYPF_REG(SGE_PF_KDOORBELL));
+ CSIO_HW_PIDX(hw, flq->inc_idx / 8),
+ MYPF_REG(SGE_PF_KDOORBELL));
flq->inc_idx &= 7;
}
}
@@ -989,7 +989,8 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
wmb();
/* Ring SGE Doorbell writing q->pidx into it */
csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
- PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
+ CSIO_HW_PIDX(hw, q->inc_idx),
+ MYPF_REG(SGE_PF_KDOORBELL));
q->inc_idx = 0;
return 0;
@@ -1331,20 +1332,30 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
/* FL BUFFER SIZE#0 is the page size, i.e., already aligned to the cache line */
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
- csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE2);
- csio_wr_reg32(hw,
- (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
- sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
- SGE_FL_BUFFER_SIZE3);
+
+ /*
+ * If using hard params, the following will get set correctly
+ * in csio_wr_set_sge().
+ */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE3);
+ }
csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
/* default value of rx_dma_offset of the NIC driver */
csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+
+ csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
+ CSUM_HAS_PSEUDO_HDR, 0);
}
static void
@@ -1460,18 +1471,21 @@ csio_wr_set_sge(struct csio_hw *hw)
* and generate an interrupt when this occurs so we can recover.
*/
csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
- HP_INT_THRESH(HP_INT_THRESH_MASK) |
- LP_INT_THRESH(LP_INT_THRESH_MASK),
- HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
- LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
+ HP_INT_THRESH(HP_INT_THRESH_MASK) |
+ CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
+ HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
+ CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
+
csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
ENABLE_DROP);
/* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
- CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
- CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
+ csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
+ & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1522,22 +1536,24 @@ void
csio_wr_sge_init(struct csio_hw *hw)
{
/*
- * If we are master:
+ * If we are master and chip is not initialized:
* - If we plan to use the config file, we need to fixup some
* host specific registers, and read the rest of the SGE
* configuration.
* - If we don't plan to use the config file, we need to initialize
* SGE entirely, including fixing the host specific registers.
+ * If we are master and chip is initialized, just read and work off of
+ * the already initialized SGE values.
* If we aren't the master, we are only allowed to read and work off of
* the already initialized SGE values.
*
* Therefore, before calling this function, we assume that the master-
- * ship of the card, and whether to use config file or not, have
- * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
- * CSIO_HWF_MASTER should be set/unset.
+ * ship of the card, its state, and whether to use the config file have
+ * already been decided.
*/
if (csio_is_hw_master(hw)) {
- csio_wr_fixup_host_params(hw);
+ if (hw->fw_state != CSIO_DEV_STATE_INIT)
+ csio_wr_fixup_host_params(hw);
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
csio_wr_get_sge(hw);
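The free-list buffer size writes in the hunks above round each size up to the cache-line alignment with the classic power-of-two idiom (v + align - 1) & ~(align - 1). A tiny standalone illustration follows; the values are arbitrary, and the idiom is only valid when align is a power of two.

#include <stdio.h>

/* Round v up to the next multiple of align; align must be a power of two. */
static unsigned int round_up_pow2(unsigned int v, unsigned int align)
{
    return (v + align - 1) & ~(align - 1);
}

int main(void)
{
    printf("%u\n", round_up_pow2(1500, 64));   /* prints 1536 */
    printf("%u\n", round_up_pow2(2048, 64));   /* prints 2048 (already aligned) */
    return 0;
}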
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 65123a21b97..fe30ea94ffe 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
u32 rlen;
int err, tport;
- while (skb->len >= NLMSG_SPACE(0)) {
+ while (skb->len >= NLMSG_HDRLEN) {
err = 0;
nlh = nlmsg_hdr(skb);
@@ -70,7 +70,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
goto next_msg;
}
- hdr = NLMSG_DATA(nlh);
+ hdr = nlmsg_data(nlh);
if ((hdr->version != SCSI_NL_VERSION) ||
(hdr->magic != SCSI_NL_MAGIC)) {
err = -EPROTOTYPE;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index e894ca7b54c..e106c276aa0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -35,7 +35,6 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_cmnd.h>
-#include <linux/netlink.h>
#include <net/netlink.h>
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
@@ -534,7 +533,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
struct nlmsghdr *nlh;
struct fc_nl_event *event;
const char *name;
- u32 len, skblen;
+ u32 len;
int err;
if (!scsi_nl_sock) {
@@ -543,21 +542,19 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
}
len = FC_NL_MSGALIGN(sizeof(*event));
- skblen = NLMSG_SPACE(len);
- skb = alloc_skb(skblen, GFP_KERNEL);
+ skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto send_fail;
}
- nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
- skblen - sizeof(*nlh), 0);
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
if (!nlh) {
err = -ENOBUFS;
goto send_fail_skb;
}
- event = NLMSG_DATA(nlh);
+ event = nlmsg_data(nlh);
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
@@ -604,7 +601,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
struct sk_buff *skb;
struct nlmsghdr *nlh;
struct fc_nl_event *event;
- u32 len, skblen;
+ u32 len;
int err;
if (!scsi_nl_sock) {
@@ -613,21 +610,19 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
}
len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
- skblen = NLMSG_SPACE(len);
- skb = alloc_skb(skblen, GFP_KERNEL);
+ skb = nlmsg_new(len, GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto send_vendor_fail;
}
- nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG,
- skblen - sizeof(*nlh), 0);
+ nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
if (!nlh) {
err = -ENOBUFS;
goto send_vendor_fail_skb;
}
- event = NLMSG_DATA(nlh);
+ event = nlmsg_data(nlh);
INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
FC_NL_ASYNC_EVENT, len);
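The hunks above are part of a tree-wide conversion from the legacy NLMSG_SPACE()/alloc_skb()/NLMSG_DATA() usage to the nlmsg_new()/nlmsg_put()/nlmsg_data() helpers, which account for the netlink header and alignment themselves. A minimal kernel-style sketch of the target pattern is shown below; the message type and payload struct are made up for illustration, and only the netlink helpers are real kernel API.

#include <net/netlink.h>

#define MY_MSG_TYPE 0x10             /* hypothetical message type */

struct my_event {                    /* hypothetical payload */
    u32 code;
    u32 data;
};

static struct sk_buff *build_event(u32 code, u32 data)
{
    struct sk_buff *skb;
    struct nlmsghdr *nlh;
    struct my_event *ev;
    u32 len = sizeof(*ev);           /* payload size only, no NLMSG_SPACE() */

    skb = nlmsg_new(len, GFP_KERNEL);        /* adds header + alignment itself */
    if (!skb)
        return NULL;

    nlh = nlmsg_put(skb, 0, 0, MY_MSG_TYPE, len, 0);
    if (!nlh) {
        kfree_skb(skb);
        return NULL;
    }

    ev = nlmsg_data(nlh);            /* payload sits right after the header */
    ev->code = code;
    ev->data = data;
    return skb;
}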
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a74b975efd..2e3816530bb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1344,8 +1344,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
struct iscsi_uevent *ev;
char *pdu;
struct iscsi_internal *priv;
- int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) +
- data_size);
+ int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) +
+ data_size);
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -1360,7 +1360,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
memset(ev, 0, sizeof(*ev));
ev->transport_handle = iscsi_handle(conn->transport);
ev->type = ISCSI_KEVENT_RECV_PDU;
@@ -1381,7 +1381,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct iscsi_uevent *ev;
- int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+ int len = nlmsg_total_size(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
@@ -1390,7 +1390,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
memset(ev, 0, sizeof(*ev));
ev->type = type;
ev->transport_handle = iscsi_handle(transport);
@@ -1415,7 +1415,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
struct sk_buff *skb;
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
- int len = NLMSG_SPACE(sizeof(*ev));
+ int len = nlmsg_total_size(sizeof(*ev));
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -1429,7 +1429,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
ev->transport_handle = iscsi_handle(conn->transport);
ev->type = ISCSI_KEVENT_CONN_ERROR;
ev->r.connerror.error = error;
@@ -1450,7 +1450,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
struct sk_buff *skb;
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
- int len = NLMSG_SPACE(sizeof(*ev));
+ int len = nlmsg_total_size(sizeof(*ev));
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
@@ -1464,7 +1464,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
ev->transport_handle = iscsi_handle(conn->transport);
ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
ev->r.conn_login.state = state;
@@ -1484,7 +1484,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct iscsi_uevent *ev;
- int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+ int len = nlmsg_total_size(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_NOIO);
if (!skb) {
@@ -1494,7 +1494,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
ev->transport_handle = iscsi_handle(transport);
ev->type = ISCSI_KEVENT_HOST_EVENT;
ev->r.host_event.host_no = host_no;
@@ -1515,7 +1515,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct iscsi_uevent *ev;
- int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+ int len = nlmsg_total_size(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_NOIO);
if (!skb) {
@@ -1524,7 +1524,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
ev->transport_handle = iscsi_handle(transport);
ev->type = ISCSI_KEVENT_PING_COMP;
ev->r.ping_comp.host_no = host_no;
@@ -1543,7 +1543,7 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
{
struct sk_buff *skb;
struct nlmsghdr *nlh;
- int len = NLMSG_SPACE(size);
+ int len = nlmsg_total_size(size);
int flags = multi ? NLM_F_MULTI : 0;
int t = done ? NLMSG_DONE : type;
@@ -1555,24 +1555,24 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
nlh->nlmsg_flags = flags;
- memcpy(NLMSG_DATA(nlh), payload, size);
+ memcpy(nlmsg_data(nlh), payload, size);
return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
}
static int
iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
{
- struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_stats *stats;
struct sk_buff *skbstat;
struct iscsi_cls_conn *conn;
struct nlmsghdr *nlhstat;
struct iscsi_uevent *evstat;
struct iscsi_internal *priv;
- int len = NLMSG_SPACE(sizeof(*ev) +
- sizeof(struct iscsi_stats) +
- sizeof(struct iscsi_stats_custom) *
- ISCSI_STATS_CUSTOM_MAX);
+ int len = nlmsg_total_size(sizeof(*ev) +
+ sizeof(struct iscsi_stats) +
+ sizeof(struct iscsi_stats_custom) *
+ ISCSI_STATS_CUSTOM_MAX);
int err = 0;
priv = iscsi_if_transport_lookup(transport);
@@ -1595,7 +1595,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
(len - sizeof(*nlhstat)), 0);
- evstat = NLMSG_DATA(nlhstat);
+ evstat = nlmsg_data(nlhstat);
memset(evstat, 0, sizeof(*evstat));
evstat->transport_handle = iscsi_handle(conn->transport);
evstat->type = nlh->nlmsg_type;
@@ -1608,12 +1608,12 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
memset(stats, 0, sizeof(*stats));
transport->get_stats(conn, stats);
- actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) +
- sizeof(struct iscsi_stats) +
- sizeof(struct iscsi_stats_custom) *
- stats->custom_length);
+ actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) +
+ sizeof(struct iscsi_stats) +
+ sizeof(struct iscsi_stats_custom) *
+ stats->custom_length);
actual_size -= sizeof(*nlhstat);
- actual_size = NLMSG_LENGTH(actual_size);
+ actual_size = nlmsg_msg_size(actual_size);
skb_trim(skbstat, NLMSG_ALIGN(actual_size));
nlhstat->nlmsg_len = actual_size;
@@ -1637,7 +1637,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
struct iscsi_uevent *ev;
struct sk_buff *skb;
struct nlmsghdr *nlh;
- int rc, len = NLMSG_SPACE(sizeof(*ev));
+ int rc, len = nlmsg_total_size(sizeof(*ev));
priv = iscsi_if_transport_lookup(session->transport);
if (!priv)
@@ -1653,7 +1653,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
}
nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
ev->transport_handle = iscsi_handle(session->transport);
ev->type = event;
@@ -2005,7 +2005,7 @@ iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
static int
iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
{
- struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
struct Scsi_Host *shost = NULL;
struct iscsi_chap_rec *chap_rec;
struct iscsi_internal *priv;
@@ -2024,7 +2024,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
return -EINVAL;
chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
- len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+ len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
shost = scsi_host_lookup(ev->u.get_chap.host_no);
if (!shost) {
@@ -2045,7 +2045,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
(len - sizeof(*nlhchap)), 0);
- evchap = NLMSG_DATA(nlhchap);
+ evchap = nlmsg_data(nlhchap);
memset(evchap, 0, sizeof(*evchap));
evchap->transport_handle = iscsi_handle(transport);
evchap->type = nlh->nlmsg_type;
@@ -2058,7 +2058,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
&evchap->u.get_chap.num_entries, buf);
- actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+ actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
skb_trim(skbchap, NLMSG_ALIGN(actual_size));
nlhchap->nlmsg_len = actual_size;
@@ -2096,7 +2096,7 @@ static int
iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
{
int err = 0;
- struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+ struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_transport *transport = NULL;
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
@@ -2263,7 +2263,7 @@ static void
iscsi_if_rx(struct sk_buff *skb)
{
mutex_lock(&rx_queue_mutex);
- while (skb->len >= NLMSG_SPACE(0)) {
+ while (skb->len >= NLMSG_HDRLEN) {
int err;
uint32_t rlen;
struct nlmsghdr *nlh;
@@ -2276,7 +2276,7 @@ iscsi_if_rx(struct sk_buff *skb)
break;
}
- ev = NLMSG_DATA(nlh);
+ ev = nlmsg_data(nlh);
rlen = NLMSG_ALIGN(nlh->nlmsg_len);
if (rlen > skb->len)
rlen = skb->len;
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 71098a7b5fe..7cb7d2c8fd8 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -354,7 +354,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
if (cc->dev->id.revision >= 11)
cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
- ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
+ ssb_dbg("chipcommon status is 0x%x\n", cc->status);
if (cc->dev->id.revision >= 20) {
chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 4c0f6d883dd..791da2c0d8f 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -110,8 +110,8 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
return;
}
- ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
- (crystalfreq / 1000), (crystalfreq % 1000));
+ ssb_info("Programming PLL to %u.%03u MHz\n",
+ crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */
switch (bus->chip_id) {
@@ -138,7 +138,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
}
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
- ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");
+ ssb_emerg("Failed to turn the PLL off!\n");
/* Set PDIV in PLL control 0. */
pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
@@ -249,8 +249,8 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
return;
}
- ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
- (crystalfreq / 1000), (crystalfreq % 1000));
+ ssb_info("Programming PLL to %u.%03u MHz\n",
+ crystalfreq / 1000, crystalfreq % 1000);
/* First turn the PLL off. */
switch (bus->chip_id) {
@@ -275,7 +275,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
}
tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
- ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");
+ ssb_emerg("Failed to turn the PLL off!\n");
/* Set p1div and p2div. */
pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
@@ -349,9 +349,8 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
case 43222:
break;
default:
- ssb_printk(KERN_ERR PFX
- "ERROR: PLL init unknown for device %04X\n",
- bus->chip_id);
+ ssb_err("ERROR: PLL init unknown for device %04X\n",
+ bus->chip_id);
}
}
@@ -472,9 +471,8 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
max_msk = 0xFFFFF;
break;
default:
- ssb_printk(KERN_ERR PFX
- "ERROR: PMU resource config unknown for device %04X\n",
- bus->chip_id);
+ ssb_err("ERROR: PMU resource config unknown for device %04X\n",
+ bus->chip_id);
}
if (updown_tab) {
@@ -526,8 +524,8 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
- ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
- cc->pmu.rev, pmucap);
+ ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n",
+ cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1)
chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
@@ -638,9 +636,8 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
case 0x5354:
ssb_pmu_get_alp_clock_clk0(cc);
default:
- ssb_printk(KERN_ERR PFX
- "ERROR: PMU alp clock unknown for device %04X\n",
- bus->chip_id);
+ ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
+ bus->chip_id);
return 0;
}
}
@@ -654,9 +651,8 @@ u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
/* The 5354 chip uses a non-programmable PLL running at 240 MHz */
return 240000000;
default:
- ssb_printk(KERN_ERR PFX
- "ERROR: PMU cpu clock unknown for device %04X\n",
- bus->chip_id);
+ ssb_err("ERROR: PMU cpu clock unknown for device %04X\n",
+ bus->chip_id);
return 0;
}
}
@@ -669,9 +665,8 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
case 0x5354:
return 120000000;
default:
- ssb_printk(KERN_ERR PFX
- "ERROR: PMU controlclock unknown for device %04X\n",
- bus->chip_id);
+ ssb_err("ERROR: PMU controlclock unknown for device %04X\n",
+ bus->chip_id);
return 0;
}
}
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 33b37dac40b..fa385a368a5 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -167,21 +167,22 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
ssb_write32(mdev, SSB_IPSFLAG, irqflag);
}
- ssb_dprintk(KERN_INFO PFX
- "set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.coreid, oldirq+2, irq+2);
+ ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.coreid, oldirq+2, irq+2);
}
static void print_irq(struct ssb_device *dev, unsigned int irq)
{
- int i;
static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
- ssb_dprintk(KERN_INFO PFX
- "core 0x%04x, irq :", dev->id.coreid);
- for (i = 0; i <= 6; i++) {
- ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" ");
- }
- ssb_dprintk("\n");
+ ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
+ dev->id.coreid,
+ irq_name[0], irq == 0 ? "*" : " ",
+ irq_name[1], irq == 1 ? "*" : " ",
+ irq_name[2], irq == 2 ? "*" : " ",
+ irq_name[3], irq == 3 ? "*" : " ",
+ irq_name[4], irq == 4 ? "*" : " ",
+ irq_name[5], irq == 5 ? "*" : " ",
+ irq_name[6], irq == 6 ? "*" : " ");
}
static void dump_irq(struct ssb_bus *bus)
@@ -286,7 +287,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
if (!mcore->dev)
return; /* We don't have a MIPS core */
- ssb_dprintk(KERN_INFO PFX "Initializing MIPS core...\n");
+ ssb_dbg("Initializing MIPS core...\n");
bus = mcore->dev->bus;
hz = ssb_clockspeed(bus);
@@ -334,7 +335,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
break;
}
}
- ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
+ ssb_dbg("after irq reconfiguration\n");
dump_irq(bus);
ssb_mips_serial_init(mcore);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 59801d23d7e..d75b72ba267 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -263,8 +263,7 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
return -ENODEV;
}
- ssb_printk(KERN_INFO "PCI: Fixing up device %s\n",
- pci_name(d));
+ ssb_info("PCI: Fixing up device %s\n", pci_name(d));
/* Fix up interrupt lines */
d->irq = ssb_mips_irq(extpci_core->dev) + 2;
@@ -285,12 +284,12 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
return;
- ssb_printk(KERN_INFO "PCI: Fixing up bridge %s\n", pci_name(dev));
+ ssb_info("PCI: Fixing up bridge %s\n", pci_name(dev));
/* Enable PCI bridge bus mastering and memory space */
pci_set_master(dev);
if (pcibios_enable_device(dev, ~0) < 0) {
- ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n");
+ ssb_err("PCI: SSB bridge enable failed\n");
return;
}
@@ -299,8 +298,8 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
/* Make sure our latency is high enough to handle the devices behind us */
lat = 168;
- ssb_printk(KERN_INFO "PCI: Fixing latency timer of device %s to %u\n",
- pci_name(dev), lat);
+ ssb_info("PCI: Fixing latency timer of device %s to %u\n",
+ pci_name(dev), lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_pcicore_fixup_pcibridge);
@@ -323,7 +322,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
return;
extpci_core = pc;
- ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n");
+ ssb_dbg("PCIcore in host mode found\n");
/* Reset devices on the external PCI bus */
val = SSB_PCICORE_CTL_RST_OE;
val |= SSB_PCICORE_CTL_CLK_OE;
@@ -338,7 +337,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
udelay(1); /* Assertion time demanded by the PCI standard */
if (pc->dev->bus->has_cardbus_slot) {
- ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n");
+ ssb_dbg("CardBus slot detected\n");
pc->cardbusmode = 1;
/* GPIO 1 resets the bridge */
ssb_gpio_out(pc->dev->bus, 1, 1);
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index bb18d76f9f2..55e10111503 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -57,9 +57,8 @@ int ssb_watchdog_register(struct ssb_bus *bus)
bus->busnumber, &wdt,
sizeof(wdt));
if (IS_ERR(pdev)) {
- ssb_dprintk(KERN_INFO PFX
- "can not register watchdog device, err: %li\n",
- PTR_ERR(pdev));
+ ssb_dbg("can not register watchdog device, err: %li\n",
+ PTR_ERR(pdev));
return PTR_ERR(pdev);
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 3b645b8a261..812775a4bfb 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -275,8 +275,8 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx)
err = sdrv->probe(sdev, &sdev->id);
if (err) {
- ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n",
- dev_name(sdev->dev));
+ ssb_err("Failed to thaw device %s\n",
+ dev_name(sdev->dev));
result = err;
}
ssb_device_put(sdev);
@@ -447,10 +447,9 @@ void ssb_bus_unregister(struct ssb_bus *bus)
err = ssb_gpio_unregister(bus);
if (err == -EBUSY)
- ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n");
+ ssb_dbg("Some GPIOs are still in use\n");
else if (err)
- ssb_dprintk(KERN_ERR PFX
- "Can not unregister GPIO driver: %i\n", err);
+ ssb_dbg("Can not unregister GPIO driver: %i\n", err);
ssb_buses_lock();
ssb_devices_unregister(bus);
@@ -497,8 +496,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL);
if (!devwrap) {
- ssb_printk(KERN_ERR PFX
- "Could not allocate device\n");
+ ssb_err("Could not allocate device\n");
err = -ENOMEM;
goto error;
}
@@ -537,9 +535,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
sdev->dev = dev;
err = device_register(dev);
if (err) {
- ssb_printk(KERN_ERR PFX
- "Could not register %s\n",
- dev_name(dev));
+ ssb_err("Could not register %s\n", dev_name(dev));
/* Set dev to NULL to not unregister
* dev on error unwinding. */
sdev->dev = NULL;
@@ -825,10 +821,9 @@ static int ssb_bus_register(struct ssb_bus *bus,
ssb_mipscore_init(&bus->mipscore);
err = ssb_gpio_init(bus);
if (err == -ENOTSUPP)
- ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n");
+ ssb_dbg("GPIO driver not activated\n");
else if (err)
- ssb_dprintk(KERN_ERR PFX
- "Error registering GPIO driver: %i\n", err);
+ ssb_dbg("Error registering GPIO driver: %i\n", err);
err = ssb_fetch_invariants(bus, get_invariants);
if (err) {
ssb_bus_may_powerdown(bus);
@@ -878,11 +873,11 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
if (!err) {
- ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
- "PCI device %s\n", dev_name(&host_pci->dev));
+ ssb_info("Sonics Silicon Backplane found on PCI device %s\n",
+ dev_name(&host_pci->dev));
} else {
- ssb_printk(KERN_ERR PFX "Failed to register PCI version"
- " of SSB with error %d\n", err);
+ ssb_err("Failed to register PCI version of SSB with error %d\n",
+ err);
}
return err;
@@ -903,8 +898,8 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
if (!err) {
- ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
- "PCMCIA device %s\n", pcmcia_dev->devname);
+ ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n",
+ pcmcia_dev->devname);
}
return err;
@@ -925,8 +920,8 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
if (!err) {
- ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on "
- "SDIO device %s\n", sdio_func_id(func));
+ ssb_info("Sonics Silicon Backplane found on SDIO device %s\n",
+ sdio_func_id(func));
}
return err;
@@ -944,8 +939,8 @@ int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr,
err = ssb_bus_register(bus, get_invariants, baseaddr);
if (!err) {
- ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found at "
- "address 0x%08lX\n", baseaddr);
+ ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n",
+ baseaddr);
}
return err;
@@ -1339,7 +1334,7 @@ out:
#endif
return err;
error:
- ssb_printk(KERN_ERR PFX "Bus powerdown failed\n");
+ ssb_err("Bus powerdown failed\n");
goto out;
}
EXPORT_SYMBOL(ssb_bus_may_powerdown);
@@ -1362,7 +1357,7 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
return 0;
error:
- ssb_printk(KERN_ERR PFX "Bus powerup failed\n");
+ ssb_err("Bus powerup failed\n");
return err;
}
EXPORT_SYMBOL(ssb_bus_powerup);
@@ -1470,15 +1465,13 @@ static int __init ssb_modinit(void)
err = b43_pci_ssb_bridge_init();
if (err) {
- ssb_printk(KERN_ERR "Broadcom 43xx PCI-SSB-bridge "
- "initialization failed\n");
+ ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
err = ssb_gige_init();
if (err) {
- ssb_printk(KERN_ERR "SSB Broadcom Gigabit Ethernet "
- "driver initialization failed\n");
+ ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
/* don't fail SSB init because of this */
err = 0;
}
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index e9d94968f39..63ff69f9d3e 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -56,7 +56,7 @@ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
}
return 0;
error:
- ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx);
+ ssb_err("Failed to switch to core %u\n", coreidx);
return -ENODEV;
}
@@ -67,10 +67,9 @@ int ssb_pci_switch_core(struct ssb_bus *bus,
unsigned long flags;
#if SSB_VERBOSE_PCICORESWITCH_DEBUG
- ssb_printk(KERN_INFO PFX
- "Switching to %s core, index %d\n",
- ssb_core_name(dev->id.coreid),
- dev->core_index);
+ ssb_info("Switching to %s core, index %d\n",
+ ssb_core_name(dev->id.coreid),
+ dev->core_index);
#endif
spin_lock_irqsave(&bus->bar_lock, flags);
@@ -231,6 +230,15 @@ static inline u8 ssb_crc8(u8 crc, u8 data)
return t[crc ^ data];
}
+static void sprom_get_mac(char *mac, const u16 *in)
+{
+ int i;
+ for (i = 0; i < 3; i++) {
+ *mac++ = in[i] >> 8;
+ *mac++ = in[i];
+ }
+}
+
static u8 ssb_sprom_crc(const u16 *sprom, u16 size)
{
int word;
@@ -278,7 +286,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
u32 spromctl;
u16 size = bus->sprom_size;
- ssb_printk(KERN_NOTICE PFX "Writing SPROM. Do NOT turn off the power! Please stand by...\n");
+ ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
if (err)
goto err_ctlreg;
@@ -286,17 +294,17 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl);
if (err)
goto err_ctlreg;
- ssb_printk(KERN_NOTICE PFX "[ 0%%");
+ ssb_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
- ssb_printk("25%%");
+ ssb_cont("25%%");
else if (i == size / 2)
- ssb_printk("50%%");
+ ssb_cont("50%%");
else if (i == (size * 3) / 4)
- ssb_printk("75%%");
+ ssb_cont("75%%");
else if (i % 2)
- ssb_printk(".");
+ ssb_cont(".");
writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
mmiowb();
msleep(20);
@@ -309,12 +317,12 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
if (err)
goto err_ctlreg;
msleep(500);
- ssb_printk("100%% ]\n");
- ssb_printk(KERN_NOTICE PFX "SPROM written.\n");
+ ssb_cont("100%% ]\n");
+ ssb_notice("SPROM written\n");
return 0;
err_ctlreg:
- ssb_printk(KERN_ERR PFX "Could not access SPROM control register.\n");
+ ssb_err("Could not access SPROM control register.\n");
return err;
}
@@ -341,8 +349,6 @@ static s8 r123_extract_antgain(u8 sprom_revision, const u16 *in,
static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
{
- int i;
- u16 v;
u16 loc[3];
if (out->revision == 3) /* rev 3 moved MAC */
@@ -352,19 +358,10 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
loc[1] = SSB_SPROM1_ET0MAC;
loc[2] = SSB_SPROM1_ET1MAC;
}
- for (i = 0; i < 3; i++) {
- v = in[SPOFF(loc[0]) + i];
- *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
- }
+ sprom_get_mac(out->il0mac, &in[SPOFF(loc[0])]);
if (out->revision < 3) { /* only rev 1-2 have et0, et1 */
- for (i = 0; i < 3; i++) {
- v = in[SPOFF(loc[1]) + i];
- *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
- }
- for (i = 0; i < 3; i++) {
- v = in[SPOFF(loc[2]) + i];
- *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
- }
+ sprom_get_mac(out->et0mac, &in[SPOFF(loc[1])]);
+ sprom_get_mac(out->et1mac, &in[SPOFF(loc[2])]);
}
SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0);
SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A,
@@ -454,19 +451,15 @@ static void sprom_extract_r458(struct ssb_sprom *out, const u16 *in)
static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
{
- int i;
- u16 v;
u16 il0mac_offset;
if (out->revision == 4)
il0mac_offset = SSB_SPROM4_IL0MAC;
else
il0mac_offset = SSB_SPROM5_IL0MAC;
- /* extract the MAC address */
- for (i = 0; i < 3; i++) {
- v = in[SPOFF(il0mac_offset) + i];
- *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
- }
+
+ sprom_get_mac(out->il0mac, &in[SPOFF(il0mac_offset)]);
+
SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
SSB_SPROM4_ETHPHY_ET1A_SHIFT);
@@ -530,7 +523,7 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
{
int i;
- u16 v, o;
+ u16 o;
u16 pwr_info_offset[] = {
SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
@@ -539,10 +532,8 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
ARRAY_SIZE(out->core_pwr_info));
/* extract the MAC address */
- for (i = 0; i < 3; i++) {
- v = in[SPOFF(SSB_SPROM8_IL0MAC) + i];
- *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
- }
+ sprom_get_mac(out->il0mac, &in[SPOFF(SSB_SPROM8_IL0MAC)]);
+
SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0);
SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
@@ -743,7 +734,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
memset(out, 0, sizeof(*out));
out->revision = in[size - 1] & 0x00FF;
- ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision);
+ ssb_dbg("SPROM revision %d detected\n", out->revision);
memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
memset(out->et1mac, 0xFF, 6);
@@ -752,7 +743,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
* number stored in the SPROM.
* Always extract r1. */
out->revision = 1;
- ssb_dprintk(KERN_DEBUG PFX "SPROM treated as revision %d\n", out->revision);
+ ssb_dbg("SPROM treated as revision %d\n", out->revision);
}
switch (out->revision) {
@@ -769,9 +760,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
sprom_extract_r8(out, in);
break;
default:
- ssb_printk(KERN_WARNING PFX "Unsupported SPROM"
- " revision %d detected. Will extract"
- " v1\n", out->revision);
+ ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
+ out->revision);
out->revision = 1;
sprom_extract_r123(out, in);
}
@@ -791,7 +781,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
u16 *buf;
if (!ssb_is_sprom_available(bus)) {
- ssb_printk(KERN_ERR PFX "No SPROM available!\n");
+ ssb_err("No SPROM available!\n");
return -ENODEV;
}
if (bus->chipco.dev) { /* can be unavailable! */
@@ -810,7 +800,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
} else {
bus->sprom_offset = SSB_SPROM_BASE1;
}
- ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
+ ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset);
buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
if (!buf)
@@ -835,18 +825,15 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
* available for this device in some other storage */
err = ssb_fill_sprom_with_fallback(bus, sprom);
if (err) {
- ssb_printk(KERN_WARNING PFX "WARNING: Using"
- " fallback SPROM failed (err %d)\n",
- err);
+ ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
+ err);
} else {
- ssb_dprintk(KERN_DEBUG PFX "Using SPROM"
- " revision %d provided by"
- " platform.\n", sprom->revision);
+ ssb_dbg("Using SPROM revision %d provided by platform\n",
+ sprom->revision);
err = 0;
goto out_free;
}
- ssb_printk(KERN_WARNING PFX "WARNING: Invalid"
- " SPROM CRC (corrupt SPROM)\n");
+ ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
}
}
err = sprom_extract(bus, sprom, buf, bus->sprom_size);
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index fbafed5b729..b413e018708 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -143,7 +143,7 @@ int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
return 0;
error:
- ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx);
+ ssb_err("Failed to switch to core %u\n", coreidx);
return err;
}
@@ -153,10 +153,9 @@ int ssb_pcmcia_switch_core(struct ssb_bus *bus,
int err;
#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG
- ssb_printk(KERN_INFO PFX
- "Switching to %s core, index %d\n",
- ssb_core_name(dev->id.coreid),
- dev->core_index);
+ ssb_info("Switching to %s core, index %d\n",
+ ssb_core_name(dev->id.coreid),
+ dev->core_index);
#endif
err = ssb_pcmcia_switch_coreidx(bus, dev->core_index);
@@ -192,7 +191,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
return 0;
error:
- ssb_printk(KERN_ERR PFX "Failed to switch pcmcia segment\n");
+ ssb_err("Failed to switch pcmcia segment\n");
return err;
}
@@ -549,44 +548,39 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
bool failed = 0;
size_t size = SSB_PCMCIA_SPROM_SIZE;
- ssb_printk(KERN_NOTICE PFX
- "Writing SPROM. Do NOT turn off the power! "
- "Please stand by...\n");
+ ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN);
if (err) {
- ssb_printk(KERN_NOTICE PFX
- "Could not enable SPROM write access.\n");
+ ssb_notice("Could not enable SPROM write access\n");
return -EBUSY;
}
- ssb_printk(KERN_NOTICE PFX "[ 0%%");
+ ssb_notice("[ 0%%");
msleep(500);
for (i = 0; i < size; i++) {
if (i == size / 4)
- ssb_printk("25%%");
+ ssb_cont("25%%");
else if (i == size / 2)
- ssb_printk("50%%");
+ ssb_cont("50%%");
else if (i == (size * 3) / 4)
- ssb_printk("75%%");
+ ssb_cont("75%%");
else if (i % 2)
- ssb_printk(".");
+ ssb_cont(".");
err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
if (err) {
- ssb_printk(KERN_NOTICE PFX
- "Failed to write to SPROM.\n");
+ ssb_notice("Failed to write to SPROM\n");
failed = 1;
break;
}
}
err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
if (err) {
- ssb_printk(KERN_NOTICE PFX
- "Could not disable SPROM write access.\n");
+ ssb_notice("Could not disable SPROM write access\n");
failed = 1;
}
msleep(500);
if (!failed) {
- ssb_printk("100%% ]\n");
- ssb_printk(KERN_NOTICE PFX "SPROM written.\n");
+ ssb_cont("100%% ]\n");
+ ssb_notice("SPROM written\n");
}
return failed ? -EBUSY : 0;
@@ -700,7 +694,7 @@ static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
return -ENOSPC; /* continue with next entry */
error:
- ssb_printk(KERN_ERR PFX
+ ssb_err(
"PCMCIA: Failed to fetch device invariants: %s\n",
error_description);
return -ENODEV;
@@ -722,7 +716,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
ssb_pcmcia_get_mac, sprom);
if (res != 0) {
- ssb_printk(KERN_ERR PFX
+ ssb_err(
"PCMCIA: Failed to fetch MAC address\n");
return -ENODEV;
}
@@ -733,7 +727,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
if ((res == 0) || (res == -ENOSPC))
return 0;
- ssb_printk(KERN_ERR PFX
+ ssb_err(
"PCMCIA: Failed to fetch device invariants\n");
return -ENODEV;
}
@@ -843,6 +837,6 @@ int ssb_pcmcia_init(struct ssb_bus *bus)
return 0;
error:
- ssb_printk(KERN_ERR PFX "Failed to initialize PCMCIA host device\n");
+ ssb_err("Failed to initialize PCMCIA host device\n");
return err;
}
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index ab4627cf111..b9429df583e 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -125,8 +125,7 @@ static u16 pcidev_to_chipid(struct pci_dev *pci_dev)
chipid_fallback = 0x4401;
break;
default:
- ssb_printk(KERN_ERR PFX
- "PCI-ID not in fallback list\n");
+ ssb_err("PCI-ID not in fallback list\n");
}
return chipid_fallback;
@@ -152,8 +151,7 @@ static u8 chipid_to_nrcores(u16 chipid)
case 0x4704:
return 9;
default:
- ssb_printk(KERN_ERR PFX
- "CHIPID not in nrcores fallback list\n");
+ ssb_err("CHIPID not in nrcores fallback list\n");
}
return 1;
@@ -320,15 +318,13 @@ int ssb_bus_scan(struct ssb_bus *bus,
bus->chip_package = 0;
}
}
- ssb_printk(KERN_INFO PFX "Found chip with id 0x%04X, rev 0x%02X and "
- "package 0x%02X\n", bus->chip_id, bus->chip_rev,
- bus->chip_package);
+ ssb_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
+ bus->chip_id, bus->chip_rev, bus->chip_package);
if (!bus->nr_devices)
bus->nr_devices = chipid_to_nrcores(bus->chip_id);
if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
- ssb_printk(KERN_ERR PFX
- "More than %d ssb cores found (%d)\n",
- SSB_MAX_NR_CORES, bus->nr_devices);
+ ssb_err("More than %d ssb cores found (%d)\n",
+ SSB_MAX_NR_CORES, bus->nr_devices);
goto err_unmap;
}
if (bus->bustype == SSB_BUSTYPE_SSB) {
@@ -370,8 +366,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
nr_80211_cores++;
if (nr_80211_cores > 1) {
if (!we_support_multiple_80211_cores(bus)) {
- ssb_dprintk(KERN_INFO PFX "Ignoring additional "
- "802.11 core\n");
+ ssb_dbg("Ignoring additional 802.11 core\n");
continue;
}
}
@@ -379,8 +374,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
case SSB_DEV_EXTIF:
#ifdef CONFIG_SSB_DRIVER_EXTIF
if (bus->extif.dev) {
- ssb_printk(KERN_WARNING PFX
- "WARNING: Multiple EXTIFs found\n");
+ ssb_warn("WARNING: Multiple EXTIFs found\n");
break;
}
bus->extif.dev = dev;
@@ -388,8 +382,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
break;
case SSB_DEV_CHIPCOMMON:
if (bus->chipco.dev) {
- ssb_printk(KERN_WARNING PFX
- "WARNING: Multiple ChipCommon found\n");
+ ssb_warn("WARNING: Multiple ChipCommon found\n");
break;
}
bus->chipco.dev = dev;
@@ -398,8 +391,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
case SSB_DEV_MIPS_3302:
#ifdef CONFIG_SSB_DRIVER_MIPS
if (bus->mipscore.dev) {
- ssb_printk(KERN_WARNING PFX
- "WARNING: Multiple MIPS cores found\n");
+ ssb_warn("WARNING: Multiple MIPS cores found\n");
break;
}
bus->mipscore.dev = dev;
@@ -420,8 +412,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
}
}
if (bus->pcicore.dev) {
- ssb_printk(KERN_WARNING PFX
- "WARNING: Multiple PCI(E) cores found\n");
+ ssb_warn("WARNING: Multiple PCI(E) cores found\n");
break;
}
bus->pcicore.dev = dev;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 80d366fcf8d..a3b23644b0f 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -127,13 +127,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
goto out_kfree;
err = ssb_devices_freeze(bus, &freeze);
if (err) {
- ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n");
+ ssb_err("SPROM write: Could not freeze all devices\n");
goto out_unlock;
}
res = sprom_write(bus, sprom);
err = ssb_devices_thaw(&freeze);
if (err)
- ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n");
+ ssb_err("SPROM write: Could not thaw all devices\n");
out_unlock:
mutex_unlock(&bus->sprom_mutex);
out_kfree:
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 466171b77f6..4671f17f09a 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -9,16 +9,27 @@
#define PFX "ssb: "
#ifdef CONFIG_SSB_SILENT
-# define ssb_printk(fmt, x...) do { /* nothing */ } while (0)
+# define ssb_printk(fmt, ...) \
+ do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
#else
-# define ssb_printk printk
+# define ssb_printk(fmt, ...) \
+ printk(fmt, ##__VA_ARGS__)
#endif /* CONFIG_SSB_SILENT */
+#define ssb_emerg(fmt, ...) ssb_printk(KERN_EMERG PFX fmt, ##__VA_ARGS__)
+#define ssb_err(fmt, ...) ssb_printk(KERN_ERR PFX fmt, ##__VA_ARGS__)
+#define ssb_warn(fmt, ...) ssb_printk(KERN_WARNING PFX fmt, ##__VA_ARGS__)
+#define ssb_notice(fmt, ...) ssb_printk(KERN_NOTICE PFX fmt, ##__VA_ARGS__)
+#define ssb_info(fmt, ...) ssb_printk(KERN_INFO PFX fmt, ##__VA_ARGS__)
+#define ssb_cont(fmt, ...) ssb_printk(KERN_CONT fmt, ##__VA_ARGS__)
+
/* dprintk: Debugging printk; vanishes for non-debug compilation */
#ifdef CONFIG_SSB_DEBUG
-# define ssb_dprintk(fmt, x...) ssb_printk(fmt , ##x)
+# define ssb_dbg(fmt, ...) \
+ ssb_printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__)
#else
-# define ssb_dprintk(fmt, x...) do { /* nothing */ } while (0)
+# define ssb_dbg(fmt, ...) \
+ do { if (0) printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__); } while (0)
#endif
#ifdef CONFIG_SSB_DEBUG
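The replacement macros above keep an "if (0) printk(...)" body in the silent and non-debug cases instead of an empty statement: the call is optimized away, but the compiler still type-checks the format string against its arguments. A small standalone illustration of the same trick follows; DEBUG and dbg() are made-up names, and ##__VA_ARGS__ is the GNU extension the kernel already relies on.

#include <stdio.h>

#define DEBUG 0

/* Compiles to nothing when DEBUG is 0, yet keeps format checking alive. */
#define dbg(fmt, ...) \
    do { if (DEBUG) printf(fmt, ##__VA_ARGS__); } while (0)

int main(void)
{
    int err = -5;

    dbg("error code: %d\n", err);   /* no output with DEBUG == 0, but a bad */
                                    /* format (e.g. "%s" with an int) still warns */
    return 0;
}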
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 52c25ba5831..c1239aaa628 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
-#include <linux/netlink.h>
+#include <net/netlink.h>
#include <asm/byteorder.h>
#include <net/sock.h>
@@ -25,12 +25,12 @@
#define ND_MAX_GROUP 30
#define ND_IFINDEX_LEN sizeof(int)
-#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
+#define ND_NLMSG_SPACE(len) (nlmsg_total_size(len) + ND_IFINDEX_LEN)
#define ND_NLMSG_DATA(nlh) \
- ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
+ ((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
-#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
+#define ND_NLMSG_IFIDX(nlh) nlmsg_data(nlh)
#define ND_MAX_MSG_LEN 8096
#if defined(DEFINE_MUTEX)
@@ -51,7 +51,7 @@ static void netlink_rcv_cb(struct sk_buff *skb)
void *msg;
int ifindex;
- if (skb->len >= NLMSG_SPACE(0)) {
+ if (skb->len >= NLMSG_HDRLEN) {
nlh = (struct nlmsghdr *)skb->data;
if (skb->len < nlh->nlmsg_len ||
@@ -124,7 +124,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
return -EINVAL;
}
- skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
+ skb = nlmsg_new(len, GFP_ATOMIC);
if (!skb) {
pr_err("netlink_broadcast ret=%d\n", ret);
return -ENOMEM;